

fix: models route not returning openai compatible response
@eb226c55c23254df90c0f6fd542b41b8c3fe1499
--- faster_whisper_server/gradio_app.py
+++ faster_whisper_server/gradio_app.py
... | ... | @@ -67,7 +67,7 @@ |
67 | 67 |
def update_model_dropdown() -> gr.Dropdown: |
68 | 68 |
res = http_client.get("/v1/models") |
69 | 69 |
res_data = res.json() |
70 |
- models: list[str] = [model["id"] for model in res_data] |
|
70 |
+ models: list[str] = [model["id"] for model in res_data["data"]] |
|
71 | 71 |
assert config.whisper.model in models |
72 | 72 |
recommended_models = {model for model in models if model.startswith("Systran")} |
73 | 73 |
other_models = [model for model in models if model not in recommended_models] |
--- faster_whisper_server/main.py
+++ faster_whisper_server/main.py
... | ... | @@ -38,6 +38,7 @@ |
38 | 38 |
from faster_whisper_server.gradio_app import create_gradio_demo |
39 | 39 |
from faster_whisper_server.logger import logger |
40 | 40 |
from faster_whisper_server.server_models import ( |
41 |
+ ModelListResponse, |
|
41 | 42 |
ModelObject, |
42 | 43 |
TranscriptionJsonResponse, |
43 | 44 |
TranscriptionVerboseJsonResponse, |
... | ... | @@ -85,7 +86,7 @@ |
85 | 86 |
|
86 | 87 |
|
87 | 88 |
@app.get("/v1/models") |
88 |
-def get_models() -> list[ModelObject]: |
|
89 |
+def get_models() -> ModelListResponse: |
|
89 | 90 |
models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition") |
90 | 91 |
models = [ |
91 | 92 |
ModelObject( |
... | ... | @@ -97,7 +98,7 @@ |
97 | 98 |
for model in models |
98 | 99 |
if model.created_at is not None |
99 | 100 |
] |
100 |
- return models |
|
101 |
+ return ModelListResponse(data=models) |
|
101 | 102 |
|
102 | 103 |
|
103 | 104 |
@app.get("/v1/models/{model_name:path}") |
--- faster_whisper_server/server_models.py
+++ faster_whisper_server/server_models.py
... | ... | @@ -119,6 +119,11 @@ |
119 | 119 |
) |
120 | 120 |
|
121 | 121 |
|
122 |
+class ModelListResponse(BaseModel): |
|
123 |
+ data: list[ModelObject] |
|
124 |
+ object: Literal["list"] = "list" |
|
125 |
+ |
|
126 |
+ |
|
122 | 127 |
class ModelObject(BaseModel): |
123 | 128 |
id: str |
124 | 129 |
"""The model identifier, which can be referenced in the API endpoints.""" |
--- pyproject.toml
+++ pyproject.toml
... | ... | @@ -17,8 +17,8 @@ |
17 | 17 |
] |
18 | 18 |
|
19 | 19 |
[project.optional-dependencies] |
20 |
-dev = ["ruff", "pytest", "pytest-xdist"] |
|
21 |
-other = ["youtube-dl@git+https://github.com/ytdl-org/youtube-dl.git", "openai", "aider-chat"] |
|
20 |
+dev = ["ruff", "pytest", "pytest-xdist", "openai"] |
|
21 |
+other = ["youtube-dl@git+https://github.com/ytdl-org/youtube-dl.git", "aider-chat"] |
|
22 | 22 |
|
23 | 23 |
# https://docs.astral.sh/ruff/configuration/ |
24 | 24 |
[tool.ruff] |
--- requirements-dev.txt
+++ requirements-dev.txt
... | ... | @@ -9,6 +9,7 @@ |
9 | 9 |
anyio==4.4.0 |
10 | 10 |
# via |
11 | 11 |
# httpx |
12 |
+ # openai |
|
12 | 13 |
# starlette |
13 | 14 |
# watchfiles |
14 | 15 |
attrs==23.2.0 |
... | ... | @@ -38,6 +39,8 @@ |
38 | 39 |
# via faster-whisper |
39 | 40 |
cycler==0.12.1 |
40 | 41 |
# via matplotlib |
42 |
+distro==1.9.0 |
|
43 |
+ # via openai |
|
41 | 44 |
dnspython==2.6.1 |
42 | 45 |
# via email-validator |
43 | 46 |
email-validator==2.2.0 |
... | ... | @@ -82,6 +85,7 @@ |
82 | 85 |
# fastapi |
83 | 86 |
# gradio |
84 | 87 |
# gradio-client |
88 |
+ # openai |
|
85 | 89 |
httpx-sse==0.4.0 |
86 | 90 |
# via faster-whisper-server (pyproject.toml) |
87 | 91 |
huggingface-hub==0.23.4 |
... | ... | @@ -138,6 +142,8 @@ |
138 | 142 |
# pandas |
139 | 143 |
onnxruntime==1.18.0 |
140 | 144 |
# via faster-whisper |
145 |
+openai==1.35.9 |
|
146 |
+ # via faster-whisper-server (pyproject.toml) |
|
141 | 147 |
orjson==3.10.5 |
142 | 148 |
# via |
143 | 149 |
# fastapi |
... | ... | @@ -170,6 +176,7 @@ |
170 | 176 |
# faster-whisper-server (pyproject.toml) |
171 | 177 |
# fastapi |
172 | 178 |
# gradio |
179 |
+ # openai |
|
173 | 180 |
# pydantic-settings |
174 | 181 |
pydantic-core==2.20.0 |
175 | 182 |
# via pydantic |
... | ... | @@ -236,6 +243,7 @@ |
236 | 243 |
# via |
237 | 244 |
# anyio |
238 | 245 |
# httpx |
246 |
+ # openai |
|
239 | 247 |
soundfile==0.12.1 |
240 | 248 |
# via faster-whisper-server (pyproject.toml) |
241 | 249 |
starlette==0.37.2 |
... | ... | @@ -249,7 +257,9 @@ |
249 | 257 |
toolz==0.12.1 |
250 | 258 |
# via altair |
251 | 259 |
tqdm==4.66.4 |
252 |
- # via huggingface-hub |
|
260 |
+ # via |
|
261 |
+ # huggingface-hub |
|
262 |
+ # openai |
|
253 | 263 |
typer==0.12.3 |
254 | 264 |
# via |
255 | 265 |
# fastapi-cli |
... | ... | @@ -260,6 +270,7 @@ |
260 | 270 |
# gradio |
261 | 271 |
# gradio-client |
262 | 272 |
# huggingface-hub |
273 |
+ # openai |
|
263 | 274 |
# pydantic |
264 | 275 |
# pydantic-core |
265 | 276 |
# typer |
--- tests/api_model_test.py
+++ tests/api_model_test.py
... | ... | @@ -1,4 +1,5 @@ |
1 | 1 |
from fastapi.testclient import TestClient |
2 |
+from openai import OpenAI |
|
2 | 3 |
|
3 | 4 |
from faster_whisper_server.server_models import ModelObject |
4 | 5 |
|
... | ... | @@ -17,10 +18,8 @@ |
17 | 18 |
) |
18 | 19 |
|
19 | 20 |
|
20 |
-def test_list_models(client: TestClient) -> None: |
|
21 |
- response = client.get("/v1/models") |
|
22 |
- data = response.json() |
|
23 |
- models = [model_dict_to_object(model_dict) for model_dict in data] |
|
21 |
+def test_list_models(openai_client: OpenAI) -> None: |
|
22 |
+ models = openai_client.models.list().data |
|
24 | 23 |
assert len(models) > MIN_EXPECTED_NUMBER_OF_MODELS |
25 | 24 |
|
26 | 25 |
|
--- tests/conftest.py
+++ tests/conftest.py
... | ... | @@ -2,6 +2,7 @@ |
2 | 2 |
import logging |
3 | 3 |
|
4 | 4 |
from fastapi.testclient import TestClient |
5 |
+from openai import OpenAI |
|
5 | 6 |
import pytest |
6 | 7 |
|
7 | 8 |
from faster_whisper_server.main import app |
... | ... | @@ -19,3 +20,8 @@ |
19 | 20 |
def client() -> Generator[TestClient, None, None]: |
20 | 21 |
with TestClient(app) as client: |
21 | 22 |
yield client |
23 |
+ |
|
24 |
+ |
|
25 |
+@pytest.fixture() |
|
26 |
+def openai_client(client: TestClient) -> OpenAI: |
|
27 |
+ return OpenAI(api_key="cant-be-empty", http_client=client) |