Fedir Zadniprovskyi 2024-06-14
chore: improve api docs
@b29e0ba26b152110d52626a92e7f61fc51498ad8
faster_whisper_server/main.py
--- faster_whisper_server/main.py
+++ faster_whisper_server/main.py
@@ -104,7 +104,10 @@
 
 
 @app.get("/v1/models/{model_name:path}")
-def get_model(model_name: Annotated[str, Path()]) -> ModelObject:
+# NOTE: `examples` doesn't work https://github.com/tiangolo/fastapi/discussions/10537
+def get_model(
+    model_name: Annotated[str, Path(example="Systran/faster-distil-whisper-large-v3")],
+) -> ModelObject:
     models = list(
         huggingface_hub.list_models(model_name=model_name, library="ctranslate2")
     )
@@ -148,7 +151,10 @@
 ModelName = Annotated[str, AfterValidator(handle_default_openai_model)]
 
 
-@app.post("/v1/audio/translations")
+@app.post(
+    "/v1/audio/translations",
+    response_model=str | TranscriptionJsonResponse | TranscriptionVerboseJsonResponse,
+)
 def translate_file(
     file: Annotated[UploadFile, Form()],
     model: Annotated[ModelName, Form()] = config.whisper.model,
@@ -156,6 +162,11 @@
     response_format: Annotated[ResponseFormat, Form()] = config.default_response_format,
     temperature: Annotated[float, Form()] = 0.0,
     stream: Annotated[bool, Form()] = False,
+) -> (
+    str
+    | TranscriptionJsonResponse
+    | TranscriptionVerboseJsonResponse
+    | StreamingResponse
 ):
     start = time.perf_counter()
     whisper = load_model(model)
@@ -201,7 +212,10 @@
 
 # https://platform.openai.com/docs/api-reference/audio/createTranscription
 # https://github.com/openai/openai-openapi/blob/master/openapi.yaml#L8915
-@app.post("/v1/audio/transcriptions")
+@app.post(
+    "/v1/audio/transcriptions",
+    response_model=str | TranscriptionJsonResponse | TranscriptionVerboseJsonResponse,
+)
 def transcribe_file(
     file: Annotated[UploadFile, Form()],
     model: Annotated[ModelName, Form()] = config.whisper.model,
@@ -214,6 +228,11 @@
         Form(alias="timestamp_granularities[]"),
     ] = ["segment"],
     stream: Annotated[bool, Form()] = False,
+) -> (
+    str
+    | TranscriptionJsonResponse
+    | TranscriptionVerboseJsonResponse
+    | StreamingResponse
 ):
     start = time.perf_counter()
     whisper = load_model(model)
faster_whisper_server/server_models.py
--- faster_whisper_server/server_models.py
+++ faster_whisper_server/server_models.py
@@ -130,8 +130,6 @@
 
 
 class ModelObject(BaseModel):
-    model_config = ConfigDict(populate_by_name=True)
-
     id: str
     """The model identifier, which can be referenced in the API endpoints."""
     created: int
@@ -140,3 +138,29 @@
     """The object type, which is always "model"."""
     owned_by: str
     """The organization that owns the model."""
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        json_schema_extra={
+            "examples": [
+                {
+                    "id": "Systran/faster-whisper-large-v3",
+                    "created": 1700732060,
+                    "object": "model",
+                    "owned_by": "Systran",
+                },
+                {
+                    "id": "Systran/faster-distil-whisper-large-v3",
+                    "created": 1711378296,
+                    "object": "model",
+                    "owned_by": "Systran",
+                },
+                {
+                    "id": "bofenghuang/whisper-large-v2-cv11-french-ct2",
+                    "created": 1687968011,
+                    "object": "model",
+                    "owned_by": "bofenghuang",
+                },
+            ]
+        },
+    )