

refactor: read host/port from `Config` (with `UVICORN_HOST`/`UVICORN_PORT` aliases) instead of `os.getenv`; hoist the shared `httpx.Timeout` to a module constant; have `handler` resolve the endpoint once and pass it to the (renamed) `audio_task`/`streaming_audio_task` helpers
@844928f65c8a789975618ae061accb7260e8da9b
--- faster_whisper_server/config.py
+++ faster_whisper_server/config.py
... | ... | @@ -195,6 +195,9 @@ |
195 | 195 |
model_config = SettingsConfigDict(env_nested_delimiter="__") |
196 | 196 |
|
197 | 197 |
log_level: str = "info" |
198 |
+ host: str = Field(alias="UVICORN_HOST", default="0.0.0.0") |
|
199 |
+ port: int = Field(alias="UVICORN_PORT", default=8000) |
|
200 |
+ |
|
198 | 201 |
default_language: Language | None = None |
199 | 202 |
default_response_format: ResponseFormat = ResponseFormat.JSON |
200 | 203 |
whisper: WhisperConfig = WhisperConfig() |
--- faster_whisper_server/gradio_app.py
+++ faster_whisper_server/gradio_app.py
... | ... | @@ -1,5 +1,4 @@ |
1 | 1 |
from collections.abc import Generator |
2 |
-import os |
|
3 | 2 |
|
4 | 3 |
import gradio as gr |
5 | 4 |
import httpx |
... | ... | @@ -11,30 +10,29 @@ |
11 | 10 |
TRANSCRIPTION_ENDPOINT = "/v1/audio/transcriptions" |
12 | 11 |
TRANSLATION_ENDPOINT = "/v1/audio/translations" |
13 | 12 |
TIMEOUT_SECONDS = 180 |
13 |
+TIMEOUT = httpx.Timeout(timeout=TIMEOUT_SECONDS) |
|
14 | 14 |
|
15 | 15 |
|
16 | 16 |
def create_gradio_demo(config: Config) -> gr.Blocks: |
17 |
- host = os.getenv("UVICORN_HOST", "0.0.0.0") |
|
18 |
- port = int(os.getenv("UVICORN_PORT", "8000")) |
|
19 |
- # NOTE: worth looking into generated clients |
|
20 |
- http_client = httpx.Client(base_url=f"http://{host}:{port}", timeout=httpx.Timeout(timeout=TIMEOUT_SECONDS)) |
|
21 |
- openai_client = OpenAI(base_url=f"http://{host}:{port}/v1", api_key="cant-be-empty") |
|
17 |
+ base_url = f"http://{config.host}:{config.port}" |
|
18 |
+ http_client = httpx.Client(base_url=base_url, timeout=TIMEOUT) |
|
19 |
+ openai_client = OpenAI(base_url=f"{base_url}/v1", api_key="cant-be-empty") |
|
22 | 20 |
|
23 | 21 |
def handler(file_path: str, model: str, task: Task, temperature: float, stream: bool) -> Generator[str, None, None]: |
24 |
- if stream: |
|
25 |
- previous_transcription = "" |
|
26 |
- for transcription in transcribe_audio_streaming(file_path, task, temperature, model): |
|
27 |
- previous_transcription += transcription |
|
28 |
- yield previous_transcription |
|
29 |
- else: |
|
30 |
- yield transcribe_audio(file_path, task, temperature, model) |
|
31 |
- |
|
32 |
- def transcribe_audio(file_path: str, task: Task, temperature: float, model: str) -> str: |
|
33 | 22 |
if task == Task.TRANSCRIBE: |
34 | 23 |
endpoint = TRANSCRIPTION_ENDPOINT |
35 | 24 |
elif task == Task.TRANSLATE: |
36 | 25 |
endpoint = TRANSLATION_ENDPOINT |
37 | 26 |
|
27 |
+ if stream: |
|
28 |
+ previous_transcription = "" |
|
29 |
+ for transcription in streaming_audio_task(file_path, endpoint, temperature, model): |
|
30 |
+ previous_transcription += transcription |
|
31 |
+ yield previous_transcription |
|
32 |
+ else: |
|
33 |
+ yield audio_task(file_path, endpoint, temperature, model) |
|
34 |
+ |
|
35 |
+ def audio_task(file_path: str, endpoint: str, temperature: float, model: str) -> str: |
|
38 | 36 |
with open(file_path, "rb") as file: |
39 | 37 |
response = http_client.post( |
40 | 38 |
endpoint, |
... | ... | @@ -49,8 +47,8 @@ |
49 | 47 |
response.raise_for_status() |
50 | 48 |
return response.text |
51 | 49 |
|
52 |
- def transcribe_audio_streaming( |
|
53 |
- file_path: str, task: Task, temperature: float, model: str |
|
50 |
+ def streaming_audio_task( |
|
51 |
+ file_path: str, endpoint: str, temperature: float, model: str |
|
54 | 52 |
) -> Generator[str, None, None]: |
55 | 53 |
with open(file_path, "rb") as file: |
56 | 54 |
kwargs = { |
... | ... | @@ -62,7 +60,6 @@ |
62 | 60 |
"stream": True, |
63 | 61 |
}, |
64 | 62 |
} |
65 |
- endpoint = TRANSCRIPTION_ENDPOINT if task == Task.TRANSCRIBE else TRANSLATION_ENDPOINT |
|
66 | 63 |
with connect_sse(http_client, "POST", endpoint, **kwargs) as event_source: |
67 | 64 |
for event in event_source.iter_sse(): |
68 | 65 |
yield event.data |