

chore: fix some ruff errors
@e14ef3b4d7157033c412f1e4fc28eaf93ed3bc77
--- pyproject.toml
+++ pyproject.toml
@@ -48,11 +48,9 @@
     "FIX",
     "TD", # disable todo warnings
     "ERA", # allow commented out code
-    "PTH",
 
     "ANN003", # missing kwargs
     "ANN101", # missing self type
-    "ANN102", # missing cls
     "B006",
     "B008",
     "COM812", # trailing comma
--- scripts/client.py
+++ scripts/client.py
@@ -64,7 +64,7 @@
 print(f"Recording finished. File size: {file.stat().st_size} bytes")
 
 try:
-    with open(file, "rb") as fd:
+    with file.open("rb") as fd:
         start = time.perf_counter()
         res = client.post(
             OPENAI_BASE_URL + TRANSCRIBE_PATH,
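
Note: this change assumes `file` is already a pathlib.Path (the preceding line calls `file.stat()`), so `file.open("rb")` is a drop-in replacement for `open(file, "rb")` and satisfies ruff's PTH123. A minimal sketch of the equivalence, using a hypothetical path name:

from pathlib import Path

file = Path("recording.wav")   # hypothetical stand-in for the script's recorded file
file.write_bytes(b"\x00" * 4)  # create something to read

with open(file, "rb") as fd:   # old form, flagged by PTH123
    a = fd.read()
with file.open("rb") as fd:    # new pathlib-native form; same binary-read semantics
    b = fd.read()
assert a == b
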
--- src/faster_whisper_server/gradio_app.py
+++ src/faster_whisper_server/gradio_app.py
@@ -1,4 +1,5 @@
 from collections.abc import Generator
+from pathlib import Path
 
 import gradio as gr
 import httpx
@@ -33,7 +34,7 @@
         yield audio_task(file_path, endpoint, temperature, model)
 
 def audio_task(file_path: str, endpoint: str, temperature: float, model: str) -> str:
-    with open(file_path, "rb") as file:
+    with Path(file_path).open("rb") as file:
         response = http_client.post(
             endpoint,
             files={"file": file},
@@ -50,7 +51,7 @@
 def streaming_audio_task(
     file_path: str, endpoint: str, temperature: float, model: str
 ) -> Generator[str, None, None]:
-    with open(file_path, "rb") as file:
+    with Path(file_path).open("rb") as file:
         kwargs = {
             "files": {"file": file},
             "data": {
--- src/faster_whisper_server/routers/list_models.py
+++ src/faster_whisper_server/routers/list_models.py
@@ -24,7 +24,7 @@
 def get_models() -> ListModelsResponse:
     models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition", cardData=True)
     models = list(models)
-    models.sort(key=lambda model: model.downloads, reverse=True)  # type: ignore  # noqa: PGH003
+    models.sort(key=lambda model: model.downloads or -1, reverse=True)
     transformed_models: list[Model] = []
     for model in models:
         assert model.created_at is not None
@@ -56,7 +56,7 @@
         model_name=model_name, library="ctranslate2", tags="automatic-speech-recognition", cardData=True
     )
     models = list(models)
-    models.sort(key=lambda model: model.downloads, reverse=True)  # type: ignore  # noqa: PGH003
+    models.sort(key=lambda model: model.downloads or -1, reverse=True)
     if len(models) == 0:
         raise HTTPException(status_code=404, detail="Model doesn't exists")
     exact_match: ModelInfo | None = None
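
Note: huggingface_hub's ModelInfo.downloads can be None, which is why the old lambda needed the blanket `# type: ignore`. Sorting on `model.downloads or -1` gives entries with a missing download count a key of -1, so they sink to the end of the descending sort while the key stays an int. A minimal sketch of the sort-key behavior with hypothetical counts:

downloads: list[int | None] = [1200, None, 53, 0, None]

# `d or -1` maps None (and, as a quirk of `or`, 0) to -1, so entries without a
# real download count sort after every entry that has one.
ranked = sorted(downloads, key=lambda d: d or -1, reverse=True)
print(ranked)  # [1200, 53, None, 0, None]
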
--- src/faster_whisper_server/routers/stt.py
+++ src/faster_whisper_server/routers/stt.py
@@ -57,26 +57,27 @@
     response_format: ResponseFormat,
 ) -> Response:
     segments = list(segments)
-    if response_format == ResponseFormat.TEXT:  # noqa: RET503
-        return Response(segments_to_text(segments), media_type="text/plain")
-    elif response_format == ResponseFormat.JSON:
-        return Response(
-            CreateTranscriptionResponseJson.from_segments(segments).model_dump_json(),
-            media_type="application/json",
-        )
-    elif response_format == ResponseFormat.VERBOSE_JSON:
-        return Response(
-            CreateTranscriptionResponseVerboseJson.from_segments(segments, transcription_info).model_dump_json(),
-            media_type="application/json",
-        )
-    elif response_format == ResponseFormat.VTT:
-        return Response(
-            "".join(segments_to_vtt(segment, i) for i, segment in enumerate(segments)), media_type="text/vtt"
-        )
-    elif response_format == ResponseFormat.SRT:
-        return Response(
-            "".join(segments_to_srt(segment, i) for i, segment in enumerate(segments)), media_type="text/plain"
-        )
+    match response_format:
+        case ResponseFormat.TEXT:
+            return Response(segments_to_text(segments), media_type="text/plain")
+        case ResponseFormat.JSON:
+            return Response(
+                CreateTranscriptionResponseJson.from_segments(segments).model_dump_json(),
+                media_type="application/json",
+            )
+        case ResponseFormat.VERBOSE_JSON:
+            return Response(
+                CreateTranscriptionResponseVerboseJson.from_segments(segments, transcription_info).model_dump_json(),
+                media_type="application/json",
+            )
+        case ResponseFormat.VTT:
+            return Response(
+                "".join(segments_to_vtt(segment, i) for i, segment in enumerate(segments)), media_type="text/vtt"
+            )
+        case ResponseFormat.SRT:
+            return Response(
+                "".join(segments_to_srt(segment, i) for i, segment in enumerate(segments)), media_type="text/plain"
+            )
 
 
 def format_as_sse(data: str) -> str:
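
Note: the if/elif chain becomes structural pattern matching (match/case), which requires Python 3.10+. The branches, media types, and response bodies are unchanged, and the `# noqa: RET503` suppression is dropped. As before, there is no default branch, so an unrecognized `response_format` still falls through and implicitly returns None.
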
--- tests/sse_test.py
+++ tests/sse_test.py
@@ -1,5 +1,5 @@
 import json
-import os
+from pathlib import Path
 
 import anyio
 from faster_whisper_server.api_models import (
@@ -26,7 +26,7 @@
 @pytest.mark.asyncio()
 @pytest.mark.parametrize(("file_path", "endpoint"), parameters)
 async def test_streaming_transcription_text(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
-    extension = os.path.splitext(file_path)[1]
+    extension = Path(file_path).suffix[1:]
     async with await anyio.open_file(file_path, "rb") as f:
         data = await f.read()
     kwargs = {
@@ -42,7 +42,7 @@
 @pytest.mark.asyncio()
 @pytest.mark.parametrize(("file_path", "endpoint"), parameters)
 async def test_streaming_transcription_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
-    extension = os.path.splitext(file_path)[1]
+    extension = Path(file_path).suffix[1:]
     async with await anyio.open_file(file_path, "rb") as f:
         data = await f.read()
     kwargs = {
@@ -57,7 +57,7 @@
 @pytest.mark.asyncio()
 @pytest.mark.parametrize(("file_path", "endpoint"), parameters)
 async def test_streaming_transcription_verbose_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
-    extension = os.path.splitext(file_path)[1]
+    extension = Path(file_path).suffix[1:]
     async with await anyio.open_file(file_path, "rb") as f:
         data = await f.read()
     kwargs = {
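
Note: this is not a purely mechanical swap: os.path.splitext(file_path)[1] returns the extension with its leading dot (".wav"), while Path(file_path).suffix[1:] strips the dot ("wav"). Whether that changes what the tests send depends on how `extension` is used in the `kwargs` below, which is not visible in this hunk. A quick comparison, assuming a hypothetical path:

import os
from pathlib import Path

file_path = "audio.wav"  # hypothetical test fixture path

print(os.path.splitext(file_path)[1])  # ".wav"  (old expression, dot included)
print(Path(file_path).suffix)          # ".wav"  (suffix keeps the dot)
print(Path(file_path).suffix[1:])      # "wav"   (new expression, dot stripped)
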