

style: add ruff
commit 94c75432f4ac1f02aed03a31b42bd5017441065f
--- .pre-commit-config.yaml
+++ .pre-commit-config.yaml
@@ -8,18 +8,21 @@
       - id: end-of-file-fixer
       - id: check-yaml
       - id: check-added-large-files
-
-  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.10.0
-    hooks:
-      - id: mypy
-
-  # - repo: https://github.com/PyCQA/isort
-  #   rev: 5.13.2
+  # TODO: enable
+  # - repo: https://github.com/pre-commit/mirrors-mypy
+  #   rev: v1.10.0
   #   hooks:
-  #     - id: isort
-  #
-  # - repo: https://github.com/psf/black
-  #   rev: 24.4.2
+  #     - id: mypy
+  #       args: [--strict]
+  # TODO: enable
+  # - repo: https://github.com/RobertCraigie/pyright-python
+  #   rev: v1.1.363
   #   hooks:
-  #     - id: black
+  #     - id: pyright
+  # Disabled because it doesn't work on NixOS
+  # - repo: https://github.com/astral-sh/ruff-pre-commit
+  #   rev: v0.4.4
+  #   hooks:
+  #     - id: ruff # linter
+  #       args: [--fix]
+  #     - id: ruff-format
--- flake.nix
+++ flake.nix
@@ -26,6 +26,7 @@
             pv
             pyright
             python311
+            ruff
             websocat
           ];
           shellHook = ''
--- pyproject.toml
+++ pyproject.toml
@@ -27,6 +27,9 @@
 httpx = "^0.27.0"
 httpx-ws = "^0.6.0"
 
+[tool.ruff]
+target-version = "py311"
+
 [build-system]
 requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
--- speaches/audio.py
+++ speaches/audio.py
@@ -12,7 +12,7 @@
 
 
 def audio_samples_from_file(file: BinaryIO) -> NDArray[np.float32]:
-    audio_and_sample_rate: tuple[NDArray[np.float32], Any] = sf.read(  # type: ignore
+    audio_and_sample_rate = sf.read(
         file,
         format="RAW",
         channels=1,
@@ -22,7 +22,7 @@
         endian="LITTLE",
     )
     audio = audio_and_sample_rate[0]
-    return audio
+    return audio  # type: ignore
 
 
 class Audio:
@@ -68,12 +68,12 @@
         self.modify_event = asyncio.Event()
 
     def extend(self, data: NDArray[np.float32]) -> None:
-        assert self.closed == False
+        assert not self.closed
         super().extend(data)
         self.modify_event.set()
 
     def close(self) -> None:
-        assert self.closed == False
+        assert not self.closed
         self.closed = True
         self.modify_event.set()
         logger.info("AudioStream closed")
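The relocated type: ignore comment works around sf.read being untyped: it returns a (data, samplerate) tuple, so the checker cannot see that the first element is an NDArray[np.float32]. Below is a minimal sketch, not the project's code, of how the ignore could be dropped with an explicit conversion; the keyword arguments that fall in the elided part of the hunk (sample rate, subtype, dtype) are assumed values.

from typing import BinaryIO

import numpy as np
import soundfile as sf
from numpy.typing import NDArray


def audio_samples_from_file(file: BinaryIO) -> NDArray[np.float32]:
    # sf.read returns (data, samplerate); only the samples matter here.
    audio, _sample_rate = sf.read(
        file,
        format="RAW",
        channels=1,
        samplerate=16000,  # assumed value, not visible in the hunk
        subtype="PCM_16",  # assumed value, not visible in the hunk
        dtype="float32",   # assumed value, not visible in the hunk
        endian="LITTLE",
    )
    # An explicit conversion hands the checker an NDArray[np.float32],
    # so no "# type: ignore" is needed on the return.
    return np.asarray(audio, dtype=np.float32)

np.asarray does not copy when the dtype already matches, so the conversion is essentially free.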
--- speaches/core.py
+++ speaches/core.py
@@ -92,14 +92,14 @@
 
 
 def test_segment_is_eos():
-    assert Segment("Hello").is_eos == False
-    assert Segment("Hello...").is_eos == False
-    assert Segment("Hello.").is_eos == True
-    assert Segment("Hello!").is_eos == True
-    assert Segment("Hello?").is_eos == True
-    assert Segment("Hello. Yo").is_eos == False
-    assert Segment("Hello. Yo...").is_eos == False
-    assert Segment("Hello. Yo.").is_eos == True
+    assert not Segment("Hello").is_eos
+    assert not Segment("Hello...").is_eos
+    assert Segment("Hello.").is_eos
+    assert Segment("Hello!").is_eos
+    assert Segment("Hello?").is_eos
+    assert not Segment("Hello. Yo").is_eos
+    assert not Segment("Hello. Yo...").is_eos
+    assert Segment("Hello. Yo.").is_eos
 
 
 def to_full_sentences(words: list[Word]) -> list[Segment]:
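The rewritten asserts track pycodestyle rule E712 (comparison to a boolean literal), which is part of ruff's default rule set; that this rule is what prompted the edit is an assumption. A small self-contained illustration:

closed = False

# Flagged by E712: equality comparison against a boolean literal.
assert closed == False

# The form the diff switches to: check truthiness directly.
assert not closed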
--- speaches/main.py
+++ speaches/main.py
@@ -7,8 +7,14 @@
 from io import BytesIO
 from typing import Annotated
 
-from fastapi import (Depends, FastAPI, Response, UploadFile, WebSocket,
-                     WebSocketDisconnect)
+from fastapi import (
+    Depends,
+    FastAPI,
+    Response,
+    UploadFile,
+    WebSocket,
+    WebSocketDisconnect,
+)
 from fastapi.websockets import WebSocketState
 from faster_whisper import WhisperModel
 from faster_whisper.vad import VadOptions, get_speech_timestamps
@@ -18,8 +24,11 @@
 from speaches.config import SAMPLES_PER_SECOND, Language, config
 from speaches.core import Transcription
 from speaches.logger import logger
-from speaches.server_models import (ResponseFormat, TranscriptionResponse,
-                                    TranscriptionVerboseResponse)
+from speaches.server_models import (
+    ResponseFormat,
+    TranscriptionResponse,
+    TranscriptionVerboseResponse,
+)
 from speaches.transcriber import audio_transcriber
 
 whisper: WhisperModel = None  # type: ignore
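The multi-line imports with a trailing comma match the black-compatible layout that ruff's import sorting and formatter produce; the sketch below only illustrates that trailing-comma behaviour and makes no claim about how this particular commit was generated.

# A trailing ("magic") comma inside the parentheses tells ruff format, like
# black, to keep one name per line; without it, the names may be packed back
# onto a single line whenever they fit within the line-length limit.
from dataclasses import (
    dataclass,
    field,
)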