• Y
  • List All
  • Feedback
    • This Project
    • All Projects
Profile Account settings Log out
  • Favorite
  • Project
  • All
Loading...
  • Log in
  • Sign up
yjyoon / whisper_server_speaches star
  • Project home (H)
  • Code (C)
  • Issues (I)
  • Pull requests (P)
  • Reviews (R)
  • Milestones (M)
  • Board (B)
  • Files
  • Commit
  • Branches
whisper_server_speaches / tests / speech_test.py
Download as .zip file
File name
Commit message
Commit date
.github/workflows
chore(deps): update astral-sh/setup-uv action to v3
2024-10-13
docs
docs: initialize mkdocs
2024-10-03
examples
Update script.sh
2024-09-03
scripts
chore: misc changes
2024-10-03
src/speaches
feat: tts
2024-11-01
tests
feat: tts
2024-11-01
.dockerignore
chore: update .dockerignore
2024-11-01
.envrc
init
2024-05-20
.gitattributes
docs: add live-transcription demo
2024-05-28
.gitignore
chore: update .gitignore
2024-07-03
.pre-commit-config.yaml
deps: update `ruff`
2024-11-01
Dockerfile.cpu
chore(deps): update ghcr.io/astral-sh/uv docker tag to v0.4.20
2024-10-13
Dockerfile.cuda
chore(deps): update ghcr.io/astral-sh/uv docker tag to v0.4.20
2024-10-13
LICENSE
init
2024-05-20
README.md
Improved readability in README.md
2024-10-09
Taskfile.yaml
chore: Taskfile changes
2024-11-01
audio.wav
docs: update README.md
2024-05-27
compose.yaml
chore: format compose
2024-09-11
flake.lock
deps: update flake
2024-11-01
flake.nix
Add `pre-commit` to pyproject.toml
2024-10-13
mkdocs.yml
docs: initialize mkdocs
2024-10-03
pyproject.toml
deps: add `piper-tts`
2024-11-01
renovate.json
Add renovate.json
2024-10-13
uv.lock
deps: add `piper-tts`
2024-11-01
File name
Commit message
Commit date
__init__.py
feat: add /v1/models and /v1/model routes #14
2024-06-03
api_model_test.py
chore: auto-fix ruff errors
2024-10-01
api_timestamp_granularities_test.py
chore: auto-fix ruff errors
2024-10-01
conftest.py
feat: tts
2024-11-01
model_manager_test.py
feat: model unloading
2024-10-01
openai_timestamp_granularities_test.py
chore: auto-fix ruff errors
2024-10-01
speech_test.py
feat: tts
2024-11-01
sse_test.py
chore: auto-fix ruff errors
2024-10-01
Fedir Zadniprovskyi 2024-11-01 1d2b518 feat: tts UNIX
Raw Open in browser Change history
"""Integration tests for the text-to-speech (`/v1/audio/speech`) endpoint.

These tests drive a running server through the OpenAI async client fixture
(`openai_client`) and are skipped on non-x86_64 hosts, where the Piper TTS
backend is unavailable.
"""

import io
import platform

import pytest
import soundfile as sf
from openai import APIConnectionError, AsyncOpenAI, UnprocessableEntityError

from faster_whisper_server.routers.speech import (
    DEFAULT_MODEL,
    DEFAULT_RESPONSE_FORMAT,
    DEFAULT_VOICE,
    SUPPORTED_RESPONSE_FORMATS,
    ResponseFormat,
)

# Short, fixed utterance shared by every test case.
DEFAULT_INPUT = "Hello, world!"

# Evaluated once at import time; used by the skipif marks below.
platform_machine = platform.machine()


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize("response_format", SUPPORTED_RESPONSE_FORMATS)
async def test_create_speech_formats(openai_client: AsyncOpenAI, response_format: ResponseFormat) -> None:
    """Each advertised response format is accepted without error."""
    await openai_client.audio.speech.create(
        model=DEFAULT_MODEL,
        voice=DEFAULT_VOICE,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format=response_format,
    )


# (model, voice) combinations the server should accept: OpenAI-style names
# and Piper defaults, in every cross-pairing.
GOOD_MODEL_VOICE_PAIRS: list[tuple[str, str]] = [
    ("tts-1", "alloy"),  # OpenAI and OpenAI
    ("tts-1-hd", "echo"),  # OpenAI and OpenAI
    ("tts-1", DEFAULT_VOICE),  # OpenAI and Piper
    (DEFAULT_MODEL, "echo"),  # Piper and OpenAI
    (DEFAULT_MODEL, DEFAULT_VOICE),  # Piper and Piper
]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize(("model", "voice"), GOOD_MODEL_VOICE_PAIRS)
async def test_create_speech_good_model_voice_pair(openai_client: AsyncOpenAI, model: str, voice: str) -> None:
    """Valid model/voice pairings succeed."""
    await openai_client.audio.speech.create(
        model=model,
        voice=voice,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format=DEFAULT_RESPONSE_FORMAT,
    )


# (model, voice) combinations the server should reject.
BAD_MODEL_VOICE_PAIRS: list[tuple[str, str]] = [
    ("tts-1", "invalid"),  # OpenAI and invalid
    ("invalid", "echo"),  # Invalid and OpenAI
    (DEFAULT_MODEL, "invalid"),  # Piper and invalid
    ("invalid", DEFAULT_VOICE),  # Invalid and Piper
    ("invalid", "invalid"),  # Invalid and invalid
]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize(("model", "voice"), BAD_MODEL_VOICE_PAIRS)
async def test_create_speech_bad_model_voice_pair(openai_client: AsyncOpenAI, model: str, voice: str) -> None:
    """Invalid model/voice pairings raise a client-side error."""
    # NOTE: not sure why `APIConnectionError` is sometimes raised
    with pytest.raises((UnprocessableEntityError, APIConnectionError)):
        await openai_client.audio.speech.create(
            model=model,
            voice=voice,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format=DEFAULT_RESPONSE_FORMAT,
        )


SUPPORTED_SPEEDS = [0.25, 0.5, 1.0, 2.0, 4.0]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
async def test_create_speech_with_varying_speed(openai_client: AsyncOpenAI) -> None:
    """Raising the speed shrinks the generated PCM payload monotonically."""
    prior_size: int | None = None
    for speed in SUPPORTED_SPEEDS:
        response = await openai_client.audio.speech.create(
            model=DEFAULT_MODEL,
            voice=DEFAULT_VOICE,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="pcm",
            speed=speed,
        )
        pcm_data = response.read()
        if prior_size is not None:
            assert len(pcm_data) * 1.5 < prior_size  # TODO: document magic number
        prior_size = len(pcm_data)


UNSUPPORTED_SPEEDS = [0.1, 4.1]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize("speed", UNSUPPORTED_SPEEDS)
async def test_create_speech_with_unsupported_speed(openai_client: AsyncOpenAI, speed: float) -> None:
    """Speeds outside the supported range are rejected with a 422."""
    with pytest.raises(UnprocessableEntityError):
        await openai_client.audio.speech.create(
            model=DEFAULT_MODEL,
            voice=DEFAULT_VOICE,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="pcm",
            speed=speed,
        )


VALID_SAMPLE_RATES = [16000, 22050, 24000, 48000]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize("sample_rate", VALID_SAMPLE_RATES)
async def test_speech_valid_resample(openai_client: AsyncOpenAI, sample_rate: int) -> None:
    """The `sample_rate` extra-body option resamples the WAV output as requested."""
    response = await openai_client.audio.speech.create(
        model=DEFAULT_MODEL,
        voice=DEFAULT_VOICE,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format="wav",
        extra_body={"sample_rate": sample_rate},
    )
    # Decode the returned WAV header to confirm the actual rate.
    _, actual_sample_rate = sf.read(io.BytesIO(response.content))
    assert actual_sample_rate == sample_rate


INVALID_SAMPLE_RATES = [7999, 48001]


@pytest.mark.asyncio
@pytest.mark.skipif(platform_machine != "x86_64", reason="Only supported on x86_64")
@pytest.mark.parametrize("sample_rate", INVALID_SAMPLE_RATES)
async def test_speech_invalid_resample(openai_client: AsyncOpenAI, sample_rate: int) -> None:
    """Sample rates outside the supported range are rejected with a 422."""
    with pytest.raises(UnprocessableEntityError):
        await openai_client.audio.speech.create(
            model=DEFAULT_MODEL,
            voice=DEFAULT_VOICE,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="wav",
            extra_body={"sample_rate": sample_rate},
        )


# TODO: implement the following test

# NUMBER_OF_MODELS = 1
# NUMBER_OF_VOICES = 124
#
#
# @pytest.mark.asyncio
# async def test_list_tts_models(openai_client: AsyncOpenAI) -> None:
#     raise NotImplementedError

          
        
    
    
Copyright Yona authors & © NAVER Corp. & NAVER LABS Supported by NAVER CLOUD PLATFORM

or
Sign in with GitHub | Sign in with Google
Reset password | Sign up