yjyoon / whisper_server_speaches
whisper_server_speaches / tests / speech_test.py
Repository root:

File name | Commit message | Commit date
.github/workflows | fix: tests | 01-13
configuration | feat: add instrumentation | 2024-12-17
docs | docs: update | 01-13
examples | rename to `speaches` | 01-12
scripts | chore: misc changes | 2024-10-03
src/speaches | fix: tests | 01-13
tests | fix: tests | 01-13
.dockerignore | fix: .dockerignore | 01-12
.envrc | init | 2024-05-20
.gitattributes | chore(deps): update pre-commit hook astral-sh/ruff-pre-commit to v0.7.2 | 2024-11-02
.gitignore | chore: update .gitignore | 2024-07-03
.pre-commit-config.yaml | chore(deps): update pre-commit hook python-jsonschema/check-jsonschema to v0.31.0 | 01-12
Dockerfile | chore(deps): update ghcr.io/astral-sh/uv docker tag to v0.5.18 | 01-12
LICENSE | init | 2024-05-20
README.md | Update README.md | 01-13
Taskfile.yaml | chore(deps): add portaudio to flake.nix | 01-13
audio.wav | chore: update volume names and mount points | 01-10
compose.cpu.yaml | rename to `speaches` | 01-12
compose.cuda-cdi.yaml | rename to `speaches` | 01-12
compose.cuda.yaml | rename to `speaches` | 01-12
compose.observability.yaml | chore(deps): update otel/opentelemetry-collector-contrib docker tag to v0.117.0 | 01-12
compose.yaml | rename to `speaches` | 01-12
flake.lock | deps: update flake | 2024-11-01
flake.nix | chore(deps): add portaudio to flake.nix | 01-13
mkdocs.yml | docs/fix: home page 404 | 01-13
pyproject.toml | chore(deps): upgrade ruff | 01-13
renovate.json | feat: renovate handle pre-commit | 2024-11-01
uv.lock | chore(deps): upgrade ruff | 01-13
tests/ directory:

File name | Commit message | Commit date
__init__.py | feat: add /v1/models and /v1/model routes #14 | 2024-06-03
api_model_test.py | chore: auto-fix ruff errors | 2024-10-01
api_timestamp_granularities_test.py | feat: add kokoro tts support (#230) | 01-13
conftest.py | fix: tests | 01-13
model_manager_test.py | rename to `speaches` | 01-12
openai_timestamp_granularities_test.py | feat: add kokoro tts support (#230) | 01-13
speech_test.py | fix: tests | 01-13
sse_test.py | feat: add kokoro tts support (#230) | 01-13
tests/speech_test.py — last commit: Fedir Zadniprovskyi · 01-13 · e931c3b · fix: tests
import io
import platform

from openai import APIConnectionError, AsyncOpenAI, UnprocessableEntityError
import pytest
import soundfile as sf

platform_machine = platform.machine()
if platform_machine != "x86_64":
    pytest.skip("Only supported on x86_64", allow_module_level=True)

from speaches.routers.speech import (  # noqa: E402
    DEFAULT_MODEL_ID,
    DEFAULT_RESPONSE_FORMAT,
    DEFAULT_VOICE_ID,
    SUPPORTED_RESPONSE_FORMATS,
    ResponseFormat,
)

DEFAULT_INPUT = "Hello, world!"


@pytest.mark.asyncio
@pytest.mark.parametrize("response_format", SUPPORTED_RESPONSE_FORMATS)
async def test_create_speech_formats(openai_client: AsyncOpenAI, response_format: ResponseFormat) -> None:
    await openai_client.audio.speech.create(
        model=DEFAULT_MODEL_ID,
        voice=DEFAULT_VOICE_ID,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format=response_format,
    )


GOOD_MODEL_VOICE_PAIRS: list[tuple[str, str]] = [
    ("tts-1", "alloy"),  # OpenAI and OpenAI
    ("tts-1-hd", "echo"),  # OpenAI and OpenAI
    ("tts-1", DEFAULT_VOICE_ID),  # OpenAI and Piper
    (DEFAULT_MODEL_ID, "echo"),  # Piper and OpenAI
    (DEFAULT_MODEL_ID, DEFAULT_VOICE_ID),  # Piper and Piper
]


@pytest.mark.asyncio
@pytest.mark.parametrize(("model", "voice"), GOOD_MODEL_VOICE_PAIRS)
async def test_create_speech_good_model_voice_pair(openai_client: AsyncOpenAI, model: str, voice: str) -> None:
    await openai_client.audio.speech.create(
        model=model,
        voice=voice,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format=DEFAULT_RESPONSE_FORMAT,
    )


BAD_MODEL_VOICE_PAIRS: list[tuple[str, str]] = [
    ("tts-1", "invalid"),  # OpenAI and invalid
    ("invalid", "echo"),  # Invalid and OpenAI
    (DEFAULT_MODEL_ID, "invalid"),  # Piper and invalid
    ("invalid", DEFAULT_VOICE_ID),  # Invalid and Piper
    ("invalid", "invalid"),  # Invalid and invalid
]


@pytest.mark.asyncio
@pytest.mark.parametrize(("model", "voice"), BAD_MODEL_VOICE_PAIRS)
async def test_create_speech_bad_model_voice_pair(openai_client: AsyncOpenAI, model: str, voice: str) -> None:
    # NOTE: not sure why `APIConnectionError` is sometimes raised
    with pytest.raises((UnprocessableEntityError, APIConnectionError)):
        await openai_client.audio.speech.create(
            model=model,
            voice=voice,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format=DEFAULT_RESPONSE_FORMAT,
        )


SUPPORTED_SPEEDS = [0.5, 1.0, 2.0]


@pytest.mark.asyncio
async def test_create_speech_with_varying_speed(openai_client: AsyncOpenAI) -> None:
    previous_size: int | None = None
    for speed in SUPPORTED_SPEEDS:
        res = await openai_client.audio.speech.create(
            model=DEFAULT_MODEL_ID,
            voice=DEFAULT_VOICE_ID,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="pcm",
            speed=speed,
        )
        audio_bytes = res.read()
        if previous_size is not None:
            assert len(audio_bytes) * 1.5 < previous_size  # TODO: document magic number
        previous_size = len(audio_bytes)


UNSUPPORTED_SPEEDS = [0.1, 4.1]


@pytest.mark.asyncio
@pytest.mark.parametrize("speed", UNSUPPORTED_SPEEDS)
async def test_create_speech_with_unsupported_speed(openai_client: AsyncOpenAI, speed: float) -> None:
    with pytest.raises(UnprocessableEntityError):
        await openai_client.audio.speech.create(
            model=DEFAULT_MODEL_ID,
            voice=DEFAULT_VOICE_ID,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="pcm",
            speed=speed,
        )


VALID_SAMPLE_RATES = [16000, 22050, 24000, 48000]


@pytest.mark.asyncio
@pytest.mark.parametrize("sample_rate", VALID_SAMPLE_RATES)
async def test_speech_valid_resample(openai_client: AsyncOpenAI, sample_rate: int) -> None:
    res = await openai_client.audio.speech.create(
        model=DEFAULT_MODEL_ID,
        voice=DEFAULT_VOICE_ID,  # type: ignore  # noqa: PGH003
        input=DEFAULT_INPUT,
        response_format="wav",
        extra_body={"sample_rate": sample_rate},
    )
    _, actual_sample_rate = sf.read(io.BytesIO(res.content))
    assert actual_sample_rate == sample_rate


INVALID_SAMPLE_RATES = [7999, 48001]


@pytest.mark.asyncio
@pytest.mark.parametrize("sample_rate", INVALID_SAMPLE_RATES)
async def test_speech_invalid_resample(openai_client: AsyncOpenAI, sample_rate: int) -> None:
    with pytest.raises(UnprocessableEntityError):
        await openai_client.audio.speech.create(
            model=DEFAULT_MODEL_ID,
            voice=DEFAULT_VOICE_ID,  # type: ignore  # noqa: PGH003
            input=DEFAULT_INPUT,
            response_format="wav",
            extra_body={"sample_rate": sample_rate},
        )


# TODO: add piper tests
# TODO: implement the following test
# NUMBER_OF_MODELS = 1
# NUMBER_OF_VOICES = 124
#
#
# @pytest.mark.asyncio
# async def test_list_tts_models(openai_client: AsyncOpenAI) -> None:
#     raise NotImplementedError
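The `openai_client` fixture used throughout these tests is provided by tests/conftest.py, which is not shown on this page. A minimal sketch of what such a fixture could look like follows; the base URL, the placeholder API key, and the use of a plain pytest fixture are assumptions for illustration, not the project's actual setup:

# Hypothetical sketch of the `openai_client` fixture; the real one lives in
# tests/conftest.py and may differ (for example, it could target an in-process
# ASGI app rather than a separately running server).
import pytest
from openai import AsyncOpenAI


@pytest.fixture
def openai_client() -> AsyncOpenAI:
    # Assumed base URL and placeholder API key; adjust to match the actual
    # test environment.
    return AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="cant-be-empty")

With a server reachable at that address, the suite could then be run with, for example, `pytest tests/speech_test.py`.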