

OpenAI Whisper API backend
@7ed1d4581feb3d70dd5b358bd1754cbef8a434c8
--- whisper_online.py
+++ whisper_online.py
@@ -4,6 +4,8 @@
 import librosa
 from functools import lru_cache
 import time
+import io
+import soundfile as sf
 
 
 
@@ -147,6 +149,76 @@
     def set_translate_task(self):
         self.transcribe_kargs["task"] = "translate"
 
+
+class OpenaiApiASR(ASRBase):
+    """Uses OpenAI's Whisper API for audio transcription."""
+
+    def __init__(self, modelsize=None, lan=None, cache_dir=None, model_dir=None, response_format="verbose_json", temperature=0):
+        self.modelname = "whisper-1"  # modelsize is not used but kept for interface consistency
+        self.language = lan  # ISO-639-1 language code
+        self.response_format = response_format
+        self.temperature = temperature
+        self.model = self.load_model(modelsize, cache_dir, model_dir)
+
+    def load_model(self, *args, **kwargs):
+        from openai import OpenAI
+        self.client = OpenAI()
+        # Since we're using the OpenAI API, there's no model to load locally.
+        print("Model configuration is set to use the OpenAI Whisper API.")
+
+    def ts_words(self, segments):
+        o = []
+        for segment in segments:
+            # Skip segments containing no speech
+            if segment["no_speech_prob"] > 0.8:
+                continue
+
+            # Splitting the text into words and filtering out empty strings
+            words = [word.strip() for word in segment["text"].split() if word.strip()]
+
+            if not words:
+                continue
+
+            # Assign start and end times for each word.
+            # We only have timestamps per segment, so interpolate start and end times
+            # assuming equal duration per word.
+            segment_duration = segment["end"] - segment["start"]
+            duration_per_word = segment_duration / len(words)
+            start_time = segment["start"]
+            for word in words:
+                end_time = start_time + duration_per_word
+                o.append((start_time, end_time, word))
+                start_time = end_time
+
+        return o
+
+
+    def segments_end_ts(self, res):
+        return [s["end"] for s in res]
+
+    def transcribe(self, audio_data, prompt=None, *args, **kwargs):
+        # Write the audio data to an in-memory WAV buffer
+        buffer = io.BytesIO()
+        buffer.name = "temp.wav"
+        sf.write(buffer, audio_data, samplerate=16000, format='WAV', subtype='PCM_16')
+        buffer.seek(0)  # Reset buffer's position to the beginning
+
+        # Prepare transcription parameters
+        transcription_params = {
+            "model": self.modelname,
+            "file": buffer,
+            "response_format": self.response_format,
+            "temperature": self.temperature
+        }
+        if self.language:
+            transcription_params["language"] = self.language
+        if prompt:
+            transcription_params["prompt"] = prompt
+
+        # Perform the transcription
+        transcript = self.client.audio.transcriptions.create(**transcription_params)
+
+        return transcript.segments
 
 
 class HypothesisBuffer:
@@ -459,7 +531,7 @@
     parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.")
     parser.add_argument('--lan', '--language', type=str, default='en', help="Source language code, e.g. en,de,cs, or 'auto' for language detection.")
     parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"], help="Transcribe or translate.")
-    parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped"], help='Load only this backend for Whisper processing.')
+    parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "openai-api"], help='Load only this backend for Whisper processing.')
     parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.')
     parser.add_argument('--buffer_trimming', type=str, default="segment", choices=["sentence", "segment"], help='Buffer trimming strategy -- trim completed sentences marked with punctuation mark and detected by sentence segmenter, or the completed segments returned by Whisper. Sentence segmenter must be installed for "sentence" option.')
     parser.add_argument('--buffer_trimming_sec', type=float, default=15, help='Buffer trimming length threshold in seconds. If buffer length is longer, trimming sentence/segment is triggered.')
@@ -499,6 +571,8 @@
 
     if args.backend == "faster-whisper":
         asr_cls = FasterWhisperASR
+    elif args.backend == "openai-api":
+        asr_cls = OpenaiApiASR
     else:
         asr_cls = WhisperTimestampedASR
 
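For orientation, a minimal usage sketch of the new backend, not part of the diff. It assumes the OPENAI_API_KEY environment variable is set (the openai client reads it automatically), that "sample.wav" is a hypothetical local audio file, and that OpenaiApiASR is importable from whisper_online as shown:

    import librosa
    from whisper_online import OpenaiApiASR  # import path assumed

    asr = OpenaiApiASR(lan="en")  # lan=None would let the API detect the language

    # librosa yields the 16 kHz mono float audio that transcribe() re-encodes as WAV via soundfile
    audio, _ = librosa.load("sample.wav", sr=16000)

    segments = asr.transcribe(audio)  # verbose_json response: a list of segments
    words = asr.ts_words(segments)    # [(start, end, word), ...] with equal-duration word timings
    print(words[:5])
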
--- whisper_online_server.py
+++ whisper_online_server.py
@@ -29,6 +29,8 @@
 if args.backend == "faster-whisper":
     from faster_whisper import WhisperModel
     asr_cls = FasterWhisperASR
+elif args.backend == "openai-api":
+    asr_cls = OpenaiApiASR
 else:
     import whisper
     import whisper_timestamped
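
Downstream of this selection, both scripts presumably construct the chosen backend through the common ASRBase-style interface; the keyword names below are assumptions based on OpenaiApiASR's signature and the argparse options referenced above, not code from this commit:

    # Sketch only: instantiating whichever backend class was selected.
    # For the openai-api backend, modelsize/cache_dir/model_dir are accepted but effectively unused.
    asr = asr_cls(modelsize=args.model, lan=args.lan,
                  cache_dir=args.model_cache_dir, model_dir=args.model_dir)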