Dominik Macháček 2024-01-03
Merge remote-tracking branch 'rodrigo/main' into vad-streaming
@ecee2fbef721857dd270d51988135390c5291fcb
 
mic_test_whisper_simple.py (added)
+++ mic_test_whisper_simple.py
@@ -0,0 +1,95 @@
+from microphone_stream import MicrophoneStream
+from voice_activity_controller import VoiceActivityController
+from whisper_online import *
+import numpy as np
+import librosa
+import io
+import soundfile
+
+
+
+
+class SimpleASRProcessor:
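+    """Minimal ASR processor: buffers VAD-gated audio and, when a phrase is marked
+    final, transcribes the whole buffer and yields (is_final, text) tuples."""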
+
+    def __init__(self, asr, sampling_rate=16000):
+        """Run this when starting or restarting processing."""
+        self.audio_buffer = np.array([], dtype=np.float32)
+        self.prompt_buffer = ""
+        self.asr = asr
+        self.sampling_rate = sampling_rate
+        self.init_prompt = ''
+
+    def ts_words(self, segments):
+        """Concatenate the words of all segments, skipping segments that are likely not speech."""
+        result = ""
+        for segment in segments:
+            if segment.no_speech_prob > 0.9:
+                continue
+            for word in segment.words:
+                result += word.word
+        return result
+
+    def stream_process(self, vad_result):
+        iter_in_phrase = 0
+        for chunk, is_final in vad_result:
+            iter_in_phrase += 1
+
+            if chunk:
+                sf = soundfile.SoundFile(io.BytesIO(chunk), channels=1, endian="LITTLE",
+                                         samplerate=self.sampling_rate, subtype="PCM_16", format="RAW")
+                audio, _ = librosa.load(sf, sr=self.sampling_rate)
+                self.audio_buffer = np.append(self.audio_buffer, audio)
+
+            if is_final and len(self.audio_buffer) > 0:
+                res = self.asr.transcribe(self.audio_buffer, init_prompt=self.init_prompt)
+                tsw = self.ts_words(res)
+
+                # keep only the last 100 characters as the prompt for the next phrase
+                self.init_prompt = (self.init_prompt + tsw)[-100:]
+                self.audio_buffer = np.array([], dtype=np.float32)
+                iter_in_phrase = 0
+
+                yield True, tsw
+            # show progress every 50 chunks
+            elif iter_in_phrase % 50 == 0 and len(self.audio_buffer) > 0:
+                res = self.asr.transcribe(self.audio_buffer, init_prompt=self.init_prompt)
+                # use custom ts_words
+                tsw = self.ts_words(res)
+                yield False, tsw
+
+
+
+
+
+SAMPLING_RATE = 16000
+
+model = "large-v2"
+src_lan = "en"  # source language
+tgt_lan = "en"  # target language  -- same as source for ASR, "en" if translate task is used
+use_vad = False
+min_sample_length = 1 * SAMPLING_RATE
+
+
+
+vac = VoiceActivityController(use_vad_result=use_vad)
+asr = FasterWhisperASR(src_lan, model)  # loads and wraps the Whisper model
+
+tokenizer = create_tokenizer(tgt_lan)  # sentence segmenter for the target language
+online = SimpleASRProcessor(asr)
+
+
+# chain the generators: microphone -> VAD -> ASR processor
+stream = MicrophoneStream()
+stream = vac.detect_user_speech(stream, audio_in_int16=False)
+stream = online.stream_process(stream)
+
+for is_final, text in stream:
+    if is_final:
+        print(text, end="\r\n")
+    else:
+        print(text, end="\r")
 
mic_test_whisper_streaming.py (added)
+++ mic_test_whisper_streaming.py
@@ -0,0 +1,71 @@
+from microphone_stream import MicrophoneStream
+from voice_activity_controller import VoiceActivityController
+from whisper_online import *
+import numpy as np
+import librosa
+import io
+import soundfile
+
+
+SAMPLING_RATE = 16000
+model = "large-v2"
+src_lan = "en"  # source language
+tgt_lan = "en"  # target language  -- same as source for ASR, "en" if translate task is used
+use_vad_result = True
+min_sample_length = 1 * SAMPLING_RATE
+
+
+
+asr = FasterWhisperASR(src_lan, model)  # loads and wraps Whisper model
+tokenizer = create_tokenizer(tgt_lan)  # sentence segmenter for the target language
+online = OnlineASRProcessor(asr, tokenizer)  # create processing object
+
+microphone_stream = MicrophoneStream()
+vad = VoiceActivityController(use_vad_result=use_vad_result)
+
+complete_text = ''
+out = []
+out_len = 0
+for raw_bytes, is_final in vad.detect_user_speech(microphone_stream):   # processing loop
+
+    if raw_bytes:
+        sf = soundfile.SoundFile(io.BytesIO(raw_bytes), channels=1, endian="LITTLE",
+                                 samplerate=SAMPLING_RATE, subtype="PCM_16", format="RAW")
+        audio, _ = librosa.load(sf, sr=SAMPLING_RATE)
+        out.append(audio)
+        out_len += len(audio)
+
+    if (is_final or out_len >= min_sample_length) and out_len > 0:
+        a = np.concatenate(out)
+        online.insert_audio_chunk(a)
+
+    if out_len > min_sample_length:
+        o = online.process_iter()
+        print('-----' * 10)
+        complete_text = complete_text + o[2]
+        print('PARTIAL - ' + complete_text)  # do something with the current partial output
+        print('-----' * 10)
+        out = []
+        out_len = 0
+
+    if is_final:
+        o = online.finish()
+        print('-----' * 10)
+        complete_text = complete_text + o[2]
+        print('FINAL - ' + complete_text)  # do something with the final output
+        print('-----' * 10)
+        online.init()
+        out = []
+        out_len = 0
+        
+
+
+
+
+
+
 
microphone_stream.py (added)
+++ microphone_stream.py
@@ -0,0 +1,82 @@
+
+
+### mic stream
+
+import pyaudio
+
+
+class MicrophoneStream:
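+    """Iterator that yields raw 16-bit PCM chunks read from the default microphone via PyAudio."""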
+    def __init__(
+        self,
+        sample_rate: int = 16000,
+    ):
+        """
+        Creates a stream of audio from the microphone.
+
+        Args:
+            chunk_size: The size of each chunk of audio to read from the microphone.
+            channels: The number of channels to record audio from.
+            sample_rate: The sample rate to record audio at.
+        """
+        try:
+            import pyaudio
+        except ImportError:
+            raise Exception('py audio not installed')
+
+        self._pyaudio = pyaudio.PyAudio()
+        self.sample_rate = sample_rate
+
+        self._chunk_size = int(self.sample_rate * 40  / 1000)
+        self._stream = self._pyaudio.open(
+            format=pyaudio.paInt16,
+            channels=1,
+            rate=sample_rate,
+            input=True,
+            frames_per_buffer=self._chunk_size,
+        )
+
+        self._open = True
+
+    def __iter__(self):
+        """
+        Returns the iterator object.
+        """
+
+        return self
+
+    def __next__(self):
+        """
+        Reads a chunk of audio from the microphone.
+        """
+        if not self._open:
+            raise StopIteration
+
+        try:
+            return self._stream.read(self._chunk_size)
+        except KeyboardInterrupt:
+            raise StopIteration
+
+    def close(self):
+        """
+        Closes the stream.
+        """
+
+        self._open = False
+
+        if self._stream.is_active():
+            self._stream.stop_stream()
+
+        self._stream.close()
+        self._pyaudio.terminate()
+
+
+
+
+
+
+
+
+
 
voice_activity_controller.py (added)
+++ voice_activity_controller.py
@@ -0,0 +1,119 @@
+import torch
+import numpy as np
+
+
+def int2float(sound):
+    """Convert int16 PCM samples to float32 in the range [-1, 1]."""
+    abs_max = np.abs(sound).max()
+    sound = sound.astype('float32')
+    if abs_max > 0:
+        sound *= 1 / 32768
+    sound = sound.squeeze()  # depends on the use case
+    return sound
+
+class VoiceActivityController:
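+    """Gates an audio stream with the Silero VAD model and marks phrase boundaries;
+    see detect_user_speech(), which yields (audio_bytes, is_final) tuples."""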
+    def __init__(
+            self,
+            sampling_rate=16000,
+            min_silence_to_final_ms=500,
+            min_speech_to_final_ms=100,
+            min_silence_duration_ms=100,
+            use_vad_result=True,
+            activity_detected_callback=None,
+            threshold=0.3
+    ):
+        self.activity_detected_callback = activity_detected_callback
+        self.model, self.utils = torch.hub.load(
+            repo_or_dir='snakers4/silero-vad',
+            model='silero_vad'
+        )
+        # self.utils contains (get_speech_timestamps, save_audio, read_audio, VADIterator, collect_chunks)
+
+        self.sampling_rate = sampling_rate
+        # limits converted from milliseconds to samples
+        self.final_silence_limit = min_silence_to_final_ms * self.sampling_rate / 1000
+        self.final_speech_limit = min_speech_to_final_ms * self.sampling_rate / 1000
+        self.min_silence_samples = sampling_rate * min_silence_duration_ms / 1000
+
+        self.use_vad_result = use_vad_result
+        self.last_marked_chunk = None
+        self.threshold = threshold
+        self.reset_states()
+
+    def reset_states(self):
+        self.model.reset_states()
+        self.temp_end = 0
+        self.current_sample = 0
+
+    def apply_vad(self, audio):
+        """Run Silero VAD on one chunk of int16 audio.
+
+        Returns (audio, speech_samples, silence_samples); audio is replaced by an
+        empty array when use_vad_result is set and the chunk is counted as silence.
+        """
+        x = int2float(audio)
+        if not torch.is_tensor(x):
+            try:
+                x = torch.Tensor(x)
+            except Exception:
+                raise TypeError("Audio cannot be cast to a tensor. Cast it manually.")
+
+        speech_prob = self.model(x, self.sampling_rate).item()
+
+        window_size_samples = len(x[0]) if x.dim() == 2 else len(x)
+        self.current_sample += window_size_samples
+
+        if speech_prob >= self.threshold:
+            self.temp_end = 0
+            return audio, window_size_samples, 0
+        else:
+            if not self.temp_end:
+                self.temp_end = self.current_sample
+
+            if self.current_sample - self.temp_end < self.min_silence_samples:
+                return audio, 0, window_size_samples
+            else:
+                return (np.array([], dtype=np.float16) if self.use_vad_result else audio), 0, window_size_samples
+
+
+
+
+
+    def detect_user_speech(self, audio_stream, audio_in_int16=False):
+        """Iterate over an audio stream and yield (audio_bytes, is_final) tuples.
+
+        is_final is True once enough trailing silence follows enough speech to close a phrase.
+        """
+        last_silence_len = 0
+        speech_len = 0
+
+        for data in audio_stream:
+            audio_block = np.frombuffer(data, dtype=np.int16) if not audio_in_int16 else data
+
+            is_final = False
+            voice_audio, speech_in_wav, last_silent_in_wav = self.apply_vad(audio_block)
+
+            if speech_in_wav > 0:
+                last_silence_len = 0
+                speech_len += speech_in_wav
+                if self.activity_detected_callback is not None:
+                    self.activity_detected_callback()
+
+            last_silence_len += last_silent_in_wav
+            if last_silence_len >= self.final_silence_limit and speech_len >= self.final_speech_limit:
+                is_final = True
+                last_silence_len = 0
+                speech_len = 0
+
+            yield voice_audio.tobytes(), is_final
+
+
+
+
+
+
+
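+# A minimal usage sketch (assuming a MicrophoneStream-like iterable of raw int16 chunks,
+# as in mic_test_whisper_simple.py):
+#
+#   vac = VoiceActivityController(use_vad_result=False)
+#   for audio_bytes, is_final in vac.detect_user_speech(MicrophoneStream()):
+#       ...  # feed audio_bytes to an ASR processor; is_final closes the current phrase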
whisper_online.py
--- whisper_online.py
+++ whisper_online.py
@@ -4,7 +4,7 @@
 import librosa  
 from functools import lru_cache
 import time
-
+import datetime
 
 
 @lru_cache
@@ -118,14 +118,21 @@
         return model
 
     def transcribe(self, audio, init_prompt=""):
+
+        # tiempo_inicio = datetime.datetime.now()
         # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01)
         segments, info = self.model.transcribe(audio, language=self.original_language, initial_prompt=init_prompt, beam_size=5, word_timestamps=True, condition_on_previous_text=True, **self.transcribe_kargs)
+        
+        # print(f'({datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})----------r> whisper transcribe  take { (datetime.datetime.now() -tiempo_inicio)  } ms.')
+
         return list(segments)
 
     def ts_words(self, segments):
         o = []
         for segment in segments:
             for word in segment.words:
+                # skip words from segments that Whisper flags as probable non-speech
+                if segment.no_speech_prob > 0.9:
+                    continue
                 # not stripping the spaces -- should not be merged with them!
                 w = word.word
                 t = (word.start, word.end, w)