@@ -42,9 +42,9 @@
 import re
 import sys
 
-from google.cloud import speech
-from google.cloud.speech import enums
-from google.cloud.speech import types
+#from google.cloud import speech
+#from google.cloud.speech import enums
+#from google.cloud.speech import types
 import pyaudio
 from six.moves import queue
 # [END import_libraries]
@@ -79,7 +79,7 @@ def __init__(self): |
         except Exception as e:
             logging.info("Audio: input stream not available")
 
-        self._google_speech_client = speech.SpeechClient()
+        #self._google_speech_client = speech.SpeechClient()
 
     def exit(self):
         pass
@@ -201,32 +201,32 @@ def speech_recog(self, model): |
         logging.info("recog text: " + recog_text)
         return recog_text
 
-    def speech_recog_google(self, locale):
-        config = types.RecognitionConfig(
-            encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
-            sample_rate_hertz=RATE,
-            language_code=locale)
-        streaming_config = types.StreamingRecognitionConfig(
-            config=config,
-            interim_results=False,
-            single_utterance=True)
-
-        t1 = time.time()
-        with self.stream_in as stream:
-            audio_generator = stream.generator()
-            requests = (types.StreamingRecognizeRequest(audio_content=content)
-                        for content in audio_generator)
-
-            responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
+    # def speech_recog_google(self, locale):
+    #     config = types.RecognitionConfig(
+    #         encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
+    #         sample_rate_hertz=RATE,
+    #         language_code=locale)
+    #     streaming_config = types.StreamingRecognitionConfig(
+    #         config=config,
+    #         interim_results=False,
+    #         single_utterance=True)
+#
+    #     t1 = time.time()
+    #     with self.stream_in as stream:
+    #         audio_generator = stream.generator()
+    #         requests = (types.StreamingRecognizeRequest(audio_content=content)
+    #                     for content in audio_generator)
+#
+    #         responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
 
             # Now, put the transcription responses to use.
-            for response in responses:
-                if time.time() - t1 > 10:
-                    return ""
-                if response.results:
-                    result = response.results[0]
-                    if result.is_final:
-                        return result.alternatives[0].transcript
+            # for response in responses:
+            #     if time.time() - t1 > 10:
+            #         return ""
+            #     if response.results:
+            #         result = response.results[0]
+            #         if result.is_final:
+            #             return result.alternatives[0].transcript
 
 class MicrophoneStream(object):
     """Opens a recording stream as a generator yielding the audio chunks."""
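For reference, the disabled `speech_recog_google()` follows Google's microphone-streaming pattern: build a `RecognitionConfig` for LINEAR16 audio at `RATE`, wrap each chunk from the microphone generator in a `StreamingRecognizeRequest`, and return the first final transcript, giving up after 10 seconds. The `enums` and `types` modules it relies on were removed in google-cloud-speech 2.x, so re-enabling it would mean porting to the current API. Below is a minimal sketch of that port, assuming `stream_in` is a `MicrophoneStream`-style context manager with a `.generator()` method and `RATE` is the module's sample-rate constant; this is not the committed code.

```python
import time

from google.cloud import speech

RATE = 16000  # assumed capture sample rate; must match the mic stream


def speech_recog_google(stream_in, locale):
    """Stream mic audio to Google Speech-to-Text, return first final transcript."""
    client = speech.SpeechClient()
    config = speech.RecognitionConfig(  # v2.x: classes live on the package, no types/enums
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code=locale,
    )
    streaming_config = speech.StreamingRecognitionConfig(
        config=config,
        interim_results=False,
        single_utterance=True,
    )

    t1 = time.time()
    with stream_in as stream:
        # One request per audio chunk yielded by the microphone generator.
        requests = (
            speech.StreamingRecognizeRequest(audio_content=content)
            for content in stream.generator()
        )
        responses = client.streaming_recognize(streaming_config, requests)

        # Same bail-out as the commented-out code: stop after 10 seconds,
        # otherwise return the first final transcript.
        for response in responses:
            if time.time() - t1 > 10:
                return ""
            if response.results:
                result = response.results[0]
                if result.is_final:
                    return result.alternatives[0].transcript
    return ""
```

`single_utterance=True` asks the service to end the stream after the first detected utterance, which is why the loop can simply return on the first `is_final` result instead of consuming the microphone generator indefinitely.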