Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
diff --git a/python/microphone.py b/python/microphone.py
index 944273b..47d3027 100644
--- a/python/microphone.py
+++ b/python/microphone.py
@@ -7,18 +7,24 @@ import config
 def start_stream(callback):
     p = pyaudio.PyAudio()
     frames_per_buffer = int(config.MIC_RATE / config.FPS)
+    wasapi = p.get_default_output_device_info()
+    config.MIC_RATE = int(wasapi["defaultSampleRate"])
     stream = p.open(format=pyaudio.paInt16,
-                    channels=1,
+                    channels=wasapi["maxOutputChannels"],
                     rate=config.MIC_RATE,
                     input=True,
-                    frames_per_buffer=frames_per_buffer)
+                    input_device_index = wasapi["index"],
+                    frames_per_buffer=frames_per_buffer,
+                    as_loopback = True)
     overflows = 0
     prev_ovf_time = time.time()
     while True:
         try:
+            # http://stackoverflow.com/questions/22636499/convert-multi-channel-pyaudio-into-numpy-array
             y = np.fromstring(stream.read(frames_per_buffer), dtype=np.int16)
+            y = np.reshape(y, (frames_per_buffer, wasapi["maxOutputChannels"]))
             y = y.astype(np.float32)
-            callback(y)
+            callback(y[:, 0])
         except IOError:
             overflows += 1
             if time.time() > prev_ovf_time + 1:
diff --git a/python/visualization.py b/python/visualization.py
index 18d9486..c204470 100644
--- a/python/visualization.py
+++ b/python/visualization.py
@@ -14,6 +14,8 @@ _time_prev = time.time() * 1000.0
 _fps = dsp.ExpFilter(val=config.FPS, alpha_decay=0.2, alpha_rise=0.2)
 """The low-pass filter used to estimate frames-per-second"""
 
+_silence = True
+
 def frames_per_second():
     """Return the estimated frames per second
 
@@ -189,7 +191,7 @@ prev_fps_update = time.time()
 
 def microphone_update(audio_samples):
-    global y_roll, prev_rms, prev_exp, prev_fps_update
+    global y_roll, prev_rms, prev_exp, prev_fps_update, _silence
     # Normalize samples between 0 and 1
     y = audio_samples / 2.0**15
     # Construct a rolling window of audio samples
@@ -199,10 +201,13 @@ def microphone_update(audio_samples):
     vol = np.max(np.abs(y_data))
     if vol < config.MIN_VOLUME_THRESHOLD:
-        print('No audio input. Volume below threshold. Volume:', vol)
-        led.pixels = np.tile(0, (3, config.N_PIXELS))
-        led.update()
+        if not _silence:
+            print('No audio input. Volume below threshold. Volume:', vol)  # only print the warning once
+            _silence = True
+        #led.pixels = np.tile(0, (3, config.N_PIXELS))
+        #led.update()
     else:
+        _silence = False
         # Transform audio input into the frequency domain
         N = len(y_data)
         N_zeros = 2**int(np.ceil(np.log2(N))) - N
Advertisement
Add Comment
Please, Sign In to add comment