dark_skeleton

audio-reactive-led-strip wasapi patch

Mar 2nd, 2017
diff --git a/python/microphone.py b/python/microphone.py
index 944273b..47d3027 100644
--- a/python/microphone.py
+++ b/python/microphone.py
@@ -7,18 +7,24 @@ import config
 def start_stream(callback):
     p = pyaudio.PyAudio()
     frames_per_buffer = int(config.MIC_RATE / config.FPS)
+    wasapi = p.get_default_output_device_info()
+    config.MIC_RATE = int(wasapi["defaultSampleRate"])
     stream = p.open(format=pyaudio.paInt16,
-                    channels=1,
+                    channels=wasapi["maxOutputChannels"],
                     rate=config.MIC_RATE,
                     input=True,
-                    frames_per_buffer=frames_per_buffer)
+                    input_device_index=wasapi["index"],
+                    frames_per_buffer=frames_per_buffer,
+                    as_loopback=True)
     overflows = 0
     prev_ovf_time = time.time()
     while True:
         try:
+            # http://stackoverflow.com/questions/22636499/convert-multi-channel-pyaudio-into-numpy-array
             y = np.fromstring(stream.read(frames_per_buffer), dtype=np.int16)
+            y = np.reshape(y, (frames_per_buffer, wasapi["maxOutputChannels"]))
             y = y.astype(np.float32)
-            callback(y)
+            callback(y[:, 0])
         except IOError:
             overflows += 1
             if time.time() > prev_ovf_time + 1:
diff --git a/python/visualization.py b/python/visualization.py
index 18d9486..c204470 100644
--- a/python/visualization.py
+++ b/python/visualization.py
@@ -14,6 +14,8 @@ _time_prev = time.time() * 1000.0
 _fps = dsp.ExpFilter(val=config.FPS, alpha_decay=0.2, alpha_rise=0.2)
 """The low-pass filter used to estimate frames-per-second"""
 
+_silence = True
+
 
 def frames_per_second():
     """Return the estimated frames per second
@@ -189,7 +191,7 @@ prev_fps_update = time.time()
 
 
 def microphone_update(audio_samples):
-    global y_roll, prev_rms, prev_exp, prev_fps_update
+    global y_roll, prev_rms, prev_exp, prev_fps_update, _silence
     # Normalize samples between 0 and 1
     y = audio_samples / 2.0**15
     # Construct a rolling window of audio samples
@@ -199,10 +201,13 @@ def microphone_update(audio_samples):
 
     vol = np.max(np.abs(y_data))
     if vol < config.MIN_VOLUME_THRESHOLD:
-        print('No audio input. Volume below threshold. Volume:', vol)
-        led.pixels = np.tile(0, (3, config.N_PIXELS))
-        led.update()
+        if not _silence:
+            print('No audio input. Volume below threshold. Volume:', vol)  # only print the warning once
+            _silence = True
+        #led.pixels = np.tile(0, (3, config.N_PIXELS))
+        #led.update()
     else:
+        _silence = False
         # Transform audio input into the frequency domain
         N = len(y_data)
         N_zeros = 2**int(np.ceil(np.log2(N))) - N
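
Notes on the microphone.py hunk: it switches the project from reading a physical microphone to capturing whatever the default output device is playing, via WASAPI loopback. Stock PyAudio has no as_loopback argument; the patch assumes a PyAudio build with the WASAPI loopback patch applied. The standalone sketch below shows the same capture loop under that assumption; the names start_loopback_stream, handle_samples and the FPS constant are illustrative stand-ins (the project uses start_stream and config.FPS), and it uses np.frombuffer in place of the deprecated np.fromstring, which behaves the same for raw int16 buffers.

import numpy as np
import pyaudio  # must be a build patched for WASAPI loopback ("as_loopback")

FPS = 60  # stand-in for config.FPS


def start_loopback_stream(callback):
    """Stream the default output device's audio back in as input."""
    p = pyaudio.PyAudio()
    # The output device dictates sample rate and channel count, so read both
    # from its device info instead of the microphone settings in config.
    dev = p.get_default_output_device_info()
    rate = int(dev["defaultSampleRate"])
    channels = dev["maxOutputChannels"]
    frames_per_buffer = int(rate / FPS)
    stream = p.open(format=pyaudio.paInt16,
                    channels=channels,
                    rate=rate,
                    input=True,
                    input_device_index=dev["index"],
                    frames_per_buffer=frames_per_buffer,
                    as_loopback=True)  # not available in unpatched PyAudio
    while True:
        # Samples arrive interleaved (ch0, ch1, ch0, ch1, ...); reshape to
        # (frames, channels) and keep only the first channel, as in the diff.
        raw = np.frombuffer(stream.read(frames_per_buffer), dtype=np.int16)
        y = raw.reshape(-1, channels).astype(np.float32)
        callback(y[:, 0])


def handle_samples(samples):
    print('got', len(samples), 'samples, peak', np.abs(samples).max())


if __name__ == '__main__':
    start_loopback_stream(handle_samples)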
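
The visualization.py hunks add an edge-triggered flag: the "volume below threshold" warning prints once when silence begins instead of on every frame, and the LED blanking is commented out so the strip keeps its last state. A minimal sketch of that pattern, with is_audible and the module-level MIN_VOLUME_THRESHOLD constant as stand-ins for the project's microphone_update and config.MIN_VOLUME_THRESHOLD:

import numpy as np

MIN_VOLUME_THRESHOLD = 1e-7  # stand-in for config.MIN_VOLUME_THRESHOLD
_silence = True


def is_audible(y_data):
    """Return True when there is signal; warn once on the drop into silence."""
    global _silence
    vol = np.max(np.abs(y_data))
    if vol < MIN_VOLUME_THRESHOLD:
        if not _silence:
            print('No audio input. Volume below threshold. Volume:', vol)
            _silence = True
        return False  # caller skips the FFT and LED update
    _silence = False
    return True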