Advertisement
Guest User

Untitled

a guest
May 26th, 2019
111
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 3.69 KB | None | 0 0
  1. import os
  2. import mne
  3. import glob
  4. import numpy as np
  5. import pandas as pd
  6. from scipy.io import wavfile
  7. from mne.time_frequency import tfr_array_morlet
  8. from scipy.signal import resample
  9. import matplotlib.pyplot as plt
  10.  
  11. from utils import text_order, add_phonetic_info
# Configure MNE's global logging verbosity for the whole script.
# NOTE(review): set_log_level(None) restores MNE's *default* verbosity
# (from config/env), it does not silence logging -- confirm intent.
mne.utils.set_log_level(None)
  13.  
  14. data_path = '/home/jrk/data/cnrs/masc/example_subject/raw'
  15.  
  16. ID2TEXT = {0: 'lw1',
  17.            1: 'cable_spool_fort',
  18.            2: 'easy_money',
  19.            3: 'The_Black_Willow'}
  20. texts = {'The_Black_Willow': 6 * 2,
  21.          'cable_spool_fort': 3 * 2,
  22.          'easy_money': 4 * 2,
  23.          'lw1': 2 * 2}
  24.  
  25. subject = 'A0253'
  26. tmin = -2.
  27. tmax = 2.
  28. hpf = None
  29. lpf = 60.
  30. time_decim = 40
  31.  
  32. # loop through each text and wav file
  33. logfile = glob.glob("%s/%s*.log" % (data_path, subject))
  34. assert len(logfile) == 1
  35. logfile = logfile[0]
  36. text_orders = text_order(logfile)
  37.  
  38. text_number = 0
  39.  
  40. # get story order for this subject to figure out the text name
  41. text_name = ID2TEXT[int(text_orders[text_number])]
  42.  
  43. # get speech timing information
  44. info_dir = './timing_info/%s-json_timing_wPhonemes.csv'
  45. info_dir = info_dir % text_name
  46. annot = pd.read_csv(info_dir)
  47.  
  48. # add phoneme features (just for the example)
  49. annot = add_phonetic_info(annot)
  50.  
  51. # only keep events where p2fa successfully found the timing in
  52. # the wav file and it is a normal sentence trial
  53. not_nan_idx = pd.notnull(annot['start'].values)
  54. annot = annot[not_nan_idx].query("start != 0")
  55.  
  56. # load data
  57. raw_fname = glob.glob("%s/%s_MASC*_%s_*[0-9].con" % (
  58.     data_path, subject, text_number + 1))
  59.  
  60. assert len(raw_fname) == 1
  61. raw_fname = raw_fname[0]
  62. raw = mne.io.read_raw_kit(raw_fname, preload=True, slope='+')
  63.  
  64. # get triggers sent to MEG (onset of each wav file)
  65. raw_events = mne.find_events(raw, min_duration=0.002)
  66.  
  67. # only keep triggers from the audio -- not button responses
  68. raw_events = raw_events[raw_events[:, 2] > 8]
  69.  
  70. raw.pick_types(meg=True)
  71. raw.filter(hpf, lpf)
  72.  
  73. # fix the word start timing for each wav file
  74. for wav_number in range(texts[text_name]):
  75.     wav_rows_location = annot.query("wav_file == @wav_number")
  76.     wav_rows_location = wav_rows_location.index
  77.  
  78.     # change units to ms in the annot, and add the
  79.     # baseline trigger timing from the MEG
  80.     annot.loc[wav_rows_location, ['start', 'stop']] = \
  81.         (annot.loc[wav_rows_location, ['start', 'stop']] * raw.info['sfreq']) + raw_events[wav_number][0]  # noqa
  82.  
  83. # Compute audio poer
  84. raw_volume = np.zeros(raw.n_times)
  85. for wav_number in range(texts[text_name]):
  86.     print(wav_number)
  87.     wav_fname = os.path.join(data_path, '../../wav_files',
  88.                              '%s_%i.wav' % (text_name, wav_number))
  89.     fs, data = wavfile.read(wav_fname)
  90.  
  91.     # wavelet decomposition
  92.     tfr = tfr_array_morlet(data[None, None], fs,
  93.                            np.logspace(2, 3, 100),
  94.                            decim=10, output='power')
  95.     # volume
  96.     volume = tfr[0][0].sum(0)
  97.    
  98.     # resample to meg frquency
  99.     volume_decim = resample(volume, int(
  100.         10. * len(volume) / fs * raw.info['sfreq']))
  101.  
  102.     # add at correct time
  103.     start = raw_events[wav_number, 0]
  104.     raw_volume[start:(start+len(volume_decim))] = volume_decim
  105.  
  106. # Check alignment
  107. voiced = annot.loc[annot.phonation=='v', 'start'].values.astype(int)
  108. sounds = np.array([raw_volume[voiced+t] for t in range(-500, 2000)])
  109.  
  110. # plot mean volume with regard to voicing onset
  111. plt.plot(np.mean(sounds, 1))
  112. plt.axvline(520)
  113.  
  114. # Correlation between meg and volume
  115. Y = raw._data
  116. Y -= Y.mean(1, keepdims=True)
  117. Y -= Y.std(1, keepdims=True)
  118.  
  119. coef = list()
  120. for t in range(0, 500, 10):
  121.     X = np.r_[np.diff(np.roll(raw_volume, t)), 0]
  122.     coef.append(X[None] @ Y.T)
  123. coef = np.squeeze(coef)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement