SHARE
TWEET

Untitled

a guest Mar 23rd, 2019 63 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. from IPython.display import display
  2. from biosppy.signals import ecg
  3. from wfdb import processing
  4. import matplotlib.pyplot as plt
  5. import numpy as np
  6. import os
  7. import shutil
  8. import wfdb
  9. import tools as st
  10. from keras.utils import plot_model
  11.  
  12. from keras.models import Sequential
  13. from keras.layers import Dense, Embedding
  14. from keras.layers import Dropout, Flatten, BatchNormalization
  15. from keras.layers import Conv1D, MaxPooling1D, LSTM
  16. from keras.models import model_from_json
  17.  
  18.  
  19.  
  20. list = []
  21. record_result = []
  22. input = open('records.txt', 'r')
  23. for line in input:
  24.     line1 = line.replace("\n","")
  25.     arr = line1.split('/')
  26.     _, fields = wfdb.rdsamp(arr[1], pb_dir='ptbdb/' + arr[0] + '/')
  27.     if 'Healthy control' in fields['comments'][4]:
  28.         list.append(1)
  29.         record_result.append(line)
  30.     if 'Myocardial infarction' in fields['comments'][4]:
  31.         if 'inferior' in fields['comments'][5]:
  32.             list.append(0)
  33.             record_result.append(line)
  34. f = open('output.txt','w')
  35. ff = open('output_result.txt','w')
  36. f.write("\n".join([str(x) for x in record_result]))
  37. ff.write("\n".join([str(x) for x in list]))
  38. f.close()
  39. ff.close()
  40.  
  41.  
# NOTE(review): everything below is commented-out legacy code preserved as a
# module-level string literal; its value is discarded at import time and it
# never runs. It holds the original pipeline — per-record feature extraction
# with biosppy (beat templates averaged per sample), writing result_data.txt,
# a 1-D CNN Keras model, and a leave-one-out evaluation loop. Kept verbatim
# for reference; consider deleting it and relying on version control instead.
'''list = []
input = open('output.txt', 'r')
for line in input:
   line = line.replace("\n","")
   arr = line.split('/')
    record2 = wfdb.rdrecord(arr[1], pb_dir='ptbdb/' + arr[0] + '/', channels=[1,2,5])  
   f = open('data.txt', 'w')
   f.write("# Simple Text Format\n")
   f.write("# Sampling Rate (Hz):= 1000.00\n")
   f.write("# Resolution:= 12\n")
   f.write("# Labels:= ECG\n")
   print(np.array(record2.p_signal).shape)
   for x in record2.p_signal:
       f.write(str(x[0]) + " " + str(x[1]) + " " + str(x[2]) + "\n")
   f.close()
   xxxs = ""
   xxx = open("data.txt")
   s=xxx.readlines()[4:]
   signal0 = np.loadtxt("data.txt", usecols = (0))
   out0 = ecg.ecg(signal=signal0, sampling_rate=1000., show = False)["templates"]
   signal1 = np.loadtxt("data.txt", usecols = (1))
   out1 = ecg.ecg(signal=signal1, sampling_rate=1000., show = False)["templates"]
   signal2 = np.loadtxt("data.txt", usecols = (2))
   out2 = ecg.ecg(signal=signal2, sampling_rate=1000., show = False)["templates"]
   ff = open('test.txt','w')
   b = []
   for x in range(len(out0[0])):
       b.append(sum(out0[:,x])/len(out0[:,x]))
   for x in range(len(out1[0])):
        b.append(sum(out1[:,x])/len(out1[:,x]))
   for x in range(len(out2[0])):
        b.append(sum(out2[:,x])/len(out2[:,x]))
   for i in range(len(b)):
       ff.write(str(b[i])+ "\n")
   ff.close()
   a = []
   for i in range(1,1801):
       a.append(i)
   load_data = np.array(b)
   list.append(load_data)
output = open('result_data.txt','w')
output.write("\n".join([",".join([str(x) for x in np_arr.tolist()]) for np_arr in list]))
output.close()


def model(test_data, res_data, validation_data, validation_res):
    model = Sequential()
    model.add(Conv1D(filters=25, kernel_size=3,activation='relu',input_shape=(600,3)))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Dropout(0.2))
    model.add(Conv1D(filters=20, kernel_size=3,activation='relu'))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Dropout(0.2))
    model.add(Conv1D(filters=15, kernel_size=3,activation='relu'))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Dropout(0.2))
    model.add(Conv1D(filters=10, kernel_size=3,activation='relu'))
    model.add(MaxPooling1D(pool_size = 2))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(600, activation='relu', input_dim = 600))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
    model.fit(np.array(test_data), np.array(res_data), epochs = 20, batch_size=8, verbose=1,validation_data=(np.array(validation_data),np.array(validation_res)))
    scores = model.evaluate(np.array(validation_data),np.array(validation_res), verbose=0)
    pred = model.predict(np.array(np.array(validation_data[:]).reshape(1,600,3)),verbose = 0)
    plot_model(model, to_file='model.png', show_shapes=True)
    return scores[1]


fi = open('result_data.txt')
test_data = [np.array([float(xs) for xs in x.split(",")]) for x in fi.readlines()]
test_data = np.array(test_data).reshape((169,600,3))

fu = open('output_result.txt','r')
res_data = [[int(x)] for x in fu.readlines()]
neural_res = []
k = 0
for i in range(len(res_data)):
    validation_data = np.array(test_data[i]).reshape(1,600,3)
    tst_data = test_data[:i]
    tst_data = np.array(np.append(tst_data, test_data[i+1:])).reshape(168,600,3)

    validation_res = list(res_data[i])
    rs_data = res_data[:i]
    rs_data = np.append(rs_data, res_data[i+1:])
    k = k + 1
    print(k)
    kk = model(tst_data,rs_data,validation_data,validation_res)
    neural_res.append(kk)
print(neural_res)
for i in (neural_res):
    itog = sum(neural_res)/len(neural_res)
print(itog)'''
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top