Advertisement
Guest User

Untitled

a guest
Jun 16th, 2019
145
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 4.36 KB | None | 0 0
import json
import shutil

import matplotlib.pyplot as plt
import numpy as np
import wfdb
from IPython.display import display
from biosppy.signals import ecg
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Dense
from keras.layers import Dropout, Flatten
from keras.models import Sequential
from keras.models import model_from_json
from keras.utils import plot_model
from wfdb import processing

import tools as st
  16.  
  17.  
  18.  
  19. diagnosis = []
  20. records = []
  21. input = open('records.txt', 'r')
  22. for line in input:
  23.     line = line.replace("\n","")
  24.     arr = line.split('/')
  25.     _, fields = wfdb.rdsamp(arr[1], pb_dir='ptbdb/' + arr[0] + '/')
  26.     if 'Healthy control' in fields['comments'][4]:
  27.         diagnosis.append(1)
  28.         records.append(line)
  29.     if 'Myocardial infarction' in fields['comments'][4]:
  30.         if 'inferior' in fields['comments'][5]:
  31.             diagnosis.append(0)
  32.             records.append(line)
  33. f = open('upd_records.txt','w')
  34. ff = open('diagnosis.txt','w')
  35. f.write("\n".join([str(x) for x in records]))
  36. ff.write("\n".join([str(x) for x in diagnosis]))
  37. f.close()
  38. ff.close()
  39.  
  40.  
  41. patients = []
  42. input = open('upd_records.txt', 'r')
  43. for line in input:
  44.     line = line.replace("\n","")
  45.     arr = line.split('/')
  46.     record2 = wfdb.rdrecord(arr[1], pb_dir='ptbdb/' + arr[0] + '/', channels=[1,2,5])  
  47.     f = open('data.txt', 'w')
  48.     f.write("# Simple Text Format\n")
  49.     f.write("# Sampling Rate (Hz):= 1000.00\n")
  50.     f.write("# Resolution:= 12\n")
  51.     f.write("# Labels:= ECG\n")
  52.     print(np.array(record2.p_signal).shape)
  53.     for x in record2.p_signal:
  54.         f.write(str(x[0]) + " " + str(x[1]) + " " + str(x[2]) + "\n")
  55.     f.close()
  56.     xxxs = ""
  57.     xxx = open("data.txt")
  58.     s=xxx.readlines()[4:]
  59.     signal0 = np.loadtxt("data.txt", usecols = (0))
  60.     out0 = ecg.ecg(signal=signal0, sampling_rate=1000., show = False)["templates"]
  61.     signal1 = np.loadtxt("data.txt", usecols = (1))
  62.     out1 = ecg.ecg(signal=signal1, sampling_rate=1000., show = False)["templates"]
  63.     signal2 = np.loadtxt("data.txt", usecols = (2))
  64.     out2 = ecg.ecg(signal=signal2, sampling_rate=1000., show = False)["templates"]
  65.     ff = open('temp.txt','w')
  66.     average_signal = []
  67.     for x in range(len(out0[0])):
  68.         average_signal.append(sum(out0[:,x])/len(out0[:,x]))
  69.     for x in range(len(out1[0])):
  70.         average_signal.append(sum(out1[:,x])/len(out1[:,x]))
  71.     for x in range(len(out2[0])):
  72.         average_signal.append(sum(out2[:,x])/len(out2[:,x]))
  73.     for i in range(len(average_signal)):
  74.         ff.write(str(average_signal[i])+ "\n")
  75.     ff.close()
  76.     time = [i for i in range(1, 1801)]
  77.     load_data = np.array(average_signal)
  78.     patients.append(load_data)
  79. output = open('result_data.txt','w')
  80. output.write("\n".join([",".join([str(x) for x in np_arr.tolist()]) for np_arr in patients]))
  81. output.close()
  82.  
  83.  
  84. def model(train_data, train_result, test_data):
  85.     model = Sequential()
  86.     model.add(Conv1D(filters=15, kernel_size=5,activation='relu',input_shape=(600 , 3)))
  87.     model.add(MaxPooling1D(pool_size = 2))
  88.     model.add(Dropout(0.2))
  89.     model.add(Conv1D(filters=10, kernel_size=3,activation='relu'))
  90.     model.add(MaxPooling1D(pool_size = 2))
  91.     model.add(Dropout(0.2))
  92.     model.add(Flatten())
  93.     model.add(Dense(25, activation='relu'))
  94.     model.add(Dropout(0.5))
  95.     model.add(Dense(1, activation='sigmoid'))
  96.     model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
  97.     model.fit(np.array(train_data), np.array(train_result), epochs = 45, batch_size = 8, verbose=1)
  98.     pred = model.predict(np.array(np.array(test_data[:]).reshape(1,600,3)),verbose = 0)
  99.     print(pred)
  100.     return pred
  101.  
  102.  
  103. fi = open('result_data.txt')
  104. data = [np.array([float(xs) for xs in x.split(",")]) for x in fi.readlines()]
  105. data = np.array(data).reshape((169,600,3))
  106.  
  107. fu = open('diagnosis.txt','r')
  108. result = [[int(x)] for x in fu.readlines()]
  109. result_predict = []
  110. k = 0
  111. for i in range(169):
  112.     test_data = np.array(data[i]).reshape(1, 600, 3)
  113.     train_data = data[:i]
  114.     train_data = np.array(np.append(train_data, data[i+1:])).reshape(168, 600, 3)
  115.  
  116.     test_result = list(result[i])
  117.     train_result = result[:i]
  118.     train_result = np.append(train_result, result[i+1:])
  119.     k = k + 1
  120.     print(k)
  121.     pred = model(train_data, train_result, test_data)
  122.     if pred > 0.5:
  123.         pred = 1
  124.     else:
  125.         pred = 0
  126.     print(pred)
  127.     result_predict.append(pred)
  128.  
  129. print(result_predict)
  130. with open('pred.txt', 'w') as fw:
  131.      json.dump(result_predict, fw)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement