Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- from IPython.display import display
- from biosppy.signals import ecg
- from wfdb import processing
- import matplotlib.pyplot as plt
- import numpy as np
- import os
- import shutil
- import wfdb
- import tools as st
- from keras.utils import plot_model
- from keras.models import Sequential
- from keras.layers import Dense, Embedding
- from keras.layers import Dropout, Flatten, BatchNormalization
- from keras.layers import Conv1D, MaxPooling1D, LSTM
- from keras.models import model_from_json
# --- Dead code (was a module-level triple-quoted string): one-time dataset
# selection pass over the PhysioNet PTBDB index. Kept for reference only.
# Reads record names from 'records.txt'; healthy controls are labelled 1 and
# inferior myocardial infarctions 0. Writes the kept record names to
# 'output.txt' and the matching labels to 'output_result.txt'.
# NOTE(review): `list` and `input` shadow builtins in this old snippet.
#
# list = []
# record_result = []
# input = open('records.txt', 'r')
# for line in input:
#     line1 = line.replace("\n", "")
#     arr = line1.split('/')
#     _, fields = wfdb.rdsamp(arr[1], pb_dir='ptbdb/' + arr[0] + '/')
#     if 'Healthy control' in fields['comments'][4]:
#         list.append(1)
#         record_result.append(line)
#     if 'Myocardial infarction' in fields['comments'][4]:
#         if 'inferior' in fields['comments'][5]:
#             list.append(0)
#             record_result.append(line)
# f = open('output.txt', 'w')
# ff = open('output_result.txt', 'w')
# f.write("\n".join([str(x) for x in record_result]))
# ff.write("\n".join([str(x) for x in list]))
# f.close()
# ff.close()
# --- Dead code (was a module-level triple-quoted string): one-time feature
# extraction pass. Kept for reference only.
# For every record listed in 'output.txt': download 3 ECG channels, dump them
# to 'data.txt', run biosppy's ecg() segmentation per channel, average the
# per-beat templates column-wise into one 1800-value vector (3 x 600), and
# finally write all vectors to 'result_data.txt' (one CSV row per record).
# NOTE(review): indentation below is reconstructed — the paste lost it.
#
# list = []
# input = open('output.txt', 'r')
# for line in input:
#     line = line.replace("\n", "")
#     arr = line.split('/')
#     record2 = wfdb.rdrecord(arr[1], pb_dir='ptbdb/' + arr[0] + '/', channels=[1, 2, 5])
#     f = open('data.txt', 'w')
#     f.write("# Simple Text Format\n")
#     f.write("# Sampling Rate (Hz):= 1000.00\n")
#     f.write("# Resolution:= 12\n")
#     f.write("# Labels:= ECG\n")
#     print(np.array(record2.p_signal).shape)
#     for x in record2.p_signal:
#         f.write(str(x[0]) + " " + str(x[1]) + " " + str(x[2]) + "\n")
#     f.close()
#     xxxs = ""
#     xxx = open("data.txt")
#     s = xxx.readlines()[4:]
#     signal0 = np.loadtxt("data.txt", usecols=(0))
#     out0 = ecg.ecg(signal=signal0, sampling_rate=1000., show=False)["templates"]
#     signal1 = np.loadtxt("data.txt", usecols=(1))
#     out1 = ecg.ecg(signal=signal1, sampling_rate=1000., show=False)["templates"]
#     signal2 = np.loadtxt("data.txt", usecols=(2))
#     out2 = ecg.ecg(signal=signal2, sampling_rate=1000., show=False)["templates"]
#     ff = open('test.txt', 'w')
#     b = []
#     for x in range(len(out0[0])):
#         b.append(sum(out0[:, x]) / len(out0[:, x]))
#     for x in range(len(out1[0])):
#         b.append(sum(out1[:, x]) / len(out1[:, x]))
#     for x in range(len(out2[0])):
#         b.append(sum(out2[:, x]) / len(out2[:, x]))
#     for i in range(len(b)):
#         ff.write(str(b[i]) + "\n")
#     ff.close()
#     a = []
#     for i in range(1, 1801):
#         a.append(i)
#     load_data = np.array(b)
#     list.append(load_data)
# output = open('result_data.txt', 'w')
# output.write("\n".join([",".join([str(x) for x in np_arr.tolist()]) for np_arr in list]))
# output.close()
def model(test_data, res_data, validation_data, validation_res):
    """Train a 1-D CNN binary classifier on ECG beat vectors and return its
    validation accuracy.

    Parameters
    ----------
    test_data : array-like, shape (n_samples, 600, 3)
        Training beats — 600 time steps for each of 3 averaged ECG leads.
        (Assumed from ``input_shape=(600, 3)``; confirm against the caller.)
    res_data : array-like, shape (n_samples,)
        Binary labels (1 = healthy control, 0 = inferior MI per the
        dataset-building code elsewhere in this file).
    validation_data : array-like, shape (n_val, 600, 3)
        Held-out beats, used both for per-epoch validation and for the
        final evaluation.
    validation_res : array-like, shape (n_val,)
        Labels for ``validation_data``.

    Returns
    -------
    float
        Accuracy (``metrics`` index 1) of the trained network on the
        validation set.

    Side effects
    ------------
    Writes an architecture diagram to 'model.png' (overwritten per call).
    """
    # `net`, not `model`: the original shadowed the function's own name.
    net = Sequential()
    # Four conv/pool/dropout stages with shrinking filter counts.
    net.add(Conv1D(filters=25, kernel_size=3, activation='relu', input_shape=(600, 3)))
    net.add(MaxPooling1D(pool_size=2))
    net.add(Dropout(0.2))
    net.add(Conv1D(filters=20, kernel_size=3, activation='relu'))
    net.add(MaxPooling1D(pool_size=2))
    net.add(Dropout(0.2))
    net.add(Conv1D(filters=15, kernel_size=3, activation='relu'))
    net.add(MaxPooling1D(pool_size=2))
    net.add(Dropout(0.2))
    net.add(Conv1D(filters=10, kernel_size=3, activation='relu'))
    net.add(MaxPooling1D(pool_size=2))
    net.add(Dropout(0.2))
    net.add(Flatten())
    # NOTE: the original passed input_dim=600 here; Keras ignores it on a
    # non-first layer (the shape is inferred from Flatten), so it is dropped.
    net.add(Dense(600, activation='relu'))
    net.add(Dense(50, activation='relu'))
    net.add(Dense(10, activation='relu'))
    net.add(Dropout(0.5))
    net.add(Dense(1, activation='sigmoid'))
    net.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
    net.fit(np.array(test_data), np.array(res_data), epochs=20, batch_size=8,
            verbose=1,
            validation_data=(np.array(validation_data), np.array(validation_res)))
    scores = net.evaluate(np.array(validation_data), np.array(validation_res), verbose=0)
    # Dropped the dead `pred = net.predict(...)` line: its result was never
    # used and its hard-coded reshape(1, 600, 3) would crash for n_val != 1.
    plot_model(net, to_file='model.png', show_shapes=True)
    return scores[1]
# --- Leave-one-out cross-validation driver ---
# Loads the 169 precomputed beat vectors (1800 floats each, reshaped to
# 600 time steps x 3 leads) and their binary labels, trains one model per
# held-out sample, and prints the mean validation accuracy.
with open('result_data.txt') as fi:
    test_data = [np.array([float(xs) for xs in x.split(",")]) for x in fi.readlines()]
test_data = np.array(test_data).reshape((169, 600, 3))
with open('output_result.txt', 'r') as fu:
    res_data = [[int(x)] for x in fu.readlines()]
neural_res = []
for i in range(len(res_data)):
    # Held-out sample i; everything else is the training fold.
    validation_data = test_data[i].reshape(1, 600, 3)
    tst_data = np.delete(test_data, i, axis=0)          # shape (168, 600, 3)
    validation_res = list(res_data[i])
    rs_data = np.delete(np.array(res_data), i, axis=0).ravel()  # flat, len 168
    print(i + 1)  # progress counter (replaces the manual `k`)
    neural_res.append(model(tst_data, rs_data, validation_data, validation_res))
print(neural_res)
# The original recomputed this average once per element inside a loop whose
# variable was never used; a single computation gives the same result.
itog = sum(neural_res) / len(neural_res)
print(itog)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement