Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Learning-rate constant for the gradient-descent updates below.
u = 0.001
# Per-epoch training-loss history and the matching epoch indices (for plotting).
loss_list = []
x = []
def delta_L(y, x, w, reg=0.001):
    """Gradient of the L2-regularized logistic loss w.r.t. ``w``.

    Parameters
    ----------
    y : array, shape (n,)
        Labels, assumed to be in {-1, +1} — TODO confirm against the caller.
    x : array, shape (n, d)
        Feature matrix.
    w : array, shape (d,)
        Current weight vector.
    reg : float
        L2 regularization strength.

    Returns
    -------
    array, shape (d,)
        Gradient of sum_i log(1 + exp(-y_i * x_i.w)) + (reg/2)*||w||^2.
    """
    # margins[i] = y_i * (x_i . w), shape (n, 1).
    margins = y[:, np.newaxis] * x.dot(w[:, np.newaxis])
    # Per-sample gradient of the log-loss, shape (n, d).
    per_sample = (-x * y[:, np.newaxis]) / (1.0 + np.exp(margins))
    # BUG FIX: the original body read the module-level ``X`` instead of the
    # parameter ``x`` (it only worked because the caller passed X positionally),
    # and it put ``reg * w`` inside the per-sample sum, which multiplied the
    # regularization gradient by n.  Add it exactly once, outside the sum.
    return np.sum(per_sample, axis=0) + reg * w
# Full-batch gradient descent for 10000 epochs, recording the mean logistic
# loss per epoch.  Assumes X (n, d), y in {-1, +1} (n,) and w (d,) are already
# defined earlier in the file — TODO confirm against the full source.
for epoch in range(10000):
    # Mean log-loss over the dataset: mean_i log(1 + exp(-y_i * x_i.w)).
    # BUG FIX: the original divided by len(X) twice — once when computing the
    # sum and again after the weight update — under-reporting the loss by a
    # factor of len(X).  Divide exactly once (here, via .mean()).  The dead
    # ``loss = 0`` assignment is also removed.
    margins = y[:, np.newaxis] * X.dot(w[:, np.newaxis])
    loss = np.log(1.0 + np.exp(-margins)).mean()
    # Gradient-descent step with learning rate u.
    w -= u * delta_L(y, X, w)
    x.append(epoch)
    loss_list.append(float(loss))
# Loss curve over epochs.
plt.plot(x, loss_list)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement