import numpy as np
from sklearn.preprocessing import LabelBinarizer

# One-hot (binary) encoding of string labels with scikit-learn
Y = np.array(['a', 'b', 'e', 'a', 'e', 'b', 'e', 'a', 'e'])

lb = LabelBinarizer()
lb.fit(Y)
lb.transform(Y)

# A single encoded row: one-hot vector for the third class ('e')
nowa_klasa = np.array([0, 0, 1]).reshape((1, 3))

lb.inverse_transform(nowa_klasa)

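# Quick check of what the binarizer learned: classes_ holds the sorted labels,
# so every encoded row is a length-3 one-hot vector.
print(lb.classes_)            # ['a' 'b' 'e']
print(lb.transform(Y).shape)  # (9, 3)
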
from tensorflow.keras.utils import to_categorical

# One-hot encoding of integer labels with Keras
YY = np.array([1, 2, 3, 1, 2, 3])

to_categorical(YY)

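# Note: to_categorical numbers classes from 0, so labels 1..3 yield 4 columns
# and column 0 stays all zeros here.
print(to_categorical(YY).shape)  # (6, 4)
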
# Toy data set: 30 samples with 2 features each
X = np.random.sample(60).reshape(30, 2)
Y = np.tile([1, 2], 15)  # assigns a class (1 or 2) to each successive sample (30 samples in total)

from sklearn.model_selection import train_test_split

# Hold-out split: 80% training, 20% test, reproducible via random_state
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

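# With 30 samples and test_size=0.2, the split is 24 training rows and 6 test rows:
print(X_train.shape, X_test.shape)  # (24, 2) (6, 2)
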
from sklearn.model_selection import KFold

# 5-fold cross-validation: each iteration yields index arrays for the training and test folds
kf = KFold(n_splits=5, shuffle=True)

for train, test in kf.split(X):
    X_train = X[train]
    X_test = X[test]
    print("%s" % X_test)

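# A minimal sketch of the usual pattern: the same indices also slice the labels,
# and each of the 5 folds leaves 30 / 5 = 6 samples for testing.
for train, test in kf.split(X):
    X_train, X_test = X[train], X[test]
    Y_train, Y_test = Y[train], Y[test]
    print(X_test.shape, Y_test.shape)  # (6, 2) (6,)
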
from sklearn.model_selection import LeaveOneOut

# Leave-one-out cross-validation: every sample is used once as a single-element test set
loo = LeaveOneOut()

for train, test in loo.split(X):
    print("%s %s" % (train, test))
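
# LeaveOneOut is equivalent to KFold(n_splits=n_samples), so with 30 rows it produces 30 splits:
print(loo.get_n_splits(X))  # 30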