Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import numpy as np
from sklearn.preprocessing import LabelBinarizer

# Demo: one-hot ("binarize") string class labels with LabelBinarizer.
Y = np.array(['a', 'b', 'e', 'a', 'e', 'b', 'e', 'a', 'e'])
lb = LabelBinarizer()
lb.fit(Y)        # learns the sorted class set: ['a', 'b', 'e']
lb.transform(Y)  # one column per class; 1 marks the sample's class

# Map a one-hot row back to its label. The original demo used
# [0, 0, 11], which only happened to work because inverse_transform
# picks the argmax column; a proper one-hot row uses 1.
nowa_klasa = np.array([0, 0, 1]).reshape((1, 3))
lb.inverse_transform(nowa_klasa)  # -> the third class, 'e'
from tensorflow.keras.utils import to_categorical

# Demo: one-hot encode integer labels with Keras' to_categorical.
YY = np.array([1, 2, 3, 1, 2, 3])
to_categorical(YY)
# Toy dataset: 30 samples x 2 features, uniform values in [0, 1).
X = np.random.sample(60).reshape(30, 2)
# Alternating class labels (1, 2, 1, 2, ...) — one label per row, 30 total.
Y = np.tile([1, 2], 15)

from sklearn.model_selection import train_test_split

# Hold out 20% of the rows for testing; fixed seed for a reproducible split.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=0
)
from sklearn.model_selection import KFold

# 5-fold cross-validation over X; shuffle randomizes which rows land in
# which fold. Each iteration yields index arrays for one train/test split.
kf = KFold(n_splits=5, shuffle=True)
for train_idx, test_idx in kf.split(X):
    X_train, X_test = X[train_idx], X[test_idx]
    print("%s " % X_test)
from sklearn.model_selection import LeaveOneOut

# Leave-one-out CV: every split holds out exactly one sample as the test
# set, so this prints one (train indices, test index) pair per sample.
loo = LeaveOneOut()
for train_idx, test_idx in loo.split(X):
    print("%s %s" % (train_idx, test_idx))
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement