Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
# Train and evaluate a Perceptron classifier on the Iris dataset.
from sklearn import datasets

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length / petal width columns
y = iris.target

# `sklearn.cross_validation` was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

# Standardize the features so the *_std arrays used below are defined here.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)

from sklearn.linear_model import Perceptron
# `n_iter` was renamed `max_iter` (deprecated 0.19, removed 0.21).
ppn = Perceptron(max_iter=40, eta0=0.1, random_state=0)
ppn.fit(X_train_std, y_train)

# Report held-out accuracy.
y_pred = ppn.predict(X_test_std)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# L2-regularized logistic regression on the standardized training data.
from sklearn.linear_model import LogisticRegression

# C is the *inverse* regularization strength, so a large C means weak
# regularization.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)

# predict_proba expects a 2-D array; passing the 1-D row X_test_std[0, :]
# raises ValueError in modern scikit-learn, so keep the leading axis
# with a 0:1 slice.
lr.predict_proba(X_test_std[0:1, :])
# Support-vector classifiers: first a linear-kernel SVM on the
# standardized Iris split, then an RBF-kernel SVM on the XOR toy data.
from sklearn.svm import SVC

svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train_std, y_train)

# The RBF (Gaussian) kernel can separate the non-linearly-separable
# XOR problem; gamma controls the kernel width.
svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
# Stochastic-gradient-descent equivalents of the three linear models:
# the loss function selects which model SGD optimizes.
from sklearn.linear_model import SGDClassifier

ppn = SGDClassifier(loss='perceptron')
# 'log' was renamed 'log_loss' (deprecated in 1.1, removed in 1.3).
lr = SGDClassifier(loss='log_loss')
svm = SGDClassifier(loss='hinge')
# Replace missing values with the per-column mean.
# `sklearn.preprocessing.Imputer` was removed in scikit-learn 0.22;
# its replacement, SimpleImputer, lives in sklearn.impute and imputes
# column-wise by default (the old `axis` argument is gone, and
# missing values are identified with np.nan rather than the string 'NaN').
import numpy as np
from sklearn.impute import SimpleImputer

imr = SimpleImputer(missing_values=np.nan, strategy='mean')
imr = imr.fit(df.values)
imputed_data = imr.transform(df.values)
# Encode string class labels as consecutive integers, then map them back.
from sklearn.preprocessing import LabelEncoder

class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
# inverse_transform recovers the original string labels from the codes.
y = class_le.inverse_transform(y)
# One-hot encode the nominal feature stored in column 0 of X.
# OneHotEncoder's `categorical_features` argument was removed in
# scikit-learn 0.22; column selection is now expressed with a
# ColumnTransformer, which passes the remaining columns through unchanged.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

ct = ColumnTransformer([('onehot', OneHotEncoder(), [0])],
                       remainder='passthrough')
ct.fit_transform(X)

# pandas alternative: get_dummies dummy-encodes only the string columns
# of the selected frame.
pd.get_dummies(df[['price', 'color', 'size']])
# Min-max normalization: rescale every feature into the [0, 1] range.
from sklearn.preprocessing import MinMaxScaler

mms = MinMaxScaler()
# Fit on the training data only, then apply the identical scaling
# parameters to the test set to avoid information leakage.
X_train_norm = mms.fit_transform(X_train)
X_test_norm = mms.transform(X_test)
# Standardization: center each feature at mean 0 with unit variance.
# (The original listing repeated this snippet twice with a redundant
# re-import; both spellings are kept below under a single import.)
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)

# Equivalent shorthand: fit_transform fits and transforms in one call,
# while the test set still reuses the training-set statistics.
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
# L1-regularized (sparsity-inducing) logistic regression.
from sklearn.linear_model import LogisticRegression

# The modern default solver 'lbfgs' does not support an L1 penalty and
# raises ValueError; 'liblinear' (the pre-0.22 default) does.
# Small C = strong regularization, driving many coefficients to zero.
lr = LogisticRegression(penalty='l1', C=0.1, solver='liblinear')
lr.fit(X_train_std, y_train)

# Training vs. test accuracy (a large gap would indicate overfitting).
lr.score(X_train_std, y_train)
lr.score(X_test_std, y_test)

# Fitted intercept(s) and the (mostly zero) weight coefficients.
lr.intercept_
lr.coef_
# Fit a decision tree predicting Titanic survival from sex and age.
from sklearn import tree

target = train["Survived"].values
features = train[["Sex", "Age"]].values

my_tree = tree.DecisionTreeClassifier()
my_tree = my_tree.fit(features, target)

# Ensemble counterpart of the single decision tree.
from sklearn.ensemble import RandomForestClassifier
Add Comment
Please, Sign In to add comment