Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
from sklearn import tree
import matplotlib.pyplot as plt  # only needed if the scatter plot below is re-enabled
import numpy as np
import ipdb  # NOTE(review): debug-only import, never used — consider removing

# Toy dataset: 500 random points in the unit square, labeled by the AND rule
# (class 1 iff BOTH coordinates exceed 0.5, class 0 otherwise).
X = np.random.random([500, 2])
# Vectorized labeling — replaces the original per-row Python loop.
Y = ((X[:, 0] > 0.5) & (X[:, 1] > 0.5)).astype(int)

# Optional label noise: flip roughly 10% of the labels at random.
# flip = np.random.random(len(Y)) > 0.9
# Y = np.where(flip, 1 - Y, Y)

# Optional visualization of the labeled points.
# plt.scatter(X[:, 0], X[:, 1], c=Y, cmap='hsv')
# plt.show()

feature_names = ['x1', 'x2']
target_names = ['falso', 'verdadeiro']  # class display labels (Portuguese: false / true)

# Fit a decision tree classifier; on this noiseless AND data it should
# recover the two axis-aligned splits at 0.5.
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)

# Graphical export via graphviz (render() writes "graph" + a rendered
# output file, PDF by default).
import graphviz
dot_data = tree.export_graphviz(
    clf,
    out_file=None,
    filled=False,
    rounded=True,
    impurity=True,
    class_names=target_names,
    feature_names=feature_names,
)
graph = graphviz.Source(dot_data)
graph.render("graph")

# Plain-text export of the same tree.
r = tree.export_text(clf)
print('\n' + r)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement