In [1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
In [2]:
# Random forest as a regressor (diabetes dataset)
In [3]:
from sklearn.ensemble import RandomForestRegressor
from sklearn import datasets, metrics
from sklearn.model_selection import train_test_split
In [4]:
# load the built-in diabetes regression dataset
diabetes = datasets.load_diabetes()
In [5]:
print(diabetes.DESCR)
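print(diabetes.DESCR) dumps the full dataset description. If you only want the essentials, an optional check like the one below (not part of the original run) confirms there are 442 samples, 10 standardized features and a quantitative disease-progression target:
In [ ]:
# optional check (not run in the original post): dataset dimensions and feature names
print(diabetes.data.shape)
print(diabetes.feature_names)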
In [6]:
# features and target as numpy arrays
x = diabetes.data
y = diabetes.target
In [7]:
# view the features and the target together as a DataFrame
df = pd.DataFrame(x, columns=diabetes.feature_names)
df['target'] = y
df.head()
Out[7]:
In [8]:
# hold out 20% of the data for testing
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=101)
In [9]:
# random forest regressor with a fixed seed for reproducibility
regressor = RandomForestRegressor(random_state=101)
In [10]:
regressor.fit(x_train, y_train)
Out[10]:
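Once the forest is fitted it exposes feature_importances_; as a small optional sketch (not executed in the original post), pairing those values with diabetes.feature_names shows which inputs the trees rely on most:
In [ ]:
# optional: rank features by the forest's impurity-based importances
importances = pd.Series(regressor.feature_importances_, index=diabetes.feature_names)
print(importances.sort_values(ascending=False))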
In [11]:
# predictions for the held-out test set
y_prediction = regressor.predict(x_test)
In [12]:
y_prediction
Out[12]:
In [13]:
# predict disease progression for a single new sample (10 scaled feature values)
new_prediction = regressor.predict([[-0.001882, -0.044642, -0.051474, -0.026328, -0.008449, -0.019163, 0.074412, -0.039493, -0.068330, -0.092204]])
In [14]:
new_prediction
Out[14]:
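predict expects a 2-D array, which is why the single sample above is wrapped in double brackets. An equivalent optional sketch (not part of the original run), scoring the first row of x_test instead of hand-typed values:
In [ ]:
# optional: slicing keeps the input 2-D, so this also returns a one-element array
regressor.predict(x_test[:1])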
In [15]:
# mean squared error on the test set
metrics.mean_squared_error(y_test, y_prediction)
Out[15]:
In [16]:
# root mean squared error, in the same units as the target
np.sqrt(metrics.mean_squared_error(y_test, y_prediction))
Out[16]:
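MSE and RMSE alone don't say how much of the target's variance the model explains, so it is common to report R² and the mean absolute error as well. A minimal sketch using the same metrics module (not part of the original run):
In [ ]:
# optional: complementary regression metrics on the same test split
print('R2 :', metrics.r2_score(y_test, y_prediction))
print('MAE:', metrics.mean_absolute_error(y_test, y_prediction))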
In [17]:
# now we will use a random forest as a classifier (iris dataset)
In [18]:
from sklearn.ensemble import RandomForestClassifier
In [19]:
# load the built-in iris classification dataset
iris = datasets.load_iris()
In [20]:
# features and class labels (0 = setosa, 1 = versicolor, 2 = virginica)
x1 = iris.data
y1 = iris.target
In [23]:
df2 = pd.DataFrame(x1, columns=iris.feature_names)
df2['target'] = y1
df2.head()
Out[23]:
In [24]:
x1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, test_size=0.20, random_state=101)
In [25]:
# random forest classifier with default settings
clf = RandomForestClassifier()
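Unlike the regressor above, no random_state is passed here, so the accuracy below can shift slightly from run to run; recent scikit-learn versions default to 100 trees. If you want reproducible results, a variant like the optional sketch below works (clf_tuned is just an illustrative name, not used later in the post):
In [ ]:
# optional, illustrative only: pin the seed and grow more trees
clf_tuned = RandomForestClassifier(n_estimators=200, random_state=101)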
In [26]:
clf.fit(x1_train, y1_train)
Out[26]:
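With only 30 test rows (20% of 150), a single split gives a fairly noisy accuracy estimate. An optional complement is k-fold cross-validation on the full iris data, sketched below with the standard cross_val_score helper (not part of the original run):
In [ ]:
# optional: 5-fold cross-validated accuracy on all of the iris data
from sklearn.model_selection import cross_val_score
scores = cross_val_score(RandomForestClassifier(random_state=101), x1, y1, cv=5)
print(scores, scores.mean())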
In [27]:
y_pred = clf.predict(x1_test)
In [28]:
# accuracy on the held-out test set
metrics.accuracy_score(y1_test, y_pred)
Out[28]:
In [29]:
# confusion matrix: rows are true classes, columns are predicted classes
metrics.confusion_matrix(y1_test, y_pred)
Out[29]:
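The class order in the matrix follows iris.target_names (setosa, versicolor, virginica). Since seaborn is already imported at the top, an optional sketch (not in the original run) prints a per-class report and draws the matrix as a labelled heatmap:
In [ ]:
# optional: per-class precision/recall plus a labelled heatmap of the confusion matrix
print(metrics.classification_report(y1_test, y_pred, target_names=iris.target_names))
cm = metrics.confusion_matrix(y1_test, y_pred)
sns.heatmap(cm, annot=True, cmap='Blues', xticklabels=iris.target_names, yticklabels=iris.target_names)
plt.xlabel('predicted')
plt.ylabel('actual')
plt.show()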
In [30]:
y_pred
Out[30]:
In [ ]: