How the Hitchhiker's Guide Works for Machine Learning in Python
This walkthrough covers eight core algorithms, each as a short, runnable scikit-learn snippet:
- Linear Regression
- Logistic Regression
- Decision Trees
- Support Vector Machines
- K-Nearest Neighbors
- Random Forests
- K-Means Clustering
- Principal Components Analysis
Linear Regression

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn import linear_model

# Load the data and plot the raw relationship
df = pd.read_csv('linear_regression_df.csv')
df.columns = ['X', 'Y']
df.head()
sns.set_style('ticks')
sns.lmplot(x='X', y='Y', data=df)
plt.ylabel('Response')
plt.xlabel('Explanatory')

# Hold out the first 20 rows for testing, train on the rest
trainX = np.asarray(df.X[20:]).reshape(-1, 1)
trainY = np.asarray(df.Y[20:]).reshape(-1, 1)
testX = np.asarray(df.X[:20]).reshape(-1, 1)
testY = np.asarray(df.Y[:20]).reshape(-1, 1)

# Fit an ordinary least-squares model and report its parameters
linear = linear_model.LinearRegression()
linear.fit(trainX, trainY)
print('Coefficient: \n', linear.coef_)
print('Intercept: \n', linear.intercept_)
print('R² Value: \n', linear.score(trainX, trainY))
predicted = linear.predict(testX)
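The script above scores the model only on its training rows. A minimal follow-up sketch, reusing the linear, testX, testY, and predicted objects defined above, checks the held-out rows as well:

# Sketch: evaluate on the 20 held-out rows (assumes the objects above exist)
from sklearn.metrics import mean_squared_error
print('Test R²: \n', linear.score(testX, testY))
print('Test MSE: \n', mean_squared_error(testY, predicted))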
Logistic Regression

from sklearn.linear_model import LogisticRegression

# Load the data and plot the fitted logistic curve
df = pd.read_csv('logistic_regression_df.csv')
df.columns = ['X', 'Y']
df.head()
sns.set_style('ticks')
sns.regplot(x='X', y='Y', data=df, logistic=True)
plt.ylabel('Probability')
plt.xlabel('Explanatory')

# Fit the classifier; for classifiers, score() reports accuracy, not R²
X = np.asarray(df.X).reshape(-1, 1)
Y = np.asarray(df.Y).ravel()
logistic = LogisticRegression()
logistic.fit(X, Y)
print('Coefficient: \n', logistic.coef_)
print('Intercept: \n', logistic.intercept_)
print('Accuracy: \n', logistic.score(X, Y))
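Hard labels are not always enough; predict_proba on the fitted logistic model returns a probability per class. A minimal sketch, assuming the X array defined above:

# Sketch: class-probability estimates for the first five rows
probs = logistic.predict_proba(X[:5])
print(probs)  # one column per class, rows sum to 1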
Decision Trees

from sklearn import tree
from sklearn.model_selection import train_test_split  # cross_validation was removed in scikit-learn 0.20

# Load the iris data; four measurements predict the species label Y
df = pd.read_csv('iris_df.csv')
df.columns = ['X1', 'X2', 'X3', 'X4', 'Y']
df.head()

# Fit a Gini-impurity tree on a 70/30 train/test split
decision = tree.DecisionTreeClassifier(criterion='gini')
X = df.values[:, 0:4]
Y = df.values[:, 4]
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.3)
decision.fit(trainX, trainY)
print('Accuracy: \n', decision.score(testX, testY))

# Render the fitted tree as an image (requires graphviz and pydotplus)
from io import StringIO
from IPython.display import Image
import pydotplus as pydot
dot_data = StringIO()
tree.export_graphviz(decision, out_file=dot_data)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
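Beyond the rendered tree, feature_importances_ summarizes which inputs drive the splits. A quick sketch using the fitted decision tree above:

# Sketch: per-feature importance scores (they sum to 1)
for name, imp in zip(['X1', 'X2', 'X3', 'X4'], decision.feature_importances_):
    print(name, round(imp, 3))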
Support Vector Machines

from sklearn import svm
from sklearn.model_selection import train_test_split

# Keep two iris features so the decision boundary is easy to visualize
df = pd.read_csv('iris_df.csv')
df.columns = ['X4', 'X3', 'X1', 'X2', 'Y']
df = df.drop(['X4', 'X3'], axis=1)
df.head()

# Fit a support vector classifier (RBF kernel by default)
support = svm.SVC()
X = df.values[:, 0:2]
Y = df.values[:, 2]
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.3)
support.fit(trainX, trainY)
print('Accuracy: \n', support.score(testX, testY))
pred = support.predict(testX)

# Scatter the two features, colored by class
sns.set_context('notebook', font_scale=1.1)
sns.set_style('ticks')
sns.lmplot(x='X1', y='X2', scatter=True, fit_reg=False, data=df, hue='Y')
plt.ylabel('X2')
plt.xlabel('X1')
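SVC() defaults to an RBF kernel; trying a linear kernel is a common sanity check, though not part of the original walkthrough. A sketch reusing the split above:

# Sketch: linear-kernel SVM on the same train/test split
linear_svc = svm.SVC(kernel='linear')
linear_svc.fit(trainX, trainY)
print('Linear-kernel accuracy: \n', linear_svc.score(testX, testY))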
K-Nearest Neighbors

from sklearn.neighbors import KNeighborsClassifier

# Same two-feature iris setup as above
df = pd.read_csv('iris_df.csv')
df.columns = ['X1', 'X2', 'X3', 'X4', 'Y']
df = df.drop(['X4', 'X3'], axis=1)
df.head()

# Plot the classes in the two remaining dimensions
sns.set_style('ticks')
sns.lmplot(x='X1', y='X2', scatter=True, fit_reg=False, data=df, hue='Y')
plt.ylabel('X2')
plt.xlabel('X1')

# Classify each point by a majority vote of its 5 nearest neighbors
neighbors = KNeighborsClassifier(n_neighbors=5)
X = df.values[:, 0:2]
Y = df.values[:, 2]
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.3)
neighbors.fit(trainX, trainY)
print('Accuracy: \n', neighbors.score(testX, testY))
pred = neighbors.predict(testX)
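The choice of n_neighbors=5 is arbitrary; a small sweep (an addition, not in the original) shows how accuracy moves with k on the same split:

# Sketch: accuracy across several values of k
for k in (1, 3, 5, 7, 9):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(trainX, trainY)
    print(k, knn.score(testX, testY))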
Random Forests

from sklearn.ensemble import RandomForestClassifier

# Back to all four iris features
df = pd.read_csv('iris_df.csv')
df.columns = ['X1', 'X2', 'X3', 'X4', 'Y']
df.head()

# An ensemble of decision trees, each trained on a bootstrap sample
forest = RandomForestClassifier()
X = df.values[:, 0:4]
Y = df.values[:, 4]
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.3)
forest.fit(trainX, trainY)
print('Accuracy: \n', forest.score(testX, testY))
pred = forest.predict(testX)
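RandomForestClassifier() takes the library default for the number of trees; pinning n_estimators explicitly (a suggestion, not from the original) keeps runs comparable across scikit-learn versions:

# Sketch: fix the forest size (the default changed from 10 to 100 in scikit-learn 0.22)
forest_100 = RandomForestClassifier(n_estimators=100, random_state=0)
forest_100.fit(trainX, trainY)
print('Accuracy (100 trees): \n', forest_100.score(testX, testY))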
K-Means Clustering

from sklearn.cluster import KMeans

# Unsupervised: keep two features and ignore the species label while fitting
df = pd.read_csv('iris_df.csv')
df.columns = ['X1', 'X2', 'X3', 'X4', 'Y']
df = df.drop(['X4', 'X3'], axis=1)
df.head()

# Partition the points into 3 clusters and store the assignments
kmeans = KMeans(n_clusters=3)
X = df.values[:, 0:2]
kmeans.fit(X)
df['Pred'] = kmeans.predict(X)
df.head()

# Color the scatter plot by predicted cluster
sns.set_style('ticks')
sns.lmplot(x='X1', y='X2', scatter=True, fit_reg=False, data=df, hue='Pred')
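Since k-means is unsupervised, it helps to inspect what it learned; the fitted kmeans object exposes the centroids and the within-cluster sum of squares:

# Sketch: one centroid row per cluster, plus total within-cluster inertia
print(kmeans.cluster_centers_)
print(kmeans.inertia_)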
Principal Components Analysis

from sklearn import decomposition

df = pd.read_csv('iris_df.csv')
df.columns = ['X1', 'X2', 'X3', 'X4', 'Y']
df.head()

# Project the four features onto their principal components
pca = decomposition.PCA()
fa = decomposition.FactorAnalysis()  # an alternative reducer, unused below
X = df.values[:, 0:4]
Y = df.values[:, 4]
train, test = train_test_split(X, test_size=0.3)
train_reduced = pca.fit_transform(train)
test_reduced = pca.transform(test)
print(pca.n_components_)
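The component count alone does not say how much signal each component carries; explained_variance_ratio_ on the fitted pca object does:

# Sketch: fraction of variance explained by each principal component
print(pca.explained_variance_ratio_)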