[Python Machine Learning] Simple Machine Learning Examples: KNN, Decision Tree, Linear Regression, Logistic Regression

1. KNN

KNN looks at the known points that are closest to a new sample and predicts the class that those nearest neighbors belong to.

For example, if Xiao Ming is in Beijing, Xiao Hong is in Beijing, and Xiao Gang is in Henan, and I am closer to Xiao Ming and Xiao Hong than to Xiao Gang, then I am most likely in Beijing rather than Henan.
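
To make the idea concrete, here is a minimal from-scratch sketch of the nearest-neighbor vote itself (Euclidean distance plus majority vote). The sample points, labels and k value are invented purely for illustration and are not part of the scikit-learn example that follows.

import numpy as np
from collections import Counter

def knn_predict(x_new, X_train, y_train, k=3):
    """Predict the label of x_new by majority vote among its k nearest neighbors."""
    # Euclidean distance from x_new to every training point
    distances = np.linalg.norm(X_train - x_new, axis=1)
    # Indices of the k closest training points
    nearest = np.argsort(distances)[:k]
    # Majority vote among their labels
    return Counter(y_train[nearest]).most_common(1)[0][0]

# Toy data: label 0 = "Beijing", label 1 = "Henan" (coordinates are made up)
X_train = np.array([[1.0, 1.0], [1.2, 0.9], [5.0, 5.0]])
y_train = np.array([0, 0, 1])
print(knn_predict(np.array([1.1, 1.0]), X_train, y_train, k=3))  # -> 0 ("Beijing")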

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : KNN近邻算法.py
# @Author: 赵路仓
# @Date : /4/2
# @Desc : Tutorial: /video/BV1nt411r7tj?p=21
# @Contact : 398333404@
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
import numpy as np


def knn_iris():
    """Classify the iris dataset with the KNN algorithm."""
    # 1. Load the data
    iris = load_iris()
    print(iris)
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=6)
    # 3. Feature engineering: standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. KNN estimator
    estimator = KNeighborsClassifier(n_neighbors=6)
    estimator.fit(x_train, y_train)
    # 5. Model evaluation
    # Method 1: compare predictions with the true labels directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("True values vs. predictions:\n", y_test == y_predict)
    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)
    # Predict the species of a new iris sample
    # (scale it with the same transformer as the training data before predicting)
    x_new = transfer.transform(np.array([[5, 2.9, 1, 0.2]]))
    prediction = estimator.predict(x_new)
    print(prediction)
    return None


def knn_iris_gscv():
    """Classify the iris dataset with KNN, adding grid search and cross-validation."""
    # 1. Load the data
    iris = load_iris()
    print(iris)
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=6)
    # 3. Feature engineering: standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. KNN estimator
    estimator = KNeighborsClassifier(n_neighbors=5)
    # Grid search with cross-validation: candidate values for n_neighbors
    param_dict = {"n_neighbors": [1, 3, 5, 7, 9, 11]}
    estimator = GridSearchCV(estimator, param_grid=param_dict, cv=10)
    estimator.fit(x_train, y_train)
    # 5. Model evaluation
    # Method 1: compare predictions with the true labels directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("True values vs. predictions:\n", y_test == y_predict)
    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)
    # Grid-search results:
    #   best parameters: best_params_
    #   best score: best_score_
    #   best estimator: best_estimator_
    #   cross-validation results: cv_results_
    print("Best parameters:\n", estimator.best_params_)
    print("Best score:\n", estimator.best_score_)
    print("Best estimator:\n", estimator.best_estimator_)
    print("Cross-validation results:\n", estimator.cv_results_)
    # Predict the species of a new iris sample (scaled with the same transformer)
    x_new = transfer.transform(np.array([[5, 2.9, 1, 0.2]]))
    prediction = estimator.predict(x_new)
    print(prediction)
    return None


if __name__ == "__main__":
    # Example 1: classify irises with KNN
    # knn_iris()
    # Example 2: classify irises with KNN plus grid search and cross-validation
    knn_iris_gscv()

2. Decision Tree

A classification tree (decision tree) is a very common classification method. It is a form of supervised learning: given a set of samples, each with a group of attributes and a predetermined class label, a classifier is learned from them that can assign the correct class to new, unseen objects.
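
The code below builds the tree with criterion="entropy". As a quick reminder of what that criterion measures, here is a small sketch of computing the Shannon entropy of a set of class labels; the toy label lists are invented for illustration.

import numpy as np

def entropy(labels):
    """Shannon entropy H = -sum(p * log2(p)) over the class proportions."""
    _, counts = np.unique(labels, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

print(entropy([0, 0, 1, 1]))   # 1.0 bit: a maximally mixed two-class node
print(entropy([0, 1, 1, 1]))   # ~0.81 bits: a less mixed node has lower entropy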

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 决策树.py
# @Author: 赵路仓
# @Date : /4/3
# @Desc : /video/BV1nt411r7tj?p=28
# @Contact : 398333404@
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import graphviz


def decision_iris():
    """Classify the iris dataset with a decision tree."""
    # 1. Load the dataset
    iris = load_iris()
    print(iris.data[1])
    print(iris.target[1])
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=22)
    print(y_train)
    # 3. Decision-tree estimator
    estimator = DecisionTreeClassifier(criterion="entropy")
    estimator.fit(x_train, y_train)
    # 4. Model evaluation
    # Method 1: compare predictions with the true labels directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("True values vs. predictions:\n", y_test == y_predict)
    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)
    # Visualize the decision tree: export DOT data and render it with graphviz
    dot_data = export_graphviz(estimator, out_file=None)
    graph = graphviz.Source(dot_data)
    graph.render("tree")  # "tree" is the name of the rendered output file
    return None


if __name__ == "__main__":
    decision_iris()
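
A practical note: export_graphviz and graphviz.Source require the Graphviz system binary to be installed in addition to the Python package. If that is inconvenient, one possible alternative (a sketch assuming matplotlib is available) is scikit-learn's built-in plot_tree, which draws the fitted tree without any external renderer:

import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, plot_tree

iris = load_iris()
estimator = DecisionTreeClassifier(criterion="entropy").fit(iris.data, iris.target)

# Draw the fitted tree directly with matplotlib; no external Graphviz binary needed
plt.figure(figsize=(12, 8))
plot_tree(estimator, feature_names=iris.feature_names, class_names=iris.target_names, filled=True)
plt.savefig("tree.png")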

3. Linear Regression

The task of linear regression is to find an optimal linear mapping from the feature space X to the output space Y.
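
The "normal equation" used in linear1() below solves for the weights in closed form, w = (XᵀX)⁻¹Xᵀy. Here is a minimal NumPy sketch of that computation on a tiny invented dataset, just to show what the closed-form solution does:

import numpy as np

# Toy data invented for illustration: roughly y = 2*x + 1 plus a little noise
X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([3.1, 4.9, 7.2, 8.8])

# Add a bias column of ones so the intercept is learned as an extra weight
X_b = np.hstack([np.ones((len(X), 1)), X])

# Normal equation: w = (X^T X)^-1 X^T y
w = np.linalg.inv(X_b.T @ X_b) @ X_b.T @ y
print(w)  # roughly [1.15, 1.94]: intercept and slope close to the true 1 and 2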

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 波士顿房价预测.py
# @Author: 赵路仓
# @Date : /4/11
# @Desc :
# @Contact : 398333404@
# Note: load_boston was removed in scikit-learn 1.2, so this script needs scikit-learn < 1.2
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, SGDRegressor, Ridge
from sklearn.metrics import mean_squared_error


# Normal equation
def linear1():
    """Predict Boston house prices with the normal-equation solver."""
    # 1. Load the data
    boston = load_boston()
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)
    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Estimator: normal-equation solver, suitable for fewer than ~100,000 samples
    estimator = LinearRegression()
    estimator.fit(x_train, y_train)
    # 5. Inspect the model
    print("Normal equation - weights:", estimator.coef_)
    print("Normal equation - intercept:", estimator.intercept_)
    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Normal equation - mean squared error:", error)
    return None


# Gradient descent
def linear2():
    """Predict Boston house prices with gradient descent."""
    # 1. Load the data
    boston = load_boston()
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)
    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Estimator: stochastic gradient descent; eta0 is the learning rate,
    #    max_iter the iteration limit; recommended for large datasets
    estimator = SGDRegressor(learning_rate="constant", eta0=0.001, max_iter=10000)
    estimator.fit(x_train, y_train)
    # 5. Inspect the model
    print("Gradient descent - weights:", estimator.coef_)
    print("Gradient descent - intercept:", estimator.intercept_)
    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Gradient descent - mean squared error:", error)
    return None


# Ridge regression
def linear3():
    """Predict Boston house prices with ridge regression."""
    # 1. Load the data
    boston = load_boston()
    # 2. Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=22)
    # 3. Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Estimator: ridge regression (L2-regularized linear regression)
    estimator = Ridge(max_iter=10000)
    estimator.fit(x_train, y_train)
    # 5. Inspect the model
    print("Ridge regression - weights:", estimator.coef_)
    print("Ridge regression - intercept:", estimator.intercept_)
    # 6. Model evaluation
    y_predict = estimator.predict(x_test)
    print("Predicted prices:", y_predict)
    error = mean_squared_error(y_test, y_predict)
    print("Ridge regression - mean squared error:", error)
    return None


if __name__ == "__main__":
    # Example 1: normal equation
    linear1()
    # Example 2: gradient descent
    linear2()
    # Example 3: ridge regression
    linear3()
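
One caveat about the script above: load_boston was deprecated in scikit-learn 1.0 and removed in 1.2, so it only runs on older versions. A possible adaptation (a sketch, not the author's original code) is to substitute another built-in regression dataset such as fetch_california_housing:

from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error

# fetch_california_housing is an available built-in replacement on scikit-learn >= 1.2
housing = fetch_california_housing()
x_train, x_test, y_train, y_test = train_test_split(housing.data, housing.target, random_state=22)

# Standardize features, then fit and evaluate exactly as in the Boston example
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)

estimator = LinearRegression()
estimator.fit(x_train, y_train)
print("MSE:", mean_squared_error(y_test, estimator.predict(x_test)))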

4. Logistic Regression

In short, logistic regression is a machine learning method for binary (0 or 1) classification problems that estimates the probability of an event, for example the probability that a user buys a product, that a patient has a certain disease, or that an advertisement gets clicked.
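
At its core, logistic regression passes a linear score through the sigmoid function σ(z) = 1 / (1 + e⁻ᶻ) to obtain that probability. A minimal sketch follows; the weights, bias and feature vector are toy values invented for illustration.

import numpy as np

def sigmoid(z):
    """Map a real-valued linear score to a probability in (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))

# Toy weights, bias and feature vector, invented for illustration
w = np.array([0.8, -1.2])
b = 0.3
x = np.array([1.5, 0.4])

p = sigmoid(w @ x + b)      # probability of the positive class
print(p, int(p >= 0.5))     # predict class 1 if the probability crosses 0.5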

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 癌症逻辑回归.py
# @Author: 赵路仓
# @Date : /4/11
# @Desc :
# @Contact : 398333404@
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, roc_auc_score


def cancer_demo():
    """Binary classification of the breast cancer dataset with logistic regression."""
    # Load the data
    cancer = load_breast_cancer()
    # print(cancer.feature_names)
    # print(cancer.data)
    # print(cancer.target)
    # Split into training and test sets
    x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target)
    # Standardization
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    print(x_train)
    # Build the estimator
    estimator = LogisticRegression()
    estimator.fit(x_train, y_train)
    # Inspect the model
    print("Logistic regression - weights:", estimator.coef_)
    print("Logistic regression - intercept:", estimator.intercept_)
    # Model evaluation
    # Method 1: compare predictions with the true labels directly
    y_predict = estimator.predict(x_test)
    print("y_predict:\n", y_predict)
    print("True values vs. predictions:\n", y_test == y_predict)
    # Method 2: compute the accuracy
    score = estimator.score(x_test, y_test)
    print("Accuracy:\n", score)
    # Precision, recall and F1-score
    # (in load_breast_cancer, label 0 is malignant and label 1 is benign)
    report = classification_report(y_test, y_predict, labels=[0, 1], target_names=["malignant", "benign"])
    print(report)
    roc = roc_auc_score(y_test, y_predict)
    print("ROC AUC:", roc)


if __name__ == "__main__":
    cancer_demo()
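
One refinement worth mentioning: roc_auc_score is normally given predicted probabilities rather than hard 0/1 predictions, which makes the AUC independent of the 0.5 decision threshold. A minimal sketch of that variant (the random_state and max_iter values are arbitrary choices for this illustration):

from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

cancer = load_breast_cancer()
x_train, x_test, y_train, y_test = train_test_split(cancer.data, cancer.target, random_state=0)

transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)

estimator = LogisticRegression(max_iter=1000).fit(x_train, y_train)

# predict_proba gives the probability of the positive class; the AUC is computed from it
y_score = estimator.predict_proba(x_test)[:, 1]
print("AUC from probabilities:", roc_auc_score(y_test, y_score))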
