Common sklearn classifiers (a binary-classification template)

# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']   # show Chinese labels in matplotlib figures
matplotlib.rcParams['axes.unicode_minus'] = False     # render the minus sign correctly with SimHei
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_breast_cancer

# Pima Indians Diabetes data: the first 8 columns are features, the 9th is the 0/1 label
# (add header=None if your copy of the CSV has no header row)
data_set = pd.read_csv('pima-indians-diabetes.csv')
data = data_set.values

y = data[:, 8]
X = data[:, :8]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)  # fixed random_state for reproducibility

### Random Forest
print("==========================================")
RF = RandomForestClassifier(n_estimators=10,random_state=11)
RF.fit(X_train,y_train)
predictions = RF.predict(X_test)
print("RF")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### Logistic Regression Classifier
print("==========================================")
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(penalty='l2', max_iter=1000)  # raise max_iter so the solver converges on unscaled features
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("LR")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### Decision Tree Classifier
print("==========================================")
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("DT")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### GBDT(Gradient Boosting Decision Tree) Classifier
print("==========================================")
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier(n_estimators=200)
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("GBDT")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### AdaBoost Classifier
print("==========================================")
from sklearn.ensemble import AdaBoostClassifier
clf = AdaBoostClassifier()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("AdaBoost")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### GaussianNB
print("==========================================")
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("GaussianNB")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### Linear Discriminant Analysis
print("==========================================")
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
clf = LinearDiscriminantAnalysis()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("Linear Discriminant Analysis")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### Quadratic Discriminant Analysis
print("==========================================")
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
clf = QuadraticDiscriminantAnalysis()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("Quadratic Discriminant Analysis")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### SVM Classifier
print("==========================================")
from sklearn.svm import SVC
clf = SVC(kernel='rbf', probability=True)
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("SVM")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### Multinomial Naive Bayes Classifier
print("==========================================")
from sklearn.naive_bayes import MultinomialNB
# MultinomialNB is designed for count features; it only runs here because all Pima features are non-negative
clf = MultinomialNB(alpha=0.01)
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("Multinomial Naive Bayes")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### xgboost
print("==========================================")
import xgboost
clf = xgboost.XGBClassifier()
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("xgboost")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

### voting_classify
print("==========================================")
from sklearn.ensemble import GradientBoostingClassifier, VotingClassifier, RandomForestClassifier
import xgboost
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
clf1 = GradientBoostingClassifier(n_estimators=200)
clf2 = RandomForestClassifier(random_state=0, n_estimators=500)
# clf3 = LogisticRegression(random_state=1)
# clf4 = GaussianNB()
clf5 = xgboost.XGBClassifier()
# Soft voting averages predict_proba across the listed estimators; with only 'rf'
# enabled it behaves like the single random forest, so uncomment more entries to
# get an actual ensemble.
clf = VotingClassifier(estimators=[
    # ('gbdt', clf1),
    ('rf', clf2),
    # ('lr', clf3),
    # ('nb', clf4),
    # ('xgboost', clf5),
    ],
    voting='soft')
clf.fit(X_train,y_train)
predictions = clf.predict(X_test)
print("voting_classify")
print(classification_report(y_test,predictions))
print("AC",accuracy_score(y_test,predictions))

Source: https://www.cnblogs.com/caiyishuai/p/11385825.html
