吴裕雄 Python Machine Learning: Ensemble Learning with the Random Forest Classification Model (RandomForestClassifier)

import numpy as np
import matplotlib.pyplot as plt

from sklearn import datasets,ensemble
from sklearn.model_selection import train_test_split

def load_data_classification():
    '''
    Load the dataset used for the classification problem.
    '''
    # Use the digits dataset bundled with scikit-learn
    digits=datasets.load_digits()
    # Stratified split into training and test sets; the test set is 1/4 of the original data
    return train_test_split(digits.data,digits.target,test_size=0.25,random_state=0,stratify=digits.target)

# Ensemble learning: RandomForestClassifier random forest classification model
def test_RandomForestClassifier(*data):
    X_train,X_test,y_train,y_test=data
    clf=ensemble.RandomForestClassifier()
    clf.fit(X_train,y_train)
    print("Traing Score:%f"%clf.score(X_train,y_train))
    print("Testing Score:%f"%clf.score(X_test,y_test))

# Load the classification data
X_train,X_test,y_train,y_test=load_data_classification()
# Call test_RandomForestClassifier
test_RandomForestClassifier(X_train,X_test,y_train,y_test) 
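
As an aside that is not part of the original post: a fitted random forest also exposes per-feature importances, which give a quick sanity check on which digits pixels the model relies on. The minimal sketch below reuses the split returned by load_data_classification above; the function name show_feature_importances and the choice of n_estimators=100 are just illustrative.

def show_feature_importances(*data):
    '''
    Illustrative sketch (not from the original post): print the five most
    important pixel features of a fitted RandomForestClassifier.
    '''
    X_train,X_test,y_train,y_test=data
    clf=ensemble.RandomForestClassifier(n_estimators=100,random_state=0)
    clf.fit(X_train,y_train)
    # feature_importances_ is a standard attribute of fitted scikit-learn forests
    top=np.argsort(clf.feature_importances_)[::-1][:5]
    for idx in top:
        print("pixel %d importance: %.4f"%(idx,clf.feature_importances_[idx]))

# Call show_feature_importances
show_feature_importances(X_train,X_test,y_train,y_test)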

def test_RandomForestClassifier_num(*data):
    '''
    Examine how the predictive performance of RandomForestClassifier varies with the n_estimators parameter.
    '''
    X_train,X_test,y_train,y_test=data
    nums=np.arange(1,100,step=2)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for num in nums:
        clf=ensemble.RandomForestClassifier(n_estimators=num)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(nums,training_scores,label="Training Score")
    ax.plot(nums,testing_scores,label="Testing Score")
    ax.set_xlabel("estimator num")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()

# Call test_RandomForestClassifier_num
test_RandomForestClassifier_num(X_train,X_test,y_train,y_test) 
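
Not in the original post: when sweeping n_estimators, the out-of-bag estimate can serve as a built-in validation score without touching the test set. A minimal sketch under the same digits split; oob_score is a standard constructor parameter and oob_score_ a standard fitted attribute in scikit-learn, while the function name and the value n_estimators=100 are illustrative choices.

def test_RandomForestClassifier_oob(*data):
    '''
    Illustrative sketch (not from the original post): use the out-of-bag
    estimate as an internal validation score for the forest.
    '''
    X_train,X_test,y_train,y_test=data
    clf=ensemble.RandomForestClassifier(n_estimators=100,oob_score=True,random_state=0)
    clf.fit(X_train,y_train)
    print("OOB Score:%f"%clf.oob_score_)
    print("Testing Score:%f"%clf.score(X_test,y_test))

# Call test_RandomForestClassifier_oob
test_RandomForestClassifier_oob(X_train,X_test,y_train,y_test)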

def test_RandomForestClassifier_max_depth(*data):
    '''
    Examine how the predictive performance of RandomForestClassifier varies with the max_depth parameter.
    '''
    X_train,X_test,y_train,y_test=data
    maxdepths=range(1,20)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for max_depth in maxdepths:
        clf=ensemble.RandomForestClassifier(max_depth=max_depth)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(maxdepths,training_scores,label="Training Score")
    ax.plot(maxdepths,testing_scores,label="Testing Score")
    ax.set_xlabel("max_depth")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()

# Call test_RandomForestClassifier_max_depth
test_RandomForestClassifier_max_depth(X_train,X_test,y_train,y_test) 
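
An aside not in the original post: with the default max_depth=None, each tree grows until its leaves are pure, so it can be informative to see how deep the trees actually get on this dataset. The minimal sketch below uses get_depth(), which the individual DecisionTreeClassifier estimators of a fitted forest provide; the function name show_tree_depths is illustrative.

def show_tree_depths(*data):
    '''
    Illustrative sketch (not from the original post): report the depths of
    the individual trees in an unconstrained random forest.
    '''
    X_train,X_test,y_train,y_test=data
    clf=ensemble.RandomForestClassifier(n_estimators=100,random_state=0)
    clf.fit(X_train,y_train)
    depths=[est.get_depth() for est in clf.estimators_]
    print("tree depths: min=%d max=%d mean=%.1f"%(min(depths),max(depths),np.mean(depths)))

# Call show_tree_depths
show_tree_depths(X_train,X_test,y_train,y_test)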

def test_RandomForestClassifier_max_features(*data):
    '''
    Examine how the predictive performance of RandomForestClassifier varies with the max_features parameter.
    '''
    X_train,X_test,y_train,y_test=data
    max_features=np.linspace(0.01,1.0)
    fig=plt.figure()
    ax=fig.add_subplot(1,1,1)
    testing_scores=[]
    training_scores=[]
    for max_feature in max_features:
        clf=ensemble.RandomForestClassifier(max_features=max_feature)
        clf.fit(X_train,y_train)
        training_scores.append(clf.score(X_train,y_train))
        testing_scores.append(clf.score(X_test,y_test))
    ax.plot(max_features,training_scores,label="Training Score")
    ax.plot(max_features,testing_scores,label="Testing Score")
    ax.set_xlabel("max_feature")
    ax.set_ylabel("score")
    ax.legend(loc="lower right")
    ax.set_ylim(0,1.05)
    plt.suptitle("RandomForestClassifier")
    plt.show()

# Call test_RandomForestClassifier_max_features
test_RandomForestClassifier_max_features(X_train,X_test,y_train,y_test) 
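
Not part of the original post: the three hyperparameters swept above can also be tuned jointly with a cross-validated grid search. A minimal sketch using scikit-learn's GridSearchCV on the same split; the specific parameter grid and the function name test_RandomForestClassifier_grid are illustrative assumptions, not recommendations from the source.

from sklearn.model_selection import GridSearchCV

def test_RandomForestClassifier_grid(*data):
    '''
    Illustrative sketch (not from the original post): jointly tune
    n_estimators, max_depth and max_features with a 5-fold grid search.
    '''
    X_train,X_test,y_train,y_test=data
    param_grid={"n_estimators":[50,100],
                "max_depth":[10,None],
                "max_features":["sqrt",0.5]}
    grid=GridSearchCV(ensemble.RandomForestClassifier(random_state=0),param_grid,cv=5)
    grid.fit(X_train,y_train)
    print("Best Params:",grid.best_params_)
    print("Testing Score:%f"%grid.score(X_test,y_test))

# Call test_RandomForestClassifier_grid
test_RandomForestClassifier_grid(X_train,X_test,y_train,y_test)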

Original post: https://www.cnblogs.com/tszr/p/10801588.html
