The idea behind a random forest: draw random samples, build M decision trees on them, and let the trees vote, taking the most common class as the final classification. When each tree is grown, the Gini index is introduced; it plays the same role as information gain in that both select the best split point for a dataset, except that here the split with the smallest Gini index is chosen. At each node the Gini index of every candidate split is computed, and the feature index, threshold value, and resulting subsets of the best one are recorded. Tree growth is also constrained, a simple form of pruning: if the left or right subset of a split is empty, a leaf node is forced; if the tree's depth reaches the maximum depth, the children become leaves instead of being split further, so nodes beyond the maximum depth are never created; and if a subset holds too few samples, it too becomes a leaf. Otherwise the left and right subsets are split recursively, and the tree keeps growing. Each tree is built on a randomly drawn portion of the data; together the trees form the forest, and the class receiving the most votes is the prediction.
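For intuition, the Gini index of a split is each child's impurity 1 - sum_k(p_k^2) weighted by the child's share of the samples; 0.0 means a pure split and 0.5 a 50/50 mix. A hand computation with made-up counts:

# Made-up split of 10 samples: the left group holds 4 of class 0 and 1 of class 1,
# the right group holds 1 of class 0 and 4 of class 1
left = 1.0 - ((4.0/5)**2 + (1.0/5)**2)   # impurity 0.32
right = 1.0 - ((1.0/5)**2 + (4.0/5)**2)  # impurity 0.32
gini = left * (5.0/10) + right * (5.0/10)
print(gini)  # 0.32; smaller is better when comparing candidate splits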
An important advantage of random forests is that there is no need for cross-validation or a separate test set to obtain an unbiased estimate of the generalization error. The estimate can be computed internally while the forest is being built: each tree's bootstrap sample leaves some rows out, and those out-of-bag rows provide an unbiased error estimate.
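The code below does not implement this, but a minimal sketch of the out-of-bag (OOB) idea, reusing the subsample, build_tree, and predict functions defined later, might look like the following; oob_estimate is a hypothetical helper, not part of the original program:

def oob_estimate(train, max_depth, min_size, sample_size, n_trees, n_features):
    # grow the forest, remembering which rows went into each bootstrap sample
    trees, bags = list(), list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)
        bags.append(set(id(row) for row in sample))  # identities of the rows this tree saw
        trees.append(build_tree(sample, max_depth, min_size, n_features))
    correct, total = 0, 0
    for row in train:
        # vote only with the trees whose bootstrap sample excluded this row
        votes = [predict(tree, row) for tree, bag in zip(trees, bags) if id(row) not in bag]
        if votes:
            total += 1
            if max(set(votes), key=votes.count) == row[-1]:
                correct += 1
    return correct / float(total) * 100.0 if total else None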
Each tree here is a CART tree.
Code:
# -*- coding: utf-8 -*-
# Random Forest Algorithm on Sonar Dataset
from random import seed
from random import randrange
from csv import reader
from math import sqrt
# Load the CSV data
def load_csv(filename):
    dataset = list()  # start with an empty list
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:  # skip empty rows
                continue
            dataset.append(row)
    return dataset

# Convert a string column to float
def str_column_to_float(dataset, column):
    for row in dataset:
        row[column] = float(row[column].strip())

# Convert a string column to integer class labels
# def str_column_to_int(dataset, column):
#     class_values = [row[column] for row in dataset]
#     unique = set(class_values)
#     lookup = dict()
#     for i, value in enumerate(unique):
#         lookup[value] = i
#     for row in dataset:
#         row[column] = lookup[row[column]]
#     return lookup

# Split the dataset into n_folds folds, each fold stored in a list
def cross_validation_split(dataset, n_folds):
    dataset_split = list()
    dataset_copy = list(dataset)
    fold_size = int(len(dataset) / n_folds)
    for i in range(n_folds):
        fold = list()
        while len(fold) < fold_size:
            index = randrange(len(dataset_copy))
            fold.append(dataset_copy.pop(index))
        dataset_split.append(fold)
    return dataset_split
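Note that int(len(dataset) / n_folds) truncates, so a few rows can be silently dropped. For the Sonar data, which has 208 rows, with 5 folds:

fold_size = int(208 / 5)                # 41 rows per fold
print(fold_size, 208 - 5 * fold_size)   # 41 3: three rows never appear in any fold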
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
    correct = 0
    for i in range(len(actual)):
        if actual[i] == predicted[i]:
            correct += 1
    return correct / float(len(actual)) * 100.0

# Evaluate the algorithm using cross validation
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    folds = cross_validation_split(dataset, n_folds)
    scores = list()
    for fold in folds:
        train_set = list(folds)
        train_set.remove(fold)  # train on every fold except the current one
        train_set = sum(train_set, [])  # flatten the folds into a single list of rows
        test_set = list()
        for row in fold:
            row_copy = list(row)
            test_set.append(row_copy)
            row_copy[-1] = None  # hide the true label from the algorithm
        predicted = algorithm(train_set, test_set, *args)  # random_forest returns the majority-vote predictions
        actual = [row[-1] for row in fold]  # the true labels
        accuracy = accuracy_metric(actual, predicted)
        scores.append(accuracy)
    return scores

# Split a dataset based on an attribute value
def test_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right

# Calculate the Gini index for a split dataset
def gini_index(groups, classes):
    # count all samples at the split point
    n_instances = float(sum([len(group) for group in groups]))
    # sum the weighted Gini index for each group
    gini = 0.0
    for group in groups:
        size = float(len(group))
        # avoid division by zero
        if size == 0:
            continue
        score = 0.0
        # score the group based on the proportion of each class
        for class_val in classes:
            p = [row[-1] for row in group].count(class_val) / size
            score += p * p
        # weight the group's impurity (1.0 - score) by its relative size
        gini += (1.0 - score) * (size / n_instances)
    return gini
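A quick sanity check of gini_index on two tiny hand-made groups, where the last value of each row is the class label:

# worst case: each group is half class 0, half class 1
print(gini_index([[[1, 1], [1, 0]], [[1, 1], [1, 0]]], [0, 1]))  # 0.5
# best case: each group is pure
print(gini_index([[[1, 0], [1, 0]], [[1, 1], [1, 1]]], [0, 1]))  # 0.0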
# Select the best split point for a dataset
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))  # the distinct class labels in the sample
    b_index, b_value, b_score, b_groups = 999, 999, 999, None
    features = list()
    while len(features) < n_features:
        index = randrange(len(dataset[0])-1)  # randomly pick n_features candidate features for this node
        if index not in features:
            features.append(index)
    for index in features:  # for each candidate feature
        for row in dataset:  # try each sample's value as the split threshold
            groups = test_split(index, row[index], dataset)  # split dataset into two subsets at row[index]
            gini = gini_index(groups, class_values)  # weighted Gini of the two subsets; smaller is better
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups  # record the best split so far
    return {'index': b_index, 'value': b_value, 'groups': b_groups}  # best feature, threshold, and the two subsets
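As an illustration, get_split can be exercised on a hand-made toy dataset (values invented for demonstration); with two well-separated classes, some threshold reaches a Gini of 0.0:

toy = [[2.7, 1.0, 0], [1.3, 0.8, 0], [3.6, 2.9, 1], [3.1, 3.3, 1]]  # last column is the label
node = get_split(toy, 1)  # consider a single randomly chosen feature
print(node['index'], node['value'])  # a perfect split exists on either feature here
print(len(node['groups'][0]), len(node['groups'][1]))  # 2 2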
# Create a terminal node value
def to_terminal(group):
    # force a leaf node, labelled with the most frequent class in the group
    outcomes = [row[-1] for row in group]
    return max(set(outcomes), key=outcomes.count)

# Create child splits for a node, or terminals where growth must stop
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']  # the two subsets produced by get_split
    del(node['groups'])
    # check for a no split
    if not left or not right:  # one side is empty: force a leaf labelled with the majority class
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # check for max depth
    if depth >= max_depth:  # the tree is deep enough: force leaves; depth is 1 at the root split
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # process left child
    if len(left) <= min_size:  # too few samples in the left subset: force a leaf
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)  # split the left subset recursively
        split(node['left'], max_depth, min_size, n_features, depth+1)
    # process right child
    if len(right) <= min_size:  # too few samples in the right subset: force a leaf
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)  # split the right subset recursively
        split(node['right'], max_depth, min_size, n_features, depth+1)

# Build a decision tree
def build_tree(train, max_depth, min_size, n_features):
    root = get_split(train, n_features)
    split(root, max_depth, min_size, n_features, 1)
    return root

# Make a prediction with a decision tree
def predict(node, row):
    if row[node['index']] < node['value']:
        if isinstance(node['left'], dict):  # an internal node is a dict: recurse into it
            return predict(node['left'], row)
        else:  # a leaf is a plain class label
            return node['left']
    else:
        if isinstance(node['right'], dict):
            return predict(node['right'], row)
        else:
            return node['right']
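To make the node layout concrete, here is a hand-built one-split tree (hypothetical values) and the path predict takes through it; internal nodes are dicts, leaves are plain class labels:

stump = {'index': 0, 'value': 2.5, 'left': 0, 'right': 1}
print(predict(stump, [1.9]))  # 1.9 < 2.5, so the left leaf: 0
print(predict(stump, [3.2]))  # 3.2 >= 2.5, so the right leaf: 1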
# Create a random subsample from the dataset, with replacement
def subsample(dataset, ratio):
    sample = list()
    n_sample = round(len(dataset) * ratio)  # round() returns the value rounded to the nearest integer
    while len(sample) < n_sample:
        index = randrange(len(dataset))
        sample.append(dataset[index])  # the same row may be drawn more than once: sampling with replacement
    return sample
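Because rows are drawn with replacement, a bootstrap sample usually repeats some rows and leaves others out (about 36.8% on average), which is exactly what the out-of-bag estimate sketched earlier relies on. A small check with made-up rows:

rows = [[i] for i in range(10)]
boot = subsample(rows, 1.0)
distinct = set(tuple(r) for r in boot)
print(len(boot), len(distinct))  # 10 draws, but usually fewer than 10 distinct rows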
# Make a prediction with a list of bagged trees
def bagging_predict(trees, row):
    predictions = [predict(tree, row) for tree in trees]  # one vote per tree
    return max(set(predictions), key=predictions.count)  # the majority class wins
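The vote is a simple plurality; with an even number of trees a tie is possible, and max over a set then breaks it arbitrarily, so an odd n_trees avoids ambiguity. For example:

votes = [0, 1, 1]  # three trees voting
print(max(set(votes), key=votes.count))  # 1: the majority class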
# Random forest algorithm
def random_forest(train, test, max_depth, min_size, sample_size, n_trees, n_features):
    trees = list()
    for i in range(n_trees):
        sample = subsample(train, sample_size)  # a fresh bootstrap sample for each tree
        tree = build_tree(sample, max_depth, min_size, n_features)
        trees.append(tree)
    predictions = [bagging_predict(trees, row) for row in test]
    return predictions
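random_forest can also be called directly, outside the cross-validation harness; a minimal sketch with hypothetical in-memory rows (last column is the label, hidden for the test rows):

train = [[0.9, 0], [1.3, 0], [2.7, 0], [3.1, 1], [3.6, 1], [4.2, 1]]
test = [[1.1, None], [3.9, None]]
preds = random_forest(train, test, 3, 1, 1.0, 3, 1)  # depth 3, min_size 1, 3 trees, 1 feature per split
print(preds)  # one predicted class per test row, e.g. [0, 1]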
# Test the random forest algorithm on the Sonar dataset
seed(2)
# load and prepare the data
filename = 'sonar-all-data.csv'
dataset = load_csv(filename)
for i in range(0, len(dataset[0])-1):
    str_column_to_float(dataset, i)
# convert the class column to integers
#str_column_to_int(dataset, len(dataset[0])-1)
# evaluate the algorithm
n_folds = 5
max_depth = 10
min_size = 1
sample_size = 1.0
n_features = int(sqrt(len(dataset[0])-1))
for n_trees in [1, 5, 10]:
    scores = evaluate_algorithm(dataset, random_forest, n_folds, max_depth, min_size, sample_size, n_trees, n_features)
    print('Trees: %d' % n_trees)
    print('Scores: %s' % scores)  # the per-fold accuracy percentages
    print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))