Using LGBMClassifier
# https://www.kaggle.com/code/ddosad/ps4e2-visual-eda-lgbm-obesity-risk
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV

df_train = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\train.csv')
original = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\ObesityDataSet.csv')
df_test = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\test.csv')
df_train = pd.concat([df_train, original]).drop(['id'], axis=1).drop_duplicates()
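# The Playground train set is synthetically generated from the original
# ObesityDataSet, so appending the original data and dropping duplicates is a
# common augmentation step in this competition.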
test_id = df_test['id']  # keep the test ids for the submission file; avoids shadowing the built-in id()
df_test = df_test.drop(['id'], axis=1)
obesityTypeDict = {'Insufficient_Weight': 0, 'Normal_Weight': 1, 'Obesity_Type_I': 2, 'Obesity_Type_II': 3,
                   'Obesity_Type_III': 4, 'Overweight_Level_I': 5, 'Overweight_Level_II': 6}
obesityNumDict = {0: 'Insufficient_Weight', 1: 'Normal_Weight', 2: 'Obesity_Type_I', 3: 'Obesity_Type_II',
                  4: 'Obesity_Type_III', 5: 'Overweight_Level_I', 6: 'Overweight_Level_II'}
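# The inverse mapping can equally be derived from obesityTypeDict directly
# (an equivalent one-liner): {v: k for k, v in obesityTypeDict.items()}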
# 'BIM' is the body-mass index (BMI): weight in kg over height in metres squared
df_train['BIM'] = df_train['Weight'] / (df_train['Height'] * df_train['Height'])
df_test['BIM'] = df_test['Weight'] / (df_test['Height'] * df_test['Height'])

Y = df_train['NObeyesdad'].map(obesityTypeDict)
x_train_o = df_train.drop(['NObeyesdad'], axis=1)
X = pd.get_dummies(x_train_o, drop_first=False)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

model3 = LGBMClassifier()
parameters3 = {"n_estimators": [100, 200, 300, 400, 500],"learning_rate": [0.01, 0.05, 0.1, 0.5, 1],"random_state": [42],"num_leaves": [16, 17, 18]}grid_search3 = GridSearchCV(model3, parameters3, cv=5, n_jobs=-1, scoring='accuracy')
grid_search3.fit(X_train, y_train)
print(grid_search3.best_score_)
best_parameters3 = grid_search3.best_params_
print(f"best_parameters3: {best_parameters3}")
model3 = LGBMClassifier(**best_parameters3)
model3.fit(X_train, y_train)
X_test_pred3 = model3.predict(X_test)
print(accuracy_score(y_test, X_test_pred3))

df_test = pd.get_dummies(df_test, drop_first=False)
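# Note: pd.get_dummies on train and test separately can produce mismatched
# columns if a category is absent from one of them. A defensive alignment
# against the training design matrix X (a sketch, not part of the original run):
# df_test = df_test.reindex(columns=X.columns, fill_value=0)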
df_preds = pd.DataFrame({'id': test_id, 'NObeyesdad': model3.predict(df_test)})
df_preds['NObeyesdad'] = df_preds['NObeyesdad'].map(obesityNumDict)
df_preds.to_csv('submission_lgb.csv', index=False)
Using XGBClassifier
# https://www.kaggle.com/code/ddosad/ps4e2-visual-eda-lgbm-obesity-risk
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from xgboost import XGBClassifier

df_train = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\train.csv')
original = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\ObesityDataSet.csv')
df_test = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\test.csv')
df_train = pd.concat([df_train, original]).drop(['id'], axis=1).drop_duplicates()
test_id = df_test['id']  # keep the test ids for the submission file; avoids shadowing the built-in id()
df_test = df_test.drop(['id'], axis=1)

obesityTypeDict = {'Insufficient_Weight': 0, 'Normal_Weight': 1, 'Obesity_Type_I': 2, 'Obesity_Type_II': 3,
                   'Obesity_Type_III': 4, 'Overweight_Level_I': 5, 'Overweight_Level_II': 6}
obesityNumDict = {0: 'Insufficient_Weight', 1: 'Normal_Weight', 2: 'Obesity_Type_I', 3: 'Obesity_Type_II',
                  4: 'Obesity_Type_III', 5: 'Overweight_Level_I', 6: 'Overweight_Level_II'}
df_train['BIM'] = df_train['Weight'] / (df_train['Height'] * df_train['Height'])
df_test['BIM'] = df_test['Weight'] / (df_test['Height'] * df_test['Height'])

Y = df_train['NObeyesdad'].map(obesityTypeDict)
x_train_o = df_train.drop(['NObeyesdad'], axis=1)
X = pd.get_dummies(x_train_o, drop_first=False)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)

model3 = XGBClassifier()
parameters3 = {"n_estimators": [50, 100, 150, 200, 300], # 多少棵树"learning_rate": [0.05, 0.1, 0, 2, 0.3], # 学习率"max_depth": [3, 4, 5, 6, 7], # 树的最大深度"colsample_bytree": [0.4, 0.6, 0.8, 1], # 选择多少列构建一个树"min_child_weight": [1, 2, 3, 4] # 叶子节点最小样本数目
}
# Build the grid search with 5-fold cross-validation.
# grid_search3 = RandomizedSearchCV(model3, parameters3, cv=5, n_jobs=-1, scoring='accuracy')
grid_search3 = GridSearchCV(model3, parameters3, cv=5, n_jobs=-1, scoring='accuracy')
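# This grid is large: 5 * 4 * 5 * 4 * 4 = 1600 combinations, i.e. 8000 fits at
# cv=5. The commented-out RandomizedSearchCV above samples a fixed budget of
# combinations instead; a sketch (n_iter and random_state are illustrative):
# from sklearn.model_selection import RandomizedSearchCV
# grid_search3 = RandomizedSearchCV(model3, parameters3, n_iter=50, cv=5,
#                                   n_jobs=-1, scoring='accuracy', random_state=42)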
grid_search3.fit(X_train, y_train)
print(grid_search3.best_score_)
best_parameters3 = grid_search3.best_params_
print(best_parameters3)
model3 = XGBClassifier(**best_parameters3)
model3.fit(X_train, y_train)
X_test_pred3 = model3.predict(X_test)
print(accuracy_score(y_test, X_test_pred3))

df_test = pd.get_dummies(df_test, drop_first=False)
df_preds = pd.DataFrame({'id': test_id, 'NObeyesdad': model3.predict(df_test)})
df_preds['NObeyesdad'] = df_preds['NObeyesdad'].map(obesityNumDict)
df_preds.to_csv('submission_xgb.csv', index=False)
Using stacking
# https://www.kaggle.com/code/ddosad/ps4e2-visual-eda-lgbm-obesity-risk
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import ydata_profiling

from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier

df_train = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\train.csv')
original = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\ObesityDataSet.csv')
df_test = pd.read_csv('D:\\python\\dataset\\PredictionOfObesityRisk\\playground-series-s4e2\\test.csv')
df_train = pd.concat([df_train, original]).drop(['id'], axis=1).drop_duplicates()
test_id = df_test['id']  # keep the test ids for the submission file; avoids shadowing the built-in id()
df_test = df_test.drop(['id'], axis=1)
print(f'The Train dataset has {df_train.shape[0]} rows and {df_train.shape[1]} columns')
print(f'The Test dataset has {df_test.shape[0]} rows and {df_test.shape[1]} columns')

# df_train.describe().to_csv('output.csv')
# pfr = ydata_profiling.ProfileReport(df_train)
# pfr.to_file("profile.html")

obesityType = df_train[['NObeyesdad']].copy()
obesityType = obesityType.drop_duplicates(ignore_index=True)
print(obesityType)
obesityTypeDict = {'Insufficient_Weight': 0, 'Normal_Weight': 1, 'Obesity_Type_I': 2, 'Obesity_Type_II': 3,
                   'Obesity_Type_III': 4, 'Overweight_Level_I': 5, 'Overweight_Level_II': 6}
obesityNumDict = {0: 'Insufficient_Weight', 1: 'Normal_Weight', 2: 'Obesity_Type_I', 3: 'Obesity_Type_II',
                  4: 'Obesity_Type_III', 5: 'Overweight_Level_I', 6: 'Overweight_Level_II'}
df_train['BIM'] = df_train['Weight'] / (df_train['Height'] * df_train['Height'])
df_test['BIM'] = df_test['Weight'] / (df_test['Height'] * df_test['Height'])

# Exploration: how often do the standard BMI cut-offs alone reproduce the label?
# def cal_grade(bim):
#     if bim < 18.5:
#         return 'Insufficient_Weight'
#     elif 18.5 <= bim < 25:
#         return 'Normal_Weight'
#     elif 25 <= bim < 26.89:
#         return 'Overweight_Level_I'
#     elif 26.89 <= bim < 30:
#         return 'Overweight_Level_II'
#     elif 30 <= bim < 35:
#         return 'Obesity_Type_I'
#     elif 35 <= bim < 40:
#         return 'Obesity_Type_II'
#     else:
#         return 'Obesity_Type_III'
# df_train['cal_NObeyesdad'] = df_train['BIM'].map(cal_grade)
# same = df_train[df_train['cal_NObeyesdad'] == df_train['NObeyesdad']]
# print(same.shape)
# diff = df_train[df_train['cal_NObeyesdad'] != df_train['NObeyesdad']]
# print(diff.shape)
# BIMsort = df_train.sort_values(by=['BIM'])
# Overweight_Level_II = df_train[df_train['NObeyesdad']=='Overweight_Level_II'].sort_values(by = ['BIM'])
Y = df_train['NObeyesdad'].map(obesityTypeDict)
x_train_o = df_train.drop(['NObeyesdad'], axis=1)
X = pd.get_dummies(x_train_o, drop_first=False)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
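# With seven classes of uneven frequency, a stratified split would keep class
# proportions equal in both halves (an optional variant, not the original call):
# X_train, X_test, y_train, y_test = train_test_split(
#     X, Y, test_size=0.2, random_state=42, stratify=Y)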
training_score = []
testing_score = []


def model_prediction(model):
    """Fit a model and record its training and testing accuracy."""
    model.fit(X_train, y_train)
    y_pred_train = model.predict(X_train)
    y_pred_test = model.predict(X_test)
    train_accuracy = accuracy_score(y_train, y_pred_train)
    test_accuracy = accuracy_score(y_test, y_pred_test)
    training_score.append(train_accuracy)
    testing_score.append(test_accuracy)
    print(f"Training accuracy: {train_accuracy}")
    print(f"Testing accuracy: {test_accuracy}")


model_prediction(SVC())
model_prediction(RandomForestClassifier())
model_prediction(AdaBoostClassifier())
model_prediction(GradientBoostingClassifier())
model_prediction(LGBMClassifier())
model_prediction(XGBClassifier())
model_prediction(CatBoostClassifier(verbose=False))

models = ["SVC", "RandomForest", "AdaBoost", "GradientBoost", "LGBM", "XGB", "CatBoost"]
df = pd.DataFrame({'Model': models, 'Training Accuracy': training_score, 'Testing Accuracy': testing_score})
print(df)
# Plot the training and testing accuracy of each model as a grouped bar chart
df.plot(x='Model', y=['Training Accuracy', 'Testing Accuracy'], kind='bar', figsize=(10, 8))
plt.title('Training and Testing Accuracy for Each Model')
plt.ylabel('Accuracy')
plt.xlabel('Model')
plt.show()

# The grid searches below are quite time-consuming
model1 = LGBMClassifier(force_row_wise=True)
parameters1 = {"n_estimators": [200, 300, 400],
               "learning_rate": [0.01, 0.05],
               "random_state": [42],
               "num_leaves": [17, 18]}
grid_search1 = GridSearchCV(model1, parameters1, scoring='accuracy', n_jobs=-1, cv=5)
grid_search1.fit(X_train, y_train)
print(f"grid_search1.best_score_: {grid_search1.best_score_}")
best_parameters1 = grid_search1.best_params_
print(best_parameters1)

model1 = LGBMClassifier(**best_parameters1)
model1.fit(X_train, y_train)
X_test_pred1 = model1.predict(X_test)
print(accuracy_score(y_test, X_test_pred1))

# model2 = CatBoostClassifier(verbose=False)
# parameters2 = {"learning_rate": np.arange(0.1, 0.5),
# "random_state": [42],
# "depth": [9, 10],
# "iterations": [40, 50]}
# grid_search2 = GridSearchCV(model2, parameters2, cv=5, n_jobs=-1, scoring='accuracy')
#
# grid_search2.fit(X_train, y_train)
# print(f"grid_search2.best_score_: {grid_search2.best_score_}")
# best_parameters2 = grid_search2.best_params_
# print(best_parameters2)
# model2 = CatBoostClassifier(**best_parameters2, verbose=False)
# model2.fit(X_train, y_train)
#
# X_test_pred2 = model2.predict(X_test)
# accuracy_score(y_test, X_test_pred2)

model3 = XGBClassifier()
parameters3 = {"n_estimators": [100, 150],"random_state": [42],"learning_rate": [0.1, 0.3, 0.5]}grid_search3 = GridSearchCV(model3, parameters3, cv=5, n_jobs=-1, scoring='accuracy')
grid_search3.fit(X_train, y_train)
print(grid_search3.best_score_)
best_parameters3 = grid_search3.best_params_
model3 = XGBClassifier(**best_parameters3)
model3.fit(X_train, y_train)
X_test_pred3 = model3.predict(X_test)
print(accuracy_score(y_test, X_test_pred3))

model4 = RandomForestClassifier()
parameters4 = {'n_estimators': [300, 500, 550],
               'min_samples_split': [8, 9],
               'max_depth': [11, 12],
               'min_samples_leaf': [4, 5]}
grid_search4 = GridSearchCV(model4, parameters4, cv=5, n_jobs=-1, scoring='accuracy')
grid_search4.fit(X_train, y_train)
best_parameters4 = grid_search4.best_params_
model4 = RandomForestClassifier(**best_parameters4)
model4.fit(X_train, y_train)
X_test_pred4 = model4.predict(X_test)
print(accuracy_score(y_test, X_test_pred4))

stacking_model = StackingClassifier(estimators=[('LGBM', model1),
                                                ('XGBoost', model3),
                                                ('RandomForest', model4)])
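# StackingClassifier fits each base model, then trains a meta-learner on their
# out-of-fold predictions. By default the meta-learner is LogisticRegression
# with 5-fold CV; a sketch with those defaults written out explicitly:
# stacking_model = StackingClassifier(
#     estimators=[('LGBM', model1), ('XGBoost', model3), ('RandomForest', model4)],
#     final_estimator=LogisticRegression(),
#     cv=5,
# )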
stacking_model.fit(X_train, y_train)
X_train_pred5 = stacking_model.predict(X_train)
X_test_pred5 = stacking_model.predict(X_test)
print(f'Stacking model training accuracy is {accuracy_score(y_train, X_train_pred5)}')
print(f'Stacking model testing accuracy is {accuracy_score(y_test, X_test_pred5)}')

df_test = pd.get_dummies(df_test, drop_first=False)
df_preds = pd.DataFrame({'id': test_id, 'NObeyesdad': stacking_model.predict(df_test)})
df_preds['NObeyesdad'] = df_preds['NObeyesdad'].map(obesityNumDict)
df_preds.to_csv('submission1.csv', index=False)