Skip to content

Commit 2fc0d77

Browse files
authored
feat: add a new competition (#474)
* add tabular-playground-series-dec-2021 * finished * fix a mistake * fix a bug * fix a bug
1 parent d41343a commit 2fc0d77

11 files changed

Lines changed: 324 additions & 22 deletions

File tree

‎rdagent/scenarios/kaggle/docker/Dockerfile‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,4 +25,5 @@ RUN pip install xgboost
2525
RUN pip install sparse
2626
RUN pip install lightgbm
2727
RUN pip install pyarrow
28-
RUN pip install fastparquet
28+
RUN pip install fastparquet
29+
RUN pip install optuna
Lines changed: 72 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,72 @@
1+
import os
2+
3+
import numpy as np
4+
import pandas as pd
5+
from sklearn.impute import SimpleImputer
6+
from sklearn.model_selection import train_test_split
7+
8+
9+
def prepreprocess():
    """Load the raw training CSV and split it into train/validation sets.

    Drops the ``Id`` column, separates the ``Cover_Type`` target, and shifts
    the labels from 1-based to 0-based for the classifiers.

    Returns:
        Tuple of (X_train, X_valid, y_train, y_valid).
    """
    # Read the raw competition data and discard the row identifier.
    raw = pd.read_csv("/kaggle/input/train.csv").drop(["Id"], axis=1)

    features = raw.drop(["Cover_Type"], axis=1)
    # Labels arrive as 1..N; models expect 0-based class indices.
    target = raw["Cover_Type"] - 1

    # Hold out 20% of the rows for validation, seeded for reproducibility.
    X_train, X_valid, y_train, y_valid = train_test_split(features, target, test_size=0.20, random_state=42)
    return X_train, X_valid, y_train, y_valid
24+
25+
26+
def preprocess_script():
    """Produce the train/validation/test frames plus submission ids.

    If pre-computed pickles exist under ``/kaggle/input`` they are loaded
    directly; otherwise the raw CSVs are processed via ``prepreprocess()``.

    Returns:
        (X_train, X_valid, y_train, y_valid, X_test, *others-or-ids)
    """
    if os.path.exists("/kaggle/input/X_train.pkl"):
        # Fast path: cached artifacts produced by an earlier preprocessing run.
        cached = [
            pd.read_pickle(f"/kaggle/input/{name}.pkl")
            for name in ("X_train", "X_valid", "y_train", "y_valid", "X_test")
        ]
        # `others` holds any extra artifacts (e.g. ids) saved alongside the splits.
        others = pd.read_pickle("/kaggle/input/others.pkl")
        return (*cached, *others)

    X_train, X_valid, y_train, y_valid = prepreprocess()

    # Build the test matrix from the submission CSV, keeping ids for the output file.
    submission_df = pd.read_csv("/kaggle/input/test.csv")
    ids = submission_df["Id"]
    X_test = submission_df.drop(["Id"], axis=1)

    return X_train, X_valid, y_train, y_valid, X_test, ids
48+
49+
50+
def clean_and_impute_data(X_train, X_valid, X_test):
    """Replace infinities, impute missing values, and drop duplicate columns.

    The imputer is fitted on the training split only and then applied to the
    validation and test splits, which avoids train/test leakage.

    Fixes over the previous version:
    - the input frames are no longer mutated in place (``inplace=True`` on the
      callers' objects was a surprising side effect for a function that also
      returns new frames);
    - the row index of each frame is preserved when rebuilding the DataFrames
      after imputation, so label alignment with ``y_*`` is kept.

    Returns:
        (X_train, X_valid, X_test) as new DataFrames.
    """
    # +/-inf cannot be imputed directly; convert to NaN without touching the inputs.
    X_train = X_train.replace([np.inf, -np.inf], np.nan)
    X_valid = X_valid.replace([np.inf, -np.inf], np.nan)
    X_test = X_test.replace([np.inf, -np.inf], np.nan)

    # Mean-impute remaining NaNs; statistics come from the training data only.
    imputer = SimpleImputer(strategy="mean")
    X_train = pd.DataFrame(imputer.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
    X_valid = pd.DataFrame(imputer.transform(X_valid), columns=X_valid.columns, index=X_valid.index)
    X_test = pd.DataFrame(imputer.transform(X_test), columns=X_test.columns, index=X_test.index)

    # Drop duplicate column labels, keeping the first occurrence of each.
    X_train = X_train.loc[:, ~X_train.columns.duplicated()]
    X_valid = X_valid.loc[:, ~X_valid.columns.duplicated()]
    X_test = X_test.loc[:, ~X_test.columns.duplicated()]

    return X_train, X_valid, X_test
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import pandas as pd
2+
3+
"""
4+
Here is the feature engineering code for each task, with a class that has a fit and transform method.
5+
Remember
6+
"""
7+
8+
9+
class IdentityFeature:
    """No-op feature transformer: returns its input unchanged.

    Baseline entry for the auto feature-engineering loop, which expects
    objects exposing ``fit`` and ``transform``.
    """

    def fit(self, train_df: pd.DataFrame):
        """Nothing to learn for the identity transform."""
        return None

    def transform(self, X: pd.DataFrame):
        """Return *X* exactly as received."""
        return X


# The training pipeline looks up this module-level name to build the feature class.
feature_engineering_cls = IdentityFeature
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
"""
2+
Motivation of the model:
3+
The Random Forest model is chosen for its robustness and ability to handle large datasets with higher dimensionality.
4+
It reduces overfitting by averaging multiple decision trees and typically performs well out of the box, making it a good
5+
baseline model for many classification tasks.
6+
"""
7+
8+
import pandas as pd
9+
from sklearn.ensemble import RandomForestClassifier
10+
from sklearn.metrics import accuracy_score
11+
12+
13+
def fit(X_train: pd.DataFrame, y_train: pd.Series, X_valid: pd.DataFrame, y_valid: pd.Series):
    """Train a Random Forest classifier and report its validation accuracy.

    Returns:
        The fitted RandomForestClassifier.
    """
    # 200 trees across all cores; fixed seed keeps runs comparable.
    model = RandomForestClassifier(n_estimators=200, random_state=32, n_jobs=-1)
    model.fit(X_train, y_train)

    # Quick sanity check on the held-out split.
    validation_accuracy = accuracy_score(y_valid, model.predict(X_valid))
    print(f"Validation Accuracy: {validation_accuracy:.4f}")

    return model
29+
30+
31+
def predict(model, X):
    """Run the fitted model on *X* and return predictions as an (n, 1) column.

    The reshape keeps the output layout consistent across the model scripts
    consumed by the ensemble/evaluation code.
    """
    return model.predict(X).reshape(-1, 1)
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
"""
2+
motivation of the model
3+
"""
4+
5+
import pandas as pd
6+
import xgboost as xgb
7+
8+
9+
def fit(X_train: pd.DataFrame, y_train: pd.DataFrame, X_valid: pd.DataFrame, y_valid: pd.DataFrame):
    """Train a multi-class XGBoost model on the GPU and return the booster."""
    train_matrix = xgb.DMatrix(X_train, label=y_train)
    valid_matrix = xgb.DMatrix(X_valid, label=y_valid)

    params = {
        "objective": "multi:softmax",  # hard class predictions for multi-class output
        "num_class": len(set(y_train)),  # number of distinct classes seen in training
        "nthread": -1,
        "tree_method": "gpu_hist",
        "device": "cuda",
    }

    # Watch both splits during the 100 boosting rounds.
    watchlist = [(train_matrix, "train"), (valid_matrix, "eval")]
    booster = xgb.train(params, train_matrix, 100, watchlist)

    return booster
27+
28+
29+
def predict(model, X):
    """Predict class labels for *X* with the trained booster.

    Returns an (n, 1) integer column so outputs align across model scripts.
    """
    predictions = model.predict(xgb.DMatrix(X))
    return predictions.astype(int).reshape(-1, 1)
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import pandas as pd
2+
3+
4+
def select(X: pd.DataFrame) -> pd.DataFrame:
    """Pass through all features, flattening any MultiIndex column labels.

    No actual selection is performed yet — every column is kept. Models cannot
    consume tuple-valued column names, so multi-level headers are collapsed
    into single underscore-joined strings (mutating *X*'s columns in place).
    """
    if X.columns.nlevels > 1:
        X.columns = ["_".join(str(part) for part in col).strip() for col in X.columns.values]
    return X
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import pandas as pd
2+
3+
4+
def select(X: pd.DataFrame) -> pd.DataFrame:
    """Keep every feature, collapsing MultiIndex headers to flat strings.

    Placeholder for real feature-selection logic: all columns pass through.
    Multi-level column labels are joined with underscores (in place) so the
    downstream model code sees plain string names.
    """
    if X.columns.nlevels > 1:
        X.columns = ["_".join(str(level) for level in col).strip() for col in X.columns.values]
    return X
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import pandas as pd
2+
3+
4+
def select(X: pd.DataFrame) -> pd.DataFrame:
    """Identity feature selection with MultiIndex flattening.

    All columns are considered relevant for now. When the frame carries a
    multi-level header, the labels are flattened (in place) to
    underscore-joined strings before being handed to fit/predict.
    """
    if X.columns.nlevels > 1:
        flattened = ["_".join(str(piece) for piece in col).strip() for col in X.columns.values]
        X.columns = flattened
    return X
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import pandas as pd
2+
3+
4+
def select(X: pd.DataFrame) -> pd.DataFrame:
    """Return *X* with all features kept and flat string column names.

    Selection is currently a no-op; the only transformation is collapsing a
    MultiIndex header (if present) into underscore-joined labels, mutating
    *X*'s columns in place.
    """
    if X.columns.nlevels > 1:
        X.columns = ["_".join(str(entry) for entry in col).strip() for col in X.columns.values]
    return X
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
import importlib.util
2+
import random
3+
from pathlib import Path
4+
5+
import numpy as np
6+
import pandas as pd
7+
from fea_share_preprocess import clean_and_impute_data, preprocess_script
8+
from sklearn.metrics import accuracy_score, matthews_corrcoef
9+
10+
# Seed all RNGs used in this script so splits and models are reproducible.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
# Directory containing this script; used to locate the feature/ and model/
# submodules. Path.resolve() already returns an absolute path, so the former
# .absolute() call was redundant and has been dropped.
DIRNAME = Path(__file__).resolve().parent
15+
16+
17+
def compute_metrics_for_classification(y_true, y_pred):
    """Return the Matthews correlation coefficient between truth and predictions."""
    return matthews_corrcoef(y_true, y_pred)
21+
22+
23+
def import_module_from_path(module_name, module_path):
    """Dynamically load a Python module from an arbitrary file path.

    Lets the script pick up generated feature/ and model/ files at runtime
    without requiring them to be importable from sys.path.
    """
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
28+
29+
30+
# 1) Preprocess the data (cached pickles if present, otherwise the raw CSVs).
X_train, X_valid, y_train, y_valid, X_test, ids = preprocess_script()

# 2) Auto feature engineering: run every generated feature/feat*.py transformer
# and keep only outputs whose column counts agree across all three splits.
X_train_l, X_valid_l = [], []
X_test_l = []

for f in DIRNAME.glob("feature/feat*.py"):
    cls = import_module_from_path(f.stem, f).feature_engineering_cls()
    cls.fit(X_train)
    X_train_f = cls.transform(X_train)
    X_valid_f = cls.transform(X_valid)
    X_test_f = cls.transform(X_test)

    # Skip transformers that emit mismatched feature counts per split.
    if X_train_f.shape[-1] == X_valid_f.shape[-1] and X_train_f.shape[-1] == X_test_f.shape[-1]:
        X_train_l.append(X_train_f)
        X_valid_l.append(X_valid_f)
        X_test_l.append(X_test_f)

# Concatenate accepted feature blocks side by side; `keys` creates a MultiIndex
# header (feature_0, feature_1, ...) which is later flattened by select().
X_train = pd.concat(X_train_l, axis=1, keys=[f"feature_{i}" for i in range(len(X_train_l))])
X_valid = pd.concat(X_valid_l, axis=1, keys=[f"feature_{i}" for i in range(len(X_valid_l))])
X_test = pd.concat(X_test_l, axis=1, keys=[f"feature_{i}" for i in range(len(X_test_l))])

print(X_train.shape, X_valid.shape, X_test.shape)

# Handle inf and -inf values, impute NaNs, and drop duplicate columns.
X_train, X_valid, X_test = clean_and_impute_data(X_train, X_valid, X_test)


# 3) Train every generated model/model*.py, pairing each with its select*.py.
model_l = []  # list[tuple[fitted_model, predict_func, select_module]]
for f in DIRNAME.glob("model/model*.py"):
    # model_foo.py is paired with select_foo.py in the same directory.
    select_python_path = f.with_name(f.stem.replace("model", "select") + f.suffix)
    select_m = import_module_from_path(select_python_path.stem, select_python_path)
    # select() may mutate its argument (it flattens MultiIndex columns), so pass copies.
    X_train_selected = select_m.select(X_train.copy())
    X_valid_selected = select_m.select(X_valid.copy())

    m = import_module_from_path(f.stem, f)
    model_l.append((m.fit(X_train_selected, y_train, X_valid_selected, y_valid), m.predict, select_m))

# 4) Evaluate each model on the validation set.
metrics_all = []
for model, predict_func, select_m in model_l:
    X_valid_selected = select_m.select(X_valid.copy())
    y_valid_pred = predict_func(model, X_valid_selected)
    accuracy = accuracy_score(y_valid, y_valid_pred)
    print(f"final accuracy on valid set: {accuracy}")
    metrics_all.append(accuracy)

# 5) Save the best validation accuracy — presumably consumed by the
# surrounding framework; verify against the caller. NOTE(review): the metric
# labelled "multi-class accuracy" is accuracy, not the MCC helper defined above.
max_index = np.argmax(metrics_all)
pd.Series(data=[metrics_all[max_index]], index=["multi-class accuracy"]).to_csv("submission_score.csv")

# 6) Make predictions on the test set with the best model.
# The +1 undoes the 0-based label shift applied during preprocessing.
X_test_selected = model_l[max_index][2].select(X_test.copy())
y_test_pred = model_l[max_index][1](model_l[max_index][0], X_test_selected).flatten() + 1


# 7) Write the submission file with the original row ids restored.
submission_result = pd.DataFrame(y_test_pred, columns=["Cover_Type"])
submission_result.insert(0, "Id", ids)

submission_result.to_csv("submission.csv", index=False)

0 commit comments

Comments
 (0)