Я хочу обучить и оценить модели, чтобы найти лучшие модели для своих сегментов, но в sklearn что-то не так с тегами и оценщиками, и я не могу разобраться в проблеме. Возможно, что-то не так со сценариями tags.py, base.py и search.py, но я в этом тоже не уверен.
# Import libraries
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from xgboost import XGBRegressor
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
import joblib
# Develop function to train and evaluate models
def train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv):
# Load the training data
train_data = pd.read_csv(input_train_csv)
# Extract features and target
X_train = train_data.drop(columns=['Response_ID', 'segment'])
y_train = train_data['segment']
metrics_list = []
# Define hyperparameters to tune
param_grid = {
'max_depth': [3, 5, 7],
'n_estimators': [100, 200, 300],
'learning_rate': [0.01, 0.1, 0.2],
'subsample': [0.8, 1.0],
'colsample_bytree': [0.8, 1.0]
}
# Train, save models, and evaluate for each segment
for segment in range(1, 7):
# Binary target for the current segment
y_train_segment = (y_train == segment).astype(int)
# Split the data for evaluation
X_train_split, X_valid, y_train_split, y_valid = train_test_split(X_train, y_train_segment, test_size=0.2, random_state=42)
# Initialize the model
model = XGBRegressor(
objective='binary:logistic',
eval_metric='logloss',
use_label_encoder=False,
early_stopping_rounds=10
)
# Hyperparameter tuning using GridSearchCV
kf = KFold(n_splits=3, shuffle=True, random_state=42)
grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=kf, scoring='roc_auc', verbose=1, n_jobs=-1)
# Fit the model with early stopping
grid_search.fit(X_train_split, y_train_split,
eval_set=[(X_valid, y_valid)],
verbose=False)
# Get the best model from grid search
best_model = grid_search.best_estimator_
# Save the best model to disk
model_filename = f'{models_dir}/segment_{segment}_model.joblib'
joblib.dump(best_model, model_filename)
print(f'Model for segment {segment} saved to {model_filename}')
# Predict on validation set
y_valid_pred = best_model.predict(X_valid)
y_valid_pred_binary = [1 if prob > 0.5 else 0 for prob in y_valid_pred]
# Calculate evaluation metrics
accuracy = accuracy_score(y_valid, y_valid_pred_binary)
precision = precision_score(y_valid, y_valid_pred_binary)
recall = recall_score(y_valid, y_valid_pred_binary)
f1 = f1_score(y_valid, y_valid_pred_binary)
auc = roc_auc_score(y_valid, y_valid_pred)
metrics_list.append({
'Segment': segment,
'Accuracy': accuracy,
'Precision': precision,
'Recall': recall,
'F1 Score': f1,
'AUC': auc
})
# Save the metrics to a CSV file
metrics_df = pd.DataFrame(metrics_list)
metrics_df.to_csv(metrics_output_csv, index=False)
return metrics_df
# Pathways
input_train_csv = r"C:\\Users\\me\\input.csv"
models_dir = r"C:\\Users\\me"
metrics_output_csv = r"C:\\Users\\me\\output.csv"
# Train, evaluate, and save models
metrics_df = train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
print(metrics_df)
Я пробовал обновить xgboost и понизить версию sklearn, чтобы решить проблемы совместимости, но та же ошибка продолжала появляться. Я также попробовал несколько обходных путей для обеспечения совместимости, а также убедился, что сценарий читает входные данные как число с плавающей запятой или целое число, а не строку, но это тоже не сработало, что привело к тем же ошибкам.
C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\utils\_tags.py:354: FutureWarning: The XGBRegressor or classes from which it inherits use `_get_tags` and `_more_tags`. Please define the `__sklearn_tags__` method, or inherit from `sklearn.base.BaseEstimator` and/or other appropriate mixins such as `sklearn.base.TransformerMixin`, `sklearn.base.ClassifierMixin`, `sklearn.base.RegressorMixin`, and `sklearn.base.OutlierMixin`. From scikit-learn 1.7, not defining `__sklearn_tags__` will raise an error.
warnings.warn(
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\spyder_kernels\customize\utils.py:209, in exec_encapsulate_locals(code_ast, globals, locals, exec_fun, filename)
207 if filename is None:
208 filename = ""
--> 209 exec_fun(compile(code_ast, filename, "exec"), globals, None)
210 finally:
211 if use_locals_hack:
212 # Cleanup code
File c:\users\me\pyhton programs\2-model training\2_model_training.py:93
90 metrics_output_csv = r"C:\\Users\\me\\model_metrics.csv"
92 # Train, evaluate, and save models
---> 93 metrics_df = train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
94 print(metrics_df)
File c:\users\me\pyhton programs\2-model training\2_model_training.py:49, in train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
46 grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=kf, scoring='roc_auc', verbose=1, n_jobs=-1)
48 # Fit the model with early stopping
---> 49 grid_search.fit(X_train_split, y_train_split,
50 eval_set=[(X_valid, y_valid)],
51 verbose=False)
53 # Get the best model from grid search
54 best_model = grid_search.best_estimator_
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:1389, in _fit_context..decorator..wrapper(estimator, *args, **kwargs)
1382 estimator._validate_params()
1384 with config_context(
1385 skip_parameter_validation=(
1386 prefer_skip_nested_validation or global_skip_validation
1387 )
1388 ):
-> 1389 return fit_method(estimator, *args, **kwargs)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\model_selection\_search.py:932, in BaseSearchCV.fit(self, X, y, **params)
928 params = _check_method_params(X, params=params)
930 routed_params = self._get_routed_params_for_fit(params)
--> 932 cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
933 n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
935 base_estimator = clone(self.estimator)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:1237, in is_classifier(estimator)
1230 warnings.warn(
1231 f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
1232 "will be removed in 1.8. Use an instance of the class instead.",
1233 FutureWarning,
1234 )
1235 return getattr(estimator, "_estimator_type", None) == "classifier"
-> 1237 return get_tags(estimator).estimator_type == "classifier"
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\utils\_tags.py:405, in get_tags(estimator)
403 for klass in reversed(type(estimator).mro()):
404 if "__sklearn_tags__" in vars(klass):
--> 405 sklearn_tags_provider[klass] = klass.__sklearn_tags__(estimator) # type: ignore[attr-defined]
406 class_order.append(klass)
407 elif "_more_tags" in vars(klass):
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:613, in RegressorMixin.__sklearn_tags__(self)
612 def __sklearn_tags__(self):
--> 613 tags = super().__sklearn_tags__()
614 tags.estimator_type = "regressor"
615 tags.regressor_tags = RegressorTags()
AttributeError: 'super' object has no attribute '__sklearn_tags__'
Подробнее здесь: https://stackoverflow.com/questions/792 ... gmentation
Обучение модели для сегментации ⇐ Python
Программы на Python
1734490419
Anonymous
Я хочу обучить и оценить модели, чтобы найти лучшие модели для своих сегментов, но в sklearn что-то не так с тегами и оценщиками, и я не могу разобраться в проблеме. Возможно, что-то не так со сценариями tags.py, base.py и search.py, но я в этом тоже не уверен.
# Import libraries
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, KFold
from xgboost import XGBRegressor
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
import joblib
# Develop function to train and evaluate models
def train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv):
# Load the training data
train_data = pd.read_csv(input_train_csv)
# Extract features and target
X_train = train_data.drop(columns=['Response_ID', 'segment'])
y_train = train_data['segment']
metrics_list = []
# Define hyperparameters to tune
param_grid = {
'max_depth': [3, 5, 7],
'n_estimators': [100, 200, 300],
'learning_rate': [0.01, 0.1, 0.2],
'subsample': [0.8, 1.0],
'colsample_bytree': [0.8, 1.0]
}
# Train, save models, and evaluate for each segment
for segment in range(1, 7):
# Binary target for the current segment
y_train_segment = (y_train == segment).astype(int)
# Split the data for evaluation
X_train_split, X_valid, y_train_split, y_valid = train_test_split(X_train, y_train_segment, test_size=0.2, random_state=42)
# Initialize the model
model = XGBRegressor(
objective='binary:logistic',
eval_metric='logloss',
use_label_encoder=False,
early_stopping_rounds=10
)
# Hyperparameter tuning using GridSearchCV
kf = KFold(n_splits=3, shuffle=True, random_state=42)
grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=kf, scoring='roc_auc', verbose=1, n_jobs=-1)
# Fit the model with early stopping
grid_search.fit(X_train_split, y_train_split,
eval_set=[(X_valid, y_valid)],
verbose=False)
# Get the best model from grid search
best_model = grid_search.best_estimator_
# Save the best model to disk
model_filename = f'{models_dir}/segment_{segment}_model.joblib'
joblib.dump(best_model, model_filename)
print(f'Model for segment {segment} saved to {model_filename}')
# Predict on validation set
y_valid_pred = best_model.predict(X_valid)
y_valid_pred_binary = [1 if prob > 0.5 else 0 for prob in y_valid_pred]
# Calculate evaluation metrics
accuracy = accuracy_score(y_valid, y_valid_pred_binary)
precision = precision_score(y_valid, y_valid_pred_binary)
recall = recall_score(y_valid, y_valid_pred_binary)
f1 = f1_score(y_valid, y_valid_pred_binary)
auc = roc_auc_score(y_valid, y_valid_pred)
metrics_list.append({
'Segment': segment,
'Accuracy': accuracy,
'Precision': precision,
'Recall': recall,
'F1 Score': f1,
'AUC': auc
})
# Save the metrics to a CSV file
metrics_df = pd.DataFrame(metrics_list)
metrics_df.to_csv(metrics_output_csv, index=False)
return metrics_df
# Pathways
input_train_csv = r"C:\\Users\\me\\input.csv"
models_dir = r"C:\\Users\\me"
metrics_output_csv = r"C:\\Users\\me\\output.csv"
# Train, evaluate, and save models
metrics_df = train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
print(metrics_df)
Я пробовал обновить xgboost и понизить версию sklearn, чтобы решить проблемы совместимости, но та же ошибка продолжала появляться. Я также попробовал несколько обходных путей для обеспечения совместимости, а также убедился, что сценарий читает входные данные как число с плавающей запятой или целое число, а не строку, но это тоже не сработало, что привело к тем же ошибкам.
C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\utils\_tags.py:354: FutureWarning: The XGBRegressor or classes from which it inherits use `_get_tags` and `_more_tags`. Please define the `__sklearn_tags__` method, or inherit from `sklearn.base.BaseEstimator` and/or other appropriate mixins such as `sklearn.base.TransformerMixin`, `sklearn.base.ClassifierMixin`, `sklearn.base.RegressorMixin`, and `sklearn.base.OutlierMixin`. From scikit-learn 1.7, not defining `__sklearn_tags__` will raise an error.
warnings.warn(
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\spyder_kernels\customize\utils.py:209, in exec_encapsulate_locals(code_ast, globals, locals, exec_fun, filename)
207 if filename is None:
208 filename = ""
--> 209 exec_fun(compile(code_ast, filename, "exec"), globals, None)
210 finally:
211 if use_locals_hack:
212 # Cleanup code
File c:\users\me\pyhton programs\2-model training\2_model_training.py:93
90 metrics_output_csv = r"C:\\Users\\me\\model_metrics.csv"
92 # Train, evaluate, and save models
---> 93 metrics_df = train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
94 print(metrics_df)
File c:\users\me\pyhton programs\2-model training\2_model_training.py:49, in train_and_evaluate_models(input_train_csv, models_dir, metrics_output_csv)
46 grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=kf, scoring='roc_auc', verbose=1, n_jobs=-1)
48 # Fit the model with early stopping
---> 49 grid_search.fit(X_train_split, y_train_split,
50 eval_set=[(X_valid, y_valid)],
51 verbose=False)
53 # Get the best model from grid search
54 best_model = grid_search.best_estimator_
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:1389, in _fit_context..decorator..wrapper(estimator, *args, **kwargs)
1382 estimator._validate_params()
1384 with config_context(
1385 skip_parameter_validation=(
1386 prefer_skip_nested_validation or global_skip_validation
1387 )
1388 ):
-> 1389 return fit_method(estimator, *args, **kwargs)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\model_selection\_search.py:932, in BaseSearchCV.fit(self, X, y, **params)
928 params = _check_method_params(X, params=params)
930 routed_params = self._get_routed_params_for_fit(params)
--> 932 cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
933 n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
935 base_estimator = clone(self.estimator)
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:1237, in is_classifier(estimator)
1230 warnings.warn(
1231 f"passing a class to {print(inspect.stack()[0][3])} is deprecated and "
1232 "will be removed in 1.8. Use an instance of the class instead.",
1233 FutureWarning,
1234 )
1235 return getattr(estimator, "_estimator_type", None) == "classifier"
-> 1237 return get_tags(estimator).estimator_type == "classifier"
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\utils\_tags.py:405, in get_tags(estimator)
403 for klass in reversed(type(estimator).mro()):
404 if "__sklearn_tags__" in vars(klass):
--> 405 sklearn_tags_provider[klass] = klass.__sklearn_tags__(estimator) # type: ignore[attr-defined]
406 class_order.append(klass)
407 elif "_more_tags" in vars(klass):
File C:\ProgramData\spyder-6\envs\spyder-runtime\Lib\site-packages\sklearn\base.py:613, in RegressorMixin.__sklearn_tags__(self)
612 def __sklearn_tags__(self):
--> 613 tags = super().__sklearn_tags__()
614 tags.estimator_type = "regressor"
615 tags.regressor_tags = RegressorTags()
AttributeError: 'super' object has no attribute '__sklearn_tags__'
Подробнее здесь: [url]https://stackoverflow.com/questions/79289735/model-training-for-segmentation[/url]
Ответить
1 сообщение
• Страница 1 из 1
Перейти
- Кемерово-IT
- ↳ Javascript
- ↳ C#
- ↳ JAVA
- ↳ Elasticsearch aggregation
- ↳ Python
- ↳ Php
- ↳ Android
- ↳ Html
- ↳ Jquery
- ↳ C++
- ↳ IOS
- ↳ CSS
- ↳ Excel
- ↳ Linux
- ↳ Apache
- ↳ MySql
- Детский мир
- Для души
- ↳ Музыкальные инструменты даром
- ↳ Печатная продукция даром
- Внешняя красота и здоровье
- ↳ Одежда и обувь для взрослых даром
- ↳ Товары для здоровья
- ↳ Физкультура и спорт
- Техника - даром!
- ↳ Автомобилистам
- ↳ Компьютерная техника
- ↳ Плиты: газовые и электрические
- ↳ Холодильники
- ↳ Стиральные машины
- ↳ Телевизоры
- ↳ Телефоны, смартфоны, плашеты
- ↳ Швейные машинки
- ↳ Прочая электроника и техника
- ↳ Фототехника
- Ремонт и интерьер
- ↳ Стройматериалы, инструмент
- ↳ Мебель и предметы интерьера даром
- ↳ Cантехника
- Другие темы
- ↳ Разное даром
- ↳ Давай меняться!
- ↳ Отдам\возьму за копеечку
- ↳ Работа и подработка в Кемерове
- ↳ Давай с тобой поговорим...
Мобильная версия