GridSearchCV for models inside voting classifier

I am trying to apply a grid search to the models inside a voting classifier. The voting classifier is wrapped in a calibrated classifier, and the calibrated classifier is a step in a pipeline. I am using the iris dataset and adapted the code from the sklearn VotingClassifier examples.

import pandas
import numpy
import matplotlib.pyplot as plt

from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, train_test_split
from sklearn.calibration import CalibratedClassifierCV
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, chi2, f_classif, mutual_info_classif


from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True, as_frame=True)

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, stratify=y)

from sklearn.ensemble import VotingClassifier 

from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier 
from sklearn.linear_model import LogisticRegression 

svc = SVC()
rf = RandomForestClassifier()
lr = LogisticRegression()

voting_classifier = VotingClassifier(
    estimators=[
        ('svc', svc),
        ('rf', rf),
        ('lr', lr),
    ]
)

preprocessor = ColumnTransformer(
    transformers = [
        ('step1',StandardScaler(),X.columns.tolist())
    ]
)

pipe = Pipeline(
   steps = [
        ('step1_preprocessing', preprocessor),
        ('step2_featureselection', SelectKBest()),
        ('step3_machinelearning', CalibratedClassifierCV(estimator=voting_classifier))
    ]
)

paramgrid = {
    'step2_featureselection__score_func': [chi2, f_classif, mutual_info_classif],
    'step2_featureselection__k': [1,2,3,'all'],
    'step3_machinelearning__method': ['sigmoid', 'isotonic'],
    'step3_machinelearning__estimator__voting': ['hard', 'soft'],
    'step3_machinelearning__estimator__estimators__svc__C':[1.0, 0, 0.5, 1.5, 2], # regularization parameter - if the dataset has more than 10000 samples you can try values up to 10
    'step3_machinelearning__estimator__estimators__svc__kernel':['rbf', 'linear', 'poly', 'sigmoid'],
    'step3_machinelearning__estimator__estimators__svc__degree':[1,2,4,5,3],
    'step3_machinelearning__estimator__estimators__svc__gamma':['scale','auto'],
    'step3_machinelearning__estimator__estimators__svc__coef0':[0.0],
    'step3_machinelearning__estimator__estimators__rf__bootstrap': [True, False],#done
    'step3_machinelearning__estimator__estimators__rf__oob_score': [True, False],#done
    'step3_machinelearning__estimator__estimators__rf__warm_start': [True, False],#done
    'step3_machinelearning__estimator__estimators__rf__max_samples': [1.0, 0.25, 0.5, 0.75],#done
    'step3_machinelearning__estimator__estimators__rf__n_estimators': [10, 25, 5, 50, 100], #done
    'step3_machinelearning__estimator__estimators__rf__criterion': ['gini', 'entropy', 'log_loss'], #done
    'step3_machinelearning__estimator__estimators__rf__max_depth': [None,2,3,4,5], #done
    'step3_machinelearning__estimator__estimators__rf__min_samples_split': [2,5,10],#done
    'step3_machinelearning__estimator__estimators__rf__min_samples_leaf': [1, 2, 4],#done
    'step3_machinelearning__estimator__estimators__rf__max_features': [None, 'sqrt', 'log2'],#done
    'step3_machinelearning__estimator__estimators__rf__max_leaf_nodes': [None, 5, 10, 20],#done
    'step3_machinelearning__estimator__estimators__rf__min_impurity_decrease': [0.0, 0.1, 0.2],#done
    'step3_machinelearning__estimator__estimators__rf__ccp_alpha': [0.0, 0.1, 0.2],#done
    'step3_machinelearning__estimator__estimators__lr__penalty' : ['l1', 'l2', 'elasticnet', None],
    'step3_machinelearning__estimator__estimators__lr__dual': [True, False],
    'step3_machinelearning__estimator__estimators__lr__C': [0.25, 0.5, 1, 2,],
    'step3_machinelearning__estimator__estimators__lr__fit_intercept': [True, False],
    'step3_machinelearning__estimator__estimators__lr__intercept_scaling': [0.25, 0.5, 1, 2],
    'step3_machinelearning__estimator__estimators__lr__class_weight': ['balanced', None],
    'step3_machinelearning__estimator__estimators__lr__l1_ratio': [0.1, 0.5, 0.75]
}

# Create the RandomizedSearchCV object
random_search = RandomizedSearchCV(estimator=pipe, param_distributions=paramgrid, cv=5, scoring='f1_macro')

# Fit the RandomizedSearchCV to find the best hyperparameters
random_search.fit(Xtrain, ytrain)

I got the following error:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-27-b2a0d99a4253> in <cell line: 69>()
     67 
     68 # Fit the RandomizedSearchCV to find the best hyperparameters
---> 69 random_search.fit(Xtrain, ytrain)
     70 
     71 # Print the best hyperparameters and best score

14 frames
/usr/local/lib/python3.10/dist-packages/sklearn/base.py in set_params(self, **params)
    234                 )
    235                 key = "estimator"
--> 236             valid_params[key].set_params(**sub_params)
    237 
    238         return self

AttributeError: 'list' object has no attribute 'set_params'

How can I solve it? Is there anything wrong in the code I wrote?

1 Answer

Accepted answer by Muhammed Yunus:

CalibratedClassifierCV requires an estimator that has predict_proba or decision_function. At the moment it's getting a VotingClassifier with voting='hard' (the default), which only has predict.
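You can verify this with a quick standalone check - a minimal sketch showing that a hard-voting ensemble doesn't expose predict_proba, which is exactly what CalibratedClassifierCV needs:

from sklearn.datasets import load_iris
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)

# voting='hard' is the default, and it disables predict_proba entirely
hard = VotingClassifier([('lr', LogisticRegression(max_iter=1000))], voting='hard')
hard.fit(X, y)

try:
    hard.predict_proba(X[:2])
except AttributeError as e:
    print(e)  # explains that predict_proba is not available when voting='hard'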

The first thing to change is to add voting='soft' to VotingClassifier. As a result, you must also remove this line from paramgrid:

paramgrid = {
  ...

  #Remove or comment line, as we require soft voting all the time
  'step3_machinelearning__estimator__voting': ['hard', 'soft'],

  ...
}

Since we're using soft voting, VotingClassifier expects all of its estimators to have a predict_proba method. By default, SVC doesn't output probabilities, so you need to change it to SVC(probability=True).
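A quick way to see the difference:

from sklearn.datasets import load_iris
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)

# predict_proba only becomes available when probability=True is set
print(hasattr(SVC().fit(X, y), 'predict_proba'))                  # False
print(hasattr(SVC(probability=True).fit(X, y), 'predict_proba'))  # True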

Some other issues:

  • The AttributeError itself comes from the paramgrid keys: estimators is a plain Python list, so set_params can't route parameters through it. Address the inner models by name directly, e.g. step3_machinelearning__estimator__svc__C rather than step3_machinelearning__estimator__estimators__svc__C (see the sketch after this list for how to discover the valid keys).
  • Since you try L1/L2/elastic-net penalties for logistic regression, you need solver='saga', as it's the only solver that supports all of those configurations. This also means you must remove the dual line from the grid, since "saga" only supports dual=False.
  • Removed chi2 from the score functions, as it fails when X contains negative values - which the StandardScaler step will produce.
  • For the RF, bootstrap=False can't be used with max_samples.
  • For quick debugging I used a small sample and n_iter=1 - you'll need to change those back.
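To see which parameter names are actually tunable, list the keys sklearn itself exposes - for example, on a small stand-in for the real pipeline step:

from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC

clf = CalibratedClassifierCV(
    estimator=VotingClassifier([('svc', SVC(probability=True))], voting='soft')
)

# Only keys returned by get_params() are valid search keys: note that
# 'estimator__svc__C' exists but 'estimator__estimators__svc__C' does not.
print([k for k in clf.get_params() if 'svc' in k])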

I think quite a few of the parameter combinations won't have much effect, or any at all. I'd start by varying just a few of the parameters you think are key - that will help you build an intuition for the main factors.
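For example, a pared-down grid (my illustration, reusing the step names from the code below) might look like:

small_paramgrid = {
    'step3_machinelearning__method': ['sigmoid', 'isotonic'],
    'step3_machinelearning__estimator__svc__C': [0.5, 1.0, 2.0],
    'step3_machinelearning__estimator__rf__n_estimators': [25, 50, 100],
    'step3_machinelearning__estimator__lr__C': [0.5, 1.0, 2.0],
}

Here's the full corrected version of your code: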


import pandas
import numpy
import matplotlib.pyplot as plt

from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, train_test_split
from sklearn.calibration import CalibratedClassifierCV
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import chi2, f_classif, mutual_info_classif

from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True, as_frame=True)

Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, stratify=y)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.8, stratify=y) #<--- small sample for debugging

from sklearn.ensemble import VotingClassifier 

from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier 
from sklearn.linear_model import LogisticRegression 
from sklearn.feature_selection import SelectKBest

svc = SVC(probability=True) #<--- require probabilities
rf = RandomForestClassifier()
lr = LogisticRegression(solver='saga') #<--- only "saga" supports all the l1/l2/elasticnet
                                       # combinations you try in paramgrid

voting_classifier = VotingClassifier(
    estimators=[
        ('svc', svc),
        ('rf', rf),
        ('lr', lr),
    ],
    voting='soft' #<--- soft voting
)

preprocessor = ColumnTransformer(
    transformers = [
        ('step1',StandardScaler(),X.columns.tolist())
    ]
)

pipe = Pipeline(
   steps = [
        ('step1_preprocessing', preprocessor),
        ('step2_featureselection', SelectKBest()),
        ('step3_machinelearning', CalibratedClassifierCV(estimator=voting_classifier))
    ]
)

paramgrid = { #<--- removed "estimators__" from keys
    'step2_featureselection__score_func': [f_classif, mutual_info_classif], #<---remove chi2 as X sometimes <0
    'step2_featureselection__k': [1,2,3,'all'],
    'step3_machinelearning__method': ['sigmoid', 'isotonic'],
    # 'step3_machinelearning__estimator__voting': ['hard', 'soft'], # <--- remove line
    'step3_machinelearning__estimator__svc__C':[1.0, 1e-6, 0.5, 1.5, 2], # regularization parameter - if the dataset has more than 10000 samples you can try values up to 10
                                                                         #<--- C can't be 0, so 0 replaced with 1e-6
    'step3_machinelearning__estimator__svc__kernel':['rbf', 'linear', 'poly', 'sigmoid'],
    'step3_machinelearning__estimator__svc__degree':[1,2,4,5,3],
    'step3_machinelearning__estimator__svc__gamma':['scale','auto'],
    'step3_machinelearning__estimator__svc__coef0':[0.0],
    # 'step3_machinelearning__estimator__rf__bootstrap': [True, False],#done #<-- False can't be used with max_samples
    'step3_machinelearning__estimator__rf__oob_score': [True, False],#done
    'step3_machinelearning__estimator__rf__warm_start': [True, False],#done
    'step3_machinelearning__estimator__rf__max_samples': [1.0, 0.25, 0.5, 0.75],#done
    'step3_machinelearning__estimator__rf__n_estimators': [10, 25, 5, 50, 100], #done
    'step3_machinelearning__estimator__rf__criterion': ['gini', 'entropy', 'log_loss'], #done
    'step3_machinelearning__estimator__rf__max_depth': [None,2,3,4,5], #done
    'step3_machinelearning__estimator__rf__min_samples_split': [2,5,10],#done
    'step3_machinelearning__estimator__rf__min_samples_leaf': [1, 2, 4],#done
    'step3_machinelearning__estimator__rf__max_features': [None, 'sqrt', 'log2'],#done
    'step3_machinelearning__estimator__rf__max_leaf_nodes': [None, 5, 10, 20],#done
    'step3_machinelearning__estimator__rf__min_impurity_decrease': [0.0, 0.1, 0.2],#done
    'step3_machinelearning__estimator__rf__ccp_alpha': [0.0, 0.1, 0.2],#done
    'step3_machinelearning__estimator__lr__penalty' : ['l1', 'l2', 'elasticnet', None],
    # 'step3_machinelearning__estimator__lr__dual': [True, False], #<--- "saga" only supports dual=False
    'step3_machinelearning__estimator__lr__C': [0.25, 0.5, 1, 2,],
    'step3_machinelearning__estimator__lr__fit_intercept': [True, False],
    'step3_machinelearning__estimator__lr__intercept_scaling': [0.25, 0.5, 1, 2],
    'step3_machinelearning__estimator__lr__class_weight': ['balanced', None],
    'step3_machinelearning__estimator__lr__l1_ratio': [0.1, 0.5, 0.75]
}

# Create the RandomizedSearchCV object
# I set n_iter=1 just for quick debugging
random_search = RandomizedSearchCV(estimator=pipe, param_distributions=paramgrid, n_iter=1, cv=5, scoring='f1_macro')

# Fit the RandomizedSearchCV to find the best hyperparameters
random_search.fit(Xtrain, ytrain)
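
Once the fit succeeds, you can inspect the result with the standard search attributes:

# Print the best hyperparameters and best score
print(random_search.best_params_)
print(random_search.best_score_)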