2.5. Weight of Evidence (WOE)

A Weight-of-Evidence (WOE) logistic regression implementation using the OptBinning library and scikit-learn's `SGDClassifier`.
# Third-party dependencies, grouped in one place per convention.
import numpy as np
import pandas as pd
from optbinning import BinningProcess
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

# Seed NumPy's global RNG so any stochastic step below is reproducible.
np.random.seed(0)
# Fetch blended credit data from a Google Drive share link.
url = (
    "https://drive.google.com/file/d/1LZ7K32OcocobL4jAeeVTC0O4rm0BsT-z/view?usp=sharing"
)

# The file id is the path segment just before the trailing "view" component;
# rewrite the share link into a direct-download ("uc") URL for pandas.
url_parts = url.split("/")
file_id = url_parts[-2]
download_url = f"https://drive.google.com/uc?id={file_id}"
dataset = pd.read_csv(download_url)

# Separate the binary target from the feature matrix.
y = dataset["is_bad"]
X = dataset.drop("is_bad", axis=1)

# Stratified 70/30 split on the row labels so both partitions keep the
# original class balance; fixed random_state makes the split reproducible.
ix_train, ix_test = train_test_split(
    X.index, stratify=y, test_size=0.3, random_state=62
)
# Perform WOE binning on every feature column.
binning_process = BinningProcess(list(X.columns))

# Elastic-net-penalized logistic regression fit by SGD.
# NOTE(review): eta0 presumably has no effect under the "optimal" schedule —
# kept anyway so the configuration matches the original exactly.
sgd_logreg = SGDClassifier(
    loss="log_loss",
    penalty="elasticnet",
    l1_ratio=0.9,
    alpha=0.1,
    learning_rate="optimal",
    eta0=1e-3,
    tol=None,
)

# WOE transform followed by the linear classifier.
woe_pipeline = Pipeline(
    steps=[
        ("binning_process", binning_process),
        ("logistic_regression", sgd_logreg),
    ]
)

# Warm-start the SGD optimizer near a sensible solution for a WOE model.
train_event_rate = y.loc[ix_train].mean()
# Fix: the intercept of a logistic model corresponds to the log-ODDS of the
# event rate, log(p / (1 - p)), not log(p) as originally written.
bias_init = np.log(train_event_rate / (1.0 - train_event_rate))
# One coefficient per feature, all initialized at -1.0.
# NOTE(review): the -1.0 sign assumes OptBinning's WOE convention makes WOE
# inversely related to the event rate — confirm against the binning output.
coef_init = np.full(len(X.columns), -1.0)

# Route the init arrays to the final pipeline step using sklearn's
# "<step name>__<fit param>" routing syntax.
kwargs = {
    woe_pipeline.steps[-1][0] + "__intercept_init": bias_init,
    woe_pipeline.steps[-1][0] + "__coef_init": coef_init,
}

woe_pipeline.fit(X.loc[ix_train], y.loc[ix_train].values.ravel(), **kwargs)
Pipeline(steps=[('binning_process',
                 BinningProcess(variable_names=['external_risk_estimate',
                                                'months_since_oldest_trade_open',
                                                'months_since_most_recent_trade_open',
                                                'average_months_in_file',
                                                'num_satisfactory_trades',
                                                'num_trades_60_ever_2_derog_pub_rec',
                                                'num_trades_90_ever_2_derog_pub_rec',
                                                'percent_trades_never_delq',
                                                'months_since_most_recent_delq',
                                                'nu...
                                                'total_credit_limit',
                                                'current_installment_accounts',
                                                'paid_total',
                                                'num_mort_accounts',
                                                'account_never_delinq_percent',
                                                'balance',
                                                'num_historical_failed_to_pay',
                                                'num_total_cc_accounts',
                                                'num_cc_carrying_balance',
                                                'age', 'monthly_income', ...])),
                ('logistic_regression',
                 SGDClassifier(alpha=0.1, eta0=0.001, l1_ratio=0.9,
                               loss='log_loss', penalty='elasticnet',
                               tol=None))])
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
# Score both partitions with the positive-class (event) probability.
proba_train = woe_pipeline.predict_proba(X.loc[ix_train])[:, 1]
proba_test = woe_pipeline.predict_proba(X.loc[ix_test])[:, 1]

# Gini coefficient is a linear rescaling of ROC AUC: Gini = 2 * AUC - 1.
gini_trn = 2 * roc_auc_score(y.loc[ix_train], proba_train) - 1
gini_tst = 2 * roc_auc_score(y.loc[ix_test], proba_test) - 1

print(f"Train Gini score: {gini_trn:.2%}\n" f"Test Gini score: {gini_tst:.2%}")
Train Gini score: 64.96%
Test Gini score: 63.44%