243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
@app.command()
def evaluate_model(
    run_id: str,
    args_fp: str = "config/args.json",
    experiment_name: str = "current candidate",
    run_name: str = "3-day evaluation",
) -> None:
    """
    Evaluate a trained scorecard model on a fresh train/test split.

    Loads the model artifacts for ``run_id``, rebuilds the binning pipeline
    from the args file, and computes scorecard and logistic-regression
    metrics inside a new MLflow run.

    :param run_id: The run_id of the model you want to evaluate; if empty,
        the id stored in ``config.CONFIG_DIR/run_id.txt`` is used instead
    :param args_fp: Path to the JSON file with pipeline arguments,
        defaults to config/args.json
    :type args_fp: str (optional)
    :param experiment_name: The name of the experiment you want to evaluate,
        defaults to current candidate
    :type experiment_name: str (optional)
    :param run_name: The name of the run, defaults to 3-day evaluation
    :type run_name: str (optional)
    """
    args = load_config_dict(filepath=args_fp)
    experiment = mlflow.set_experiment(experiment_name=experiment_name)
    utils.print_experiment_info(experiment)
    if not run_id:
        # Fall back to the last saved run id. read_text() closes the file
        # (the previous open().read() leaked the handle); strip() removes
        # the trailing newline that run_id.txt typically carries.
        run_id = Path(config.CONFIG_DIR, "run_id.txt").read_text().strip()
    X, y = data.preprocess(config.SCORECARD_DATA.absolute())
    # Split ####
    X_train, X_test, y_train, y_test = data.get_data_splits(
        X, y, test_frac=config.TEST_SIZE, seed=config.RANDOM_STATE
    )
    artifacts = load_artifacts(run_id=run_id)
    print(artifacts)
    numeric_vars = config.NUMERIC_VARIABLES
    # Any column not declared numeric in config is treated as categorical.
    categorical_variables = [col for col in X.columns if col not in numeric_vars]
    # Define the feature list from dataset (including categorical and numerical)
    list_features = X.columns.values
    # Selection criteria for BinningProcess: keep features whose
    # information value falls inside the configured [iv_min, iv_max] band.
    selection_criteria = {
        "iv": {"min": args["iv_min"], "max": args["iv_max"], "strategy": args["iv_strategy"]}
    }
    # Instantiate BinningProcess
    binning_process = BinningProcess(
        categorical_variables=categorical_variables,
        variable_names=list_features,
        selection_criteria=selection_criteria,
        special_codes=[args["special_code"]],
    )
    # Fit the binning on the training split only, then apply the same
    # fitted transform to the test split (no leakage from test data).
    X_train_binned = binning_process.fit_transform(
        X_train,
        y_train,
        sample_weight=args["binning_sample_weight"],
        metric=args["binning_metric"],
        metric_special=args["binning_metric_special"],
        metric_missing=args["binning_metric_missing"],
        show_digits=args["binning_show_digits"],
        check_input=args["binning_check_input"],
    )
    X_test_binned = binning_process.transform(
        X_test,
        metric=args["binning_metric"],
        metric_special=args["binning_metric_special"],
        metric_missing=args["binning_metric_missing"],
        show_digits=args["binning_show_digits"],
        check_input=args["binning_check_input"],
    )
    with mlflow.start_run(run_name=run_name):
        evaluate_sc = evaluate.get_scorecard_metrics(
            artifacts["model"],
            X_train=X_train,
            X_test=X_test,
            y_train=y_train,
            y_test=y_test,
        )
        print(json.dumps(evaluate_sc, indent=2))
        # BUG FIX: the train slot was previously fed X_test_binned, so the
        # "train" metrics were silently computed on the test split and the
        # binned training data above was never used.
        evaluate_lr = evaluate.get_lr_metrics(
            lr_model=artifacts["model"].estimator_,
            X_train_binned=X_train_binned,
            X_test_binned=X_test_binned,
            y_train=y_train,
            y_test=y_test,
            N_feat=args["N_top_features"],
        )
        # NOTE(review): evaluate_lr is computed but neither printed nor
        # logged to MLflow — confirm whether it should be reported.
|