{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Imputing missing values with variants of IterativeImputer\n\n.. currentmodule:: sklearn\n\nThe :class:`~impute.IterativeImputer` class is very flexible - it can be\nused with a variety of estimators to do round-robin regression, treating every\nvariable as an output in turn.\n\nIn this example we compare some estimators for the purpose of missing feature\nimputation with :class:`~impute.IterativeImputer`:\n\n* :class:`~linear_model.BayesianRidge`: regularized linear regression\n* :class:`~ensemble.RandomForestRegressor`: forests of randomized trees regression\n* :func:`~pipeline.make_pipeline` (:class:`~kernel_approximation.Nystroem`,\n  :class:`~linear_model.Ridge`): a pipeline with the expansion of a degree 2\n  polynomial kernel and regularized linear regression\n* :class:`~neighbors.KNeighborsRegressor`: comparable to other KNN\n  imputation approaches\n\nOf particular interest is the ability of\n:class:`~impute.IterativeImputer` to mimic the behavior of missForest, a\npopular imputation package for R.\n\nNote that :class:`~neighbors.KNeighborsRegressor` is different from KNN\nimputation, which learns from samples with missing values by using a distance\nmetric that accounts for missing values, rather than imputing them.\n\nThe goal is to compare different estimators to see which one is best for the\n:class:`~impute.IterativeImputer` when using a\n:class:`~linear_model.BayesianRidge` estimator on the California housing\ndataset with a single value randomly removed from each row.\n\nFor this particular pattern of missing values we see that\n:class:`~linear_model.BayesianRidge` and\n:class:`~ensemble.RandomForestRegressor` give the best results.\n\nIt should be noted that some estimators such as\n:class:`~ensemble.HistGradientBoostingRegressor` can natively deal with\nmissing features and are often recommended over building pipelines with\ncomplex and costly missing values imputation strategies.\n"
      ]
    },
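    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Before running the full comparison, the following minimal, self-contained\nsketch illustrates the distinction just noted (it is an illustrative addition;\nthe toy array is hypothetical): :class:`~impute.KNNImputer` fills in a missing\nentry from the nearest neighbours found with a NaN-aware distance metric,\nwhereas :class:`~impute.IterativeImputer` wrapping a\n:class:`~neighbors.KNeighborsRegressor` performs round-robin regression,\npredicting each incomplete feature from the (initially mean-imputed) others.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "import numpy as np\n\n# The experimental flag is needed here as well if this cell is run on its own.\nfrom sklearn.experimental import enable_iterative_imputer  # noqa: F401\nfrom sklearn.impute import IterativeImputer, KNNImputer\nfrom sklearn.neighbors import KNeighborsRegressor\n\n# A hypothetical toy array with a couple of missing entries.\nX_toy = np.array(\n    [[1.0, 2.0, np.nan], [3.0, 4.0, 3.0], [np.nan, 6.0, 5.0], [8.0, 8.0, 7.0]]\n)\n\n# KNN imputation: sample-to-sample distances use a NaN-aware metric\n# (nan_euclidean), so no regression model is fitted.\nprint(KNNImputer(n_neighbors=2).fit_transform(X_toy))\n\n# Round-robin regression: each feature with missing values is predicted in\n# turn by a KNeighborsRegressor trained on the other (imputed) features.\nprint(\n    IterativeImputer(\n        estimator=KNeighborsRegressor(n_neighbors=2), random_state=0\n    ).fit_transform(X_toy)\n)"
      ]
    },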
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.ensemble import RandomForestRegressor\n\n# To use this experimental feature, we need to explicitly ask for it:\nfrom sklearn.experimental import enable_iterative_imputer  # noqa: F401\nfrom sklearn.impute import IterativeImputer, SimpleImputer\nfrom sklearn.kernel_approximation import Nystroem\nfrom sklearn.linear_model import BayesianRidge, Ridge\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import RobustScaler\n\nN_SPLITS = 5\n\nX_full, y_full = fetch_california_housing(return_X_y=True)\n# ~2k samples is enough for the purpose of the example.\n# Remove the following two lines for a slower run with different error bars.\nX_full = X_full[::10]\ny_full = y_full[::10]\nn_samples, n_features = X_full.shape\n\n\ndef compute_score_for(X, y, imputer=None):\n    # We scale data before imputation and training a target estimator,\n    # because our target estimator and some of the imputers assume\n    # that the features have similar scales.\n    if imputer is None:\n        estimator = make_pipeline(RobustScaler(), BayesianRidge())\n    else:\n        estimator = make_pipeline(RobustScaler(), imputer, BayesianRidge())\n    return cross_val_score(\n        estimator, X, y, scoring=\"neg_mean_squared_error\", cv=N_SPLITS\n    )\n\n\n# Estimate the score on the entire dataset, with no missing values\nscore_full_data = pd.DataFrame(\n    compute_score_for(X_full, y_full),\n    columns=[\"Full Data\"],\n)\n\n# Add a single missing value to each row\nrng = np.random.RandomState(0)\nX_missing = X_full.copy()\ny_missing = y_full\nmissing_samples = np.arange(n_samples)\nmissing_features = rng.choice(n_features, n_samples, replace=True)\nX_missing[missing_samples, missing_features] = np.nan\n\n# Estimate the score after imputation (mean and median strategies)\nscore_simple_imputer = pd.DataFrame()\nfor strategy in (\"mean\", \"median\"):\n    score_simple_imputer[strategy] = compute_score_for(\n        X_missing, y_missing, SimpleImputer(strategy=strategy)\n    )\n\n# Estimate the score after iterative imputation of the missing values\n# with different estimators\nnamed_estimators = [\n    (\"Bayesian Ridge\", BayesianRidge()),\n    (\n        \"Random Forest\",\n        RandomForestRegressor(\n            # We tuned the hyperparameters of the RandomForestRegressor to get a good\n            # enough predictive performance for a restricted execution time.\n            n_estimators=5,\n            max_depth=10,\n            bootstrap=True,\n            max_samples=0.5,\n            n_jobs=2,\n            random_state=0,\n        ),\n    ),\n    (\n        \"Nystroem + Ridge\",\n        make_pipeline(\n            Nystroem(kernel=\"polynomial\", degree=2, random_state=0), Ridge(alpha=1e4)\n        ),\n    ),\n    (\n        \"k-NN\",\n        KNeighborsRegressor(n_neighbors=10),\n    ),\n]\nscore_iterative_imputer = pd.DataFrame()\n# Iterative imputer is sensitive to the tolerance and\n# dependent on the estimator used internally.\n# We tuned the tolerance to keep this example run with limited computational\n# resources while not changing the results too much compared to keeping the\n# stricter default value for the tolerance parameter.\ntolerances = 
(1e-3, 1e-1, 1e-1, 1e-2)\nfor (name, impute_estimator), tol in zip(named_estimators, tolerances):\n    score_iterative_imputer[name] = compute_score_for(\n        X_missing,\n        y_missing,\n        IterativeImputer(\n            random_state=0, estimator=impute_estimator, max_iter=40, tol=tol\n        ),\n    )\n\nscores = pd.concat(\n    [score_full_data, score_simple_imputer, score_iterative_imputer],\n    keys=[\"Original\", \"SimpleImputer\", \"IterativeImputer\"],\n    axis=1,\n)\n\n# plot california housing results\nfig, ax = plt.subplots(figsize=(13, 6))\nmeans = -scores.mean()\nerrors = scores.std()\nmeans.plot.barh(xerr=errors, ax=ax)\nax.set_title(\"California Housing Regression with Different Imputation Methods\")\nax.set_xlabel(\"MSE (smaller is better)\")\nax.set_yticks(np.arange(means.shape[0]))\nax.set_yticklabels([\" w/ \".join(label) for label in means.index.tolist()])\nplt.tight_layout(pad=1)\nplt.show()"
      ]
    }
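,
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "As noted in the introduction, some estimators handle missing values natively.\nThe closing cell below is a minimal sketch of that alternative (an\nillustrative addition, not part of the original benchmark): it reuses the\n``X_missing``, ``y_missing`` and ``N_SPLITS`` objects defined above and trains\n:class:`~ensemble.HistGradientBoostingRegressor` directly on data containing\nNaN values, with no imputation step at all.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Minimal sketch: HistGradientBoostingRegressor accepts NaN inputs directly,\n# so the whole imputation step can be skipped. Reuses X_missing, y_missing,\n# N_SPLITS and cross_val_score from the cell above.\nfrom sklearn.ensemble import HistGradientBoostingRegressor\n\nscore_native = cross_val_score(\n    HistGradientBoostingRegressor(random_state=0),\n    X_missing,\n    y_missing,\n    scoring=\"neg_mean_squared_error\",\n    cv=N_SPLITS,\n)\nprint(\n    f\"MSE without imputation: \"\n    f\"{-score_native.mean():.3f} +/- {score_native.std():.3f}\"\n)"
      ]
    }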
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.11.14"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}