{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# L1 Penalty and Sparsity in Logistic Regression\n\nComparison of the sparsity (percentage of zero coefficients) of solutions when\nL1, L2 and Elastic-Net penalty are used for different values of C. We can see\nthat large values of C give more freedom to the model.  Conversely, smaller\nvalues of C constrain the model more. In the L1 penalty case, this leads to\nsparser solutions. As expected, the Elastic-Net penalty sparsity is between\nthat of L1 and L2.\n\nWe classify 8x8 images of digits into two classes: 0-4 against 5-9.\nThe visualization shows coefficients of the models for varying C.\n"
      ]
    },
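    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\nFor reference, writing $\\rho$ for `l1_ratio`, the Elastic-Net penalized\nlogistic regression objective (as documented in the scikit-learn user guide)\nis\n\n$$\\min_{w, c} \\frac{1 - \\rho}{2} w^T w + \\rho \\|w\\|_1 + C \\sum_{i=1}^{n} \\log\\left(\\exp(-y_i (X_i^T w + c)) + 1\\right),$$\n\nso $\\rho = 1$ recovers a pure L1 penalty and $\\rho = 0$ a pure L2 penalty.\nSince C multiplies the data-fit term, smaller C means stronger regularization.\n"
      ]
    },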
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Authors: The scikit-learn developers\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\n\nX, y = datasets.load_digits(return_X_y=True)\n\nX = StandardScaler().fit_transform(X)\n\n# classify small against large digits\ny = (y > 4).astype(int)\n\nl1_ratio = 0.5  # L1 weight in the Elastic-Net regularization\n\nfig, axes = plt.subplots(3, 3)\n\n# Set regularization parameter\nfor i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)):\n    # Increase tolerance for short training time\n    clf_l1_LR = LogisticRegression(C=C, l1_ratio=1, tol=0.01, solver=\"saga\")\n    clf_l2_LR = LogisticRegression(C=C, l1_ratio=0, tol=0.01, solver=\"saga\")\n    clf_en_LR = LogisticRegression(C=C, l1_ratio=l1_ratio, tol=0.01, solver=\"saga\")\n    clf_l1_LR.fit(X, y)\n    clf_l2_LR.fit(X, y)\n    clf_en_LR.fit(X, y)\n\n    coef_l1_LR = clf_l1_LR.coef_.ravel()\n    coef_l2_LR = clf_l2_LR.coef_.ravel()\n    coef_en_LR = clf_en_LR.coef_.ravel()\n\n    # coef_l1_LR contains zeros due to the\n    # L1 sparsity inducing norm\n\n    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100\n    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100\n    sparsity_en_LR = np.mean(coef_en_LR == 0) * 100\n\n    print(f\"C={C:.2f}\")\n    print(f\"{'Sparsity with L1 penalty:':<40} {sparsity_l1_LR:.2f}%\")\n    print(f\"{'Sparsity with Elastic-Net penalty:':<40} {sparsity_en_LR:.2f}%\")\n    print(f\"{'Sparsity with L2 penalty:':<40} {sparsity_l2_LR:.2f}%\")\n    print(f\"{'Score with L1 penalty:':<40} {clf_l1_LR.score(X, y):.2f}\")\n    print(f\"{'Score with Elastic-Net penalty:':<40} {clf_en_LR.score(X, y):.2f}\")\n    print(f\"{'Score with L2 penalty:':<40} {clf_l2_LR.score(X, y):.2f}\")\n\n    if i == 0:\n        axes_row[0].set_title(\"L1 penalty\")\n        axes_row[1].set_title(\"Elastic-Net\\nl1_ratio = %s\" % l1_ratio)\n        axes_row[2].set_title(\"L2 penalty\")\n\n    for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]):\n        ax.imshow(\n            np.abs(coefs.reshape(8, 8)),\n            interpolation=\"nearest\",\n            cmap=\"binary\",\n            vmax=1,\n            vmin=0,\n        )\n        ax.set_xticks(())\n        ax.set_yticks(())\n\n    axes_row[0].set_ylabel(f\"C = {C}\")\n\nplt.show()"
      ]
    }
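,
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\nThe scores printed above are accuracies on the training data. As a quick\ncomplementary check, here is a minimal sketch (our addition, not part of the\noriginal example) that refits the three penalties at `C=0.1` and scores them\non a held-out split. It reuses `X`, `y`, `l1_ratio` and `LogisticRegression`\nfrom above; the `train_test_split` call and `random_state=0` are illustrative\nchoices.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "from sklearn.model_selection import train_test_split\n\n# Hold out 25% of the samples (train_test_split's default test_size);\n# random_state=0 is an arbitrary choice for reproducibility\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)\n\nfor name, l1r in [(\"L1\", 1), (\"Elastic-Net\", l1_ratio), (\"L2\", 0)]:\n    clf = LogisticRegression(C=0.1, l1_ratio=l1r, tol=0.01, solver=\"saga\")\n    clf.fit(X_train, y_train)\n    print(f\"{name + ' penalty:':<25} test accuracy = {clf.score(X_test, y_test):.2f}\")"
      ]
    }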
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.11.14"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}