"""Leave one out coding"""
import numpy as np
import pandas as pd
import category_encoders.utils as util
from sklearn.utils.random import check_random_state

__author__ = 'hbghhy'


class LeaveOneOutEncoder(util.BaseEncoder, util.SupervisedTransformerMixin):
"""Leave one out coding for categorical features.
This is very similar to target encoding but excludes the current row's
target when calculating the mean target for a level to reduce the effect
of outliers.
Parameters
----------
verbose: int
integer indicating verbosity of the output. 0 for none.
cols: list
a list of columns to encode, if None, all string columns will be encoded.
drop_invariant: bool
boolean for whether or not to drop columns with 0 variance.
return_df: bool
boolean for whether to return a pandas DataFrame from transform (otherwise it will be a numpy array).
handle_missing: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
handle_unknown: str
options are 'error', 'return_nan' and 'value', defaults to 'value', which returns the target mean.
sigma: float
adds normal (Gaussian) distribution noise into training data in order to decrease overfitting (testing
data are untouched). Sigma gives the standard deviation (spread or "width") of the normal distribution.
The optimal value is commonly between 0.05 and 0.6. The default is to not add noise, but that leads
to significantly suboptimal results.
    Example
    -------
    >>> from category_encoders import *
    >>> import pandas as pd
    >>> from sklearn.datasets import fetch_openml
    >>> bunch = fetch_openml(name="house_prices", as_frame=True)
    >>> display_cols = ["Id", "MSSubClass", "MSZoning", "LotFrontage", "YearBuilt", "Heating", "CentralAir"]
    >>> y = bunch.target
    >>> X = pd.DataFrame(bunch.data, columns=bunch.feature_names)[display_cols]
    >>> enc = LeaveOneOutEncoder(cols=['CentralAir', 'Heating']).fit(X, y)
    >>> numeric_dataset = enc.transform(X)
    >>> print(numeric_dataset.info())
    <class 'pandas.core.frame.DataFrame'>
    RangeIndex: 1460 entries, 0 to 1459
    Data columns (total 7 columns):
     #   Column       Non-Null Count  Dtype
    ---  ------       --------------  -----
     0   Id           1460 non-null   float64
     1   MSSubClass   1460 non-null   float64
     2   MSZoning     1460 non-null   object
     3   LotFrontage  1201 non-null   float64
     4   YearBuilt    1460 non-null   float64
     5   Heating      1460 non-null   float64
     6   CentralAir   1460 non-null   float64
    dtypes: float64(6), object(1)
    memory usage: 80.0+ KB
    None
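
    The leave-one-out behaviour itself is easiest to see on a small frame
    (a hypothetical toy example, added here purely for illustration): during
    ``fit_transform`` each row's own target is excluded, so a level with target
    sum ``s`` and count ``n`` encodes row ``i`` as ``(s - y_i) / (n - 1)``.

    >>> X_toy = pd.DataFrame({'color': ['a', 'a', 'a', 'b', 'b']})
    >>> y_toy = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0])
    >>> LeaveOneOutEncoder(cols=['color']).fit_transform(X_toy, y_toy)['color'].tolist()
    [0.5, 1.0, 0.5, 1.0, 0.0]

    At plain transform time (no ``y``) a level is replaced by its ordinary mean,
    and unknown levels fall back to the global target mean because of the
    default ``handle_unknown='value'``:

    >>> enc_toy = LeaveOneOutEncoder(cols=['color']).fit(X_toy, y_toy)
    >>> enc_toy.transform(pd.DataFrame({'color': ['a', 'z']}))['color'].tolist()
    [0.6666666666666666, 0.6]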

    References
    ----------
    .. [1] Originally introduced by Owen Zhang (the original reference is no
           longer available); a short explanation is given at:
           https://datascience.stackexchange.com/questions/10839/what-is-difference-between-one-hot-encoding-and-leave-one-out-encoding

    """

    prefit_ordinal = False
    encoding_relation = util.EncodingRelation.ONE_TO_ONE

    def __init__(self, verbose=0, cols=None, drop_invariant=False, return_df=True,
                 handle_unknown='value', handle_missing='value', random_state=None, sigma=None):
        super().__init__(verbose=verbose, cols=cols, drop_invariant=drop_invariant, return_df=return_df,
                         handle_unknown=handle_unknown, handle_missing=handle_missing)
        self.mapping = None
        self._mean = None
        self.random_state = random_state
        self.sigma = sigma

    def _fit(self, X, y, **kwargs):
        y = y.astype(float)
        # Learn, for every encoded column, the per-level target 'sum' and 'count'.
        categories = self.fit_leave_one_out(
            X, y,
            cols=self.cols
        )
        self.mapping = categories

    def _transform(self, X, y=None):
        # y is available during fit_transform; only then can the current row's
        # target be left out. At plain transform time y is None.
        if y is not None:
            y = y.astype(float)
        X = self.transform_leave_one_out(
            X, y,
            mapping=self.mapping
        )
        return X

    def _more_tags(self):
        tags = super()._more_tags()
        # The transform output differs depending on whether y is supplied.
        tags["predict_depends_on_y"] = True
        return tags

    def fit_leave_one_out(self, X_in, y, cols=None):
        X = X_in.copy(deep=True)

        if cols is None:
            cols = X.columns

        # Global target mean: the fallback value returned for missing and
        # unknown levels when handle_missing/handle_unknown is 'value'.
        self._mean = y.mean()
        return {col: self.fit_column_map(X[col], y) for col in cols}

    def fit_column_map(self, series, y):
        category = pd.Categorical(series)

        categories = category.categories
        codes = category.codes.copy()

        # pd.Categorical marks missing values with code -1; give them their own
        # code and append NaN as an explicit category so they are aggregated too.
        codes[codes == -1] = len(categories)
        categories = np.append(categories, np.nan)

        return_map = pd.Series({code: cat for code, cat in enumerate(categories)})

        # Target 'sum' and 'count' per level, re-indexed by the category values.
        result = y.groupby(codes).agg(['sum', 'count'])
        return result.rename(return_map)
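

if __name__ == '__main__':
    # Illustration only: a hypothetical self-check sketch, not part of the
    # library API. It shows the per-level statistics built by fit_column_map
    # and the effect of sigma (noise on training-time encodings, as described
    # in the class docstring).
    X = pd.DataFrame({'color': ['a', 'a', 'a', 'b', 'b']})
    y = pd.Series([1.0, 0.0, 1.0, 0.0, 1.0])

    # Per-level target 'sum' and 'count', e.g. level 'a' -> sum 2.0, count 3.
    print(LeaveOneOutEncoder(cols=['color']).fit_column_map(X['color'], y))

    # With sigma set, Gaussian noise perturbs the encodings produced while y
    # is known (fit_transform); plain transform(X) output stays noise-free.
    noisy = LeaveOneOutEncoder(cols=['color'], sigma=0.05, random_state=42)
    print(noisy.fit_transform(X, y)['color'].tolist())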