Source code for getml.feature_learning.fastprop_model

# Copyright 2021 The SQLNet Company GmbH

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

"""
Feature learning based on propositionalization.
"""

from dataclasses import dataclass, field
from typing import ClassVar, List

from .aggregations import _Aggregations
from .aggregations import fastprop as fastprop_aggregations
from .feature_learner import _FeatureLearner
from .loss_functions import SquareLoss
from .validation import _validate_dfs_model_parameters

# --------------------------------------------------------------------


@dataclass(repr=False)
class FastPropModel(_FeatureLearner):
    """
    Generates simple features based on propositionalization.

    :class:`~getml.feature_learning.FastPropModel` generates simple and
    easily interpretable features for relational data and time series.
    It is based on a propositionalization approach and has been optimized
    for speed and memory efficiency.
    :class:`~getml.feature_learning.FastPropModel` generates a large number
    of features and selects the most relevant ones based on the pair-wise
    correlation with the target(s).

    It is recommended to combine
    :class:`~getml.feature_learning.FastPropModel` with the
    :class:`~getml.preprocessors.Mapping` and
    :class:`~getml.preprocessors.Seasonal` preprocessors, which can
    drastically improve predictive accuracy.

    Args:
        aggregation (List[:class:`~getml.feature_learning.aggregations`], optional):
            Mathematical operations used by the automated feature
            learning algorithm to create new features.

            Must be from :mod:`~getml.feature_learning.aggregations`.

        loss_function (:class:`~getml.feature_learning.loss_functions`, optional):
            Objective function used by the feature learning algorithm
            to optimize your features. For regression problems use
            :class:`~getml.feature_learning.loss_functions.SquareLoss` and
            for classification problems use
            :class:`~getml.feature_learning.loss_functions.CrossEntropyLoss`.

        min_df (int, optional):
            Only relevant for columns with role
            :const:`~getml.data.roles.text`. The minimum number of fields
            (i.e. rows) in a :const:`~getml.data.roles.text` column a given
            word is required to appear in to be included in the bag of words.
            Range: [1, :math:`\\infty`]

        num_features (int, optional):
            Number of features generated by the feature learning algorithm.
            Range: [1, :math:`\\infty`]

        n_most_frequent (int, optional):
            :class:`~getml.feature_learning.FastPropModel` can find the N
            most frequent categories in a categorical column and derive
            features from them. The parameter determines how many categories
            should be used.
            Range: [0, :math:`\\infty`]

        num_threads (int, optional):
            Number of threads used by the feature learning algorithm. If set
            to zero or a negative value, the number of threads will be
            determined automatically by the getML engine.
            Range: [0, :math:`\\infty`]

        sampling_factor (float, optional):
            FastProp uses a bootstrapping procedure (sampling with
            replacement) to train each of the features. The sampling factor
            is proportional to the share of the samples randomly drawn from
            the population table every time FastProp generates a new feature.
            A lower sampling factor (but still greater than 0.0) will lead to
            less danger of overfitting, less complex statements and faster
            training. When set to 1.0, roughly 2,000 samples are drawn from
            the population table. If the population table contains fewer than
            2,000 samples, standard bagging is used instead. When set to 0.0,
            there will be no sampling at all.
            Range: [0, :math:`\\infty`]

        silent (bool, optional):
            Controls the logging during training.

        vocab_size (int, optional):
            Determines the maximum number of words that are extracted in
            total from :const:`~getml.data.roles.text` columns. This can be
            interpreted as the maximum size of the bag of words.
            Range: [0, :math:`\\infty`]

    Example:
        .. code-block:: python

            population_placeholder = getml.data.Placeholder("population")
            order_placeholder = getml.data.Placeholder("order")
            trans_placeholder = getml.data.Placeholder("trans")

            population_placeholder.join(order_placeholder,
                                        join_key="account_id")

            population_placeholder.join(trans_placeholder,
                                        join_key="account_id",
                                        time_stamp="date")

            feature_selector = getml.predictors.XGBoostClassifier(
                reg_lambda=500
            )

            predictor = getml.predictors.XGBoostClassifier(
                reg_lambda=500
            )

            agg = getml.feature_learning.aggregations

            feature_learner = getml.feature_learning.FastPropModel(
                aggregation=[
                    agg.Avg,
                    agg.Count,
                    agg.Max,
                    agg.Median,
                    agg.Min,
                    agg.Sum,
                    agg.Var
                ],
                num_features=200,
                loss_function=getml.feature_learning.loss_functions.CrossEntropyLoss
            )

            pipe = getml.pipeline.Pipeline(
                tags=["dfs"],
                population=population_placeholder,
                peripheral=[order_placeholder, trans_placeholder],
                feature_learners=feature_learner,
                feature_selectors=feature_selector,
                predictors=predictor,
                share_selected_features=0.5
            )

            pipe.check(
                population_table=population_train,
                peripheral_tables={"order": order, "trans": trans}
            )

            pipe = pipe.fit(
                population_table=population_train,
                peripheral_tables={"order": order, "trans": trans}
            )

            in_sample = pipe.score(
                population_table=population_train,
                peripheral_tables={"order": order, "trans": trans}
            )

            out_of_sample = pipe.score(
                population_table=population_test,
                peripheral_tables={"order": order, "trans": trans}
            )
    """

    # ----------------------------------------------------------------

    agg_sets: ClassVar[_Aggregations] = fastprop_aggregations

    # ----------------------------------------------------------------

    aggregation: List[str] = field(
        default_factory=lambda: fastprop_aggregations.Default
    )
    loss_function: str = SquareLoss
    min_df: int = 30
    n_most_frequent: int = 0
    num_features: int = 200
    num_threads: int = 0
    sampling_factor: float = 1.0
    silent: bool = True
    vocab_size: int = 500

    # ----------------------------------------------------------------
    def validate(self, params=None):
        """Checks both the types and the values of all instance
        variables and raises an exception if something is off.

        Args:
            params (dict, optional):
                A dictionary containing the parameters to validate.
                params can hold the full set or a subset of the
                parameters explained in
                :class:`~getml.feature_learning.FastPropModel`.
                If params is None, the current set of parameters
                in the instance dictionary will be validated.
        """

        # ------------------------------------------------------------

        if params is None:
            params = self.__dict__
        else:
            # Reject anything that is not a dictionary before merging,
            # so the caller gets a meaningful error message.
            if not isinstance(params, dict):
                raise ValueError("params must be None or a dictionary!")
            params = {**self.__dict__, **params}

        # ------------------------------------------------------------

        for kkey in params:
            if kkey not in type(self)._supported_params:
                raise KeyError(
                    f"Instance variable '{kkey}' is not supported in {self.type}."
                )

        # ------------------------------------------------------------

        _validate_dfs_model_parameters(**params)
# --------------------------------------------------------------------
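
# --------------------------------------------------------------------
# Usage sketch (not part of the original module): how the validate()
# method above is typically called from user code. The parameter values
# below are illustrative assumptions, not recommended settings.
#
#     from getml.feature_learning import (
#         FastPropModel,
#         aggregations,
#         loss_functions,
#     )
#
#     feature_learner = FastPropModel(
#         aggregation=[aggregations.Avg, aggregations.Count, aggregations.Max],
#         loss_function=loss_functions.CrossEntropyLoss,
#         num_features=100,
#     )
#
#     # Validate the full set of instance variables ...
#     feature_learner.validate()
#
#     # ... or merge and validate an overridden subset of parameters.
#     feature_learner.validate(params={"min_df": 50, "vocab_size": 1000})
#
# --------------------------------------------------------------------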