diff --git a/docs/api.html b/docs/api.html index 67de3db..a9e1d3c 100644 --- a/docs/api.html +++ b/docs/api.html @@ -218,7 +218,7 @@

BanditRecommender
-add_arm(arm: Arm, binarizer=None) NoReturn
+add_arm(arm: Arm, binarizer=None) None

Adds an _arm_ to the list of arms.

Incorporates the arm into the learning and neighborhood policies with no training data.

@@ -229,14 +229,14 @@

BanditRecommenderReturn type -

No return.

+

Returns nothing.

-fit(decisions: Union[List[Arm], numpy.ndarray, pandas.core.series.Series], rewards: Union[List[Union[int, float]], numpy.ndarray, pandas.core.series.Series], contexts: Union[None, List[List[Union[int, float]]], numpy.ndarray, pandas.core.series.Series, pandas.core.frame.DataFrame] = None) NoReturn
+fit(decisions: Union[List[Arm], numpy.ndarray, pandas.core.series.Series], rewards: Union[List[Union[int, float]], numpy.ndarray, pandas.core.series.Series], contexts: Union[None, List[List[Union[int, float]]], numpy.ndarray, pandas.core.series.Series, pandas.core.frame.DataFrame] = None) None

Fits the recommender to the given decisions, their corresponding rewards and contexts, if any. If the recommender arms have not been initialized using the set_arms, the recommender arms will be set to the list of arms in decisions.

@@ -257,14 +257,14 @@

BanditRecommenderReturn type -

No return.

+

Returns nothing.

-partial_fit(decisions: Union[List[Arm], numpy.ndarray, pandas.core.series.Series], rewards: Union[List[Union[int, float]], numpy.ndarray, pandas.core.series.Series], contexts: Union[None, List[List[Union[int, float]]], numpy.ndarray, pandas.core.series.Series, pandas.core.frame.DataFrame] = None) NoReturn
+partial_fit(decisions: Union[List[Arm], numpy.ndarray, pandas.core.series.Series], rewards: Union[List[Union[int, float]], numpy.ndarray, pandas.core.series.Series], contexts: Union[None, List[List[Union[int, float]]], numpy.ndarray, pandas.core.series.Series, pandas.core.frame.DataFrame] = None) None

Updates the recommender with the given decisions, their corresponding rewards and contexts, if any.

Validates arguments and raises exceptions in case there are violations.

@@ -283,7 +283,7 @@

BanditRecommenderReturn type -

No return.

+

Returns nothing.

@@ -348,21 +348,21 @@

BanditRecommender
-remove_arm(arm: Arm) NoReturn
+remove_arm(arm: Arm) None

Removes an _arm_ from the list of arms.

Parameters

arm (Arm) – The existing arm to be removed.

Return type
-

No return.

+

Returns nothing.

-set_arms(arms: List[Arm], binarizer=None) NoReturn
+set_arms(arms: List[Arm], binarizer=None) None

Initializes the recommender and sets the recommender with the given list of arms. Existing arms not in the given list of arms are removed and new arms are incorporated into the learning and neighborhood policies with no training data. @@ -375,14 +375,14 @@

BanditRecommenderReturn type -

No return.

+

Returns nothing.

-warm_start(arm_to_features: Dict[Arm, List[Union[int, float]]], distance_quantile: Optional[float] = None) NoReturn
+warm_start(arm_to_features: Dict[Arm, List[Union[int, float]]], distance_quantile: Optional[float] = None) None

Warm-start untrained (cold) arms of the multi-armed bandit.

Validates arguments and raises exceptions in case there are violations.

@@ -394,7 +394,7 @@

BanditRecommenderReturn type -

No return.

+

Returns nothing.

@@ -1356,7 +1356,7 @@

NeighborhoodPolicy
-mab2rec.pipeline.train(recommender: mab2rec.rec.BanditRecommender, data: Union[str, pandas.core.frame.DataFrame], user_features: Optional[Union[str, pandas.core.frame.DataFrame]] = None, user_features_list: Optional[Union[str, List[str]]] = None, user_features_dtypes: Optional[Union[str, Dict]] = None, item_features: Optional[Union[str, pandas.core.frame.DataFrame]] = None, item_list: Optional[Union[str, List[Arm]]] = None, item_eligibility: Optional[Union[str, pandas.core.frame.DataFrame]] = None, warm_start: bool = False, warm_start_distance: Optional[float] = None, user_id_col: str = 'user_id', item_id_col: str = 'item_id', response_col: str = 'response', batch_size: int = 100000, save_file: Optional[Union[str, bool]] = None) NoReturn
+mab2rec.pipeline.train(recommender: mab2rec.rec.BanditRecommender, data: Union[str, pandas.core.frame.DataFrame], user_features: Optional[Union[str, pandas.core.frame.DataFrame]] = None, user_features_list: Optional[Union[str, List[str]]] = None, user_features_dtypes: Optional[Union[str, Dict]] = None, item_features: Optional[Union[str, pandas.core.frame.DataFrame]] = None, item_list: Optional[Union[str, List[Arm]]] = None, item_eligibility: Optional[Union[str, pandas.core.frame.DataFrame]] = None, warm_start: bool = False, warm_start_distance: Optional[float] = None, user_id_col: str = 'user_id', item_id_col: str = 'item_id', response_col: str = 'response', batch_size: int = 100000, save_file: Optional[Union[str, bool]] = None) None

Trains Recommender.

Parameters
@@ -1401,7 +1401,7 @@

NeighborhoodPolicyReturn type -

No Return.

+

Returns nothing.

diff --git a/mab2rec/pipeline.py b/mab2rec/pipeline.py index 03abadc..3451ae6 100644 --- a/mab2rec/pipeline.py +++ b/mab2rec/pipeline.py @@ -5,7 +5,7 @@ import os from copy import deepcopy from time import time -from typing import Dict, List, NoReturn, Tuple, Union +from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd @@ -33,7 +33,7 @@ def train(recommender: BanditRecommender, item_id_col: str = Constants.item_id, response_col: str = Constants.response, batch_size: int = 100000, - save_file: Union[str, bool] = None) -> NoReturn: + save_file: Union[str, bool] = None) -> None: """ Trains Recommender. @@ -94,7 +94,7 @@ def train(recommender: BanditRecommender, Returns ------- - No Return. + Returns nothing. """ _validate_recommender(recommender) _validate_common_args(data, user_features, user_features_list, user_features_dtypes, item_features, item_list, diff --git a/mab2rec/rec.py b/mab2rec/rec.py index 8ca2cf7..38f890d 100644 --- a/mab2rec/rec.py +++ b/mab2rec/rec.py @@ -2,7 +2,7 @@ # Copyright FMR LLC # SPDX-License-Identifier: Apache-2.0 -from typing import Dict, List, NoReturn, Tuple, Union +from typing import Dict, List, Tuple, Union import numpy as np import pandas as pd @@ -152,7 +152,7 @@ def __init__(self, learning_policy: Union[LearningPolicy.EpsilonGreedy, self.mab = None self._validate_mab_args() - def _init(self, arms: List[Union[Arm]]) -> NoReturn: + def _init(self, arms: List[Union[Arm]]) -> None: """Initializes recommender with given list of arms. Parameters @@ -163,11 +163,11 @@ def _init(self, arms: List[Union[Arm]]) -> NoReturn: Returns ------- - No return. + Returns nothing """ self.mab = MAB(arms, self.learning_policy, self.neighborhood_policy, self.seed, self.n_jobs, self.backend) - def add_arm(self, arm: Arm, binarizer=None) -> NoReturn: + def add_arm(self, arm: Arm, binarizer=None) -> None: """Adds an _arm_ to the list of arms. Incorporates the arm into the learning and neighborhood policies with no training data. 
@@ -181,7 +181,7 @@ def add_arm(self, arm: Arm, binarizer=None) -> NoReturn: Returns ------- - No return. + Returns nothing. """ if self.mab is None: self._init([arm]) @@ -190,7 +190,7 @@ def add_arm(self, arm: Arm, binarizer=None) -> NoReturn: def fit(self, decisions: Union[List[Arm], np.ndarray, pd.Series], rewards: Union[List[Num], np.ndarray, pd.Series], - contexts: Union[None, List[List[Num]], np.ndarray, pd.Series, pd.DataFrame] = None) -> NoReturn: + contexts: Union[None, List[List[Num]], np.ndarray, pd.Series, pd.DataFrame] = None) -> None: """Fits the recommender the given *decisions*, their corresponding *rewards* and *contexts*, if any. If the recommender arms has not been initialized using the `set_arms`, the recommender arms will be set to the list of arms in *decisions*. @@ -212,7 +212,7 @@ def fit(self, decisions: Union[List[Arm], np.ndarray, pd.Series], Returns ------- - No return. + Returns nothing. """ if self.mab is None: self._init(np.unique(decisions).tolist()) @@ -220,7 +220,7 @@ def fit(self, decisions: Union[List[Arm], np.ndarray, pd.Series], def partial_fit(self, decisions: Union[List[Arm], np.ndarray, pd.Series], rewards: Union[List[Num], np.ndarray, pd.Series], - contexts: Union[None, List[List[Num]], np.ndarray, pd.Series, pd.DataFrame] = None) -> NoReturn: + contexts: Union[None, List[List[Num]], np.ndarray, pd.Series, pd.DataFrame] = None) -> None: """Updates the recommender with the given *decisions*, their corresponding *rewards* and *contexts*, if any. Validates arguments and raises exceptions in case there are violations. @@ -240,7 +240,7 @@ def partial_fit(self, decisions: Union[List[Arm], np.ndarray, pd.Series], Returns ------- - No return. + Returns nothing. 
""" self._validate_mab(is_fit=True) self.mab.partial_fit(decisions, rewards, contexts) @@ -362,7 +362,7 @@ def recommend(self, contexts: Union[None, List[List[Num]], np.ndarray, pd.Series else: return recommendations[0] - def remove_arm(self, arm: Arm) -> NoReturn: + def remove_arm(self, arm: Arm) -> None: """Removes an _arm_ from the list of arms. Parameters @@ -372,12 +372,12 @@ def remove_arm(self, arm: Arm) -> NoReturn: Returns ------- - No return. + Returns nothing. """ self._validate_mab() self.mab.remove_arm(arm) - def set_arms(self, arms: List[Arm], binarizer=None) -> NoReturn: + def set_arms(self, arms: List[Arm], binarizer=None) -> None: """Initializes the recommender and sets the recommender with given list of arms. Existing arms not in the given list of arms are removed and new arms are incorporated into the learning and neighborhood policies with no training data. @@ -392,7 +392,7 @@ def set_arms(self, arms: List[Arm], binarizer=None) -> NoReturn: Returns ------- - No return. + Returns nothing. """ # Initialize mab @@ -412,7 +412,7 @@ def set_arms(self, arms: List[Arm], binarizer=None) -> NoReturn: if new_arm not in self.mab.arms: self.add_arm(new_arm, binarizer) - def warm_start(self, arm_to_features: Dict[Arm, List[Num]], distance_quantile: float = None) -> NoReturn: + def warm_start(self, arm_to_features: Dict[Arm, List[Num]], distance_quantile: float = None) -> None: """Warm-start untrained (cold) arms of the multi-armed bandit. Validates arguments and raises exceptions in case there are violations. @@ -427,7 +427,7 @@ def warm_start(self, arm_to_features: Dict[Arm, List[Num]], distance_quantile: f Returns ------- - No return. + Returns nothing. 
""" self._validate_mab(is_fit=True) self.mab.warm_start(arm_to_features, distance_quantile) diff --git a/mab2rec/utils.py b/mab2rec/utils.py index be595fb..a2a1438 100644 --- a/mab2rec/utils.py +++ b/mab2rec/utils.py @@ -4,7 +4,7 @@ import json import pickle -from typing import Dict, List, NamedTuple, NoReturn, Union +from typing import Dict, List, NamedTuple, Union import numpy as np import pandas as pd @@ -396,7 +396,7 @@ def get_exclusion_list(arms, eligible_list): def print_interaction_stats(df: pd.DataFrame, user_id_col: str = Constants.user_id, - item_id_col: str = Constants.item_id, response_col: str = Constants.response) -> NoReturn: + item_id_col: str = Constants.item_id, response_col: str = Constants.response) -> None: """ Print number of rows, number of users, number of items in interaction data. @@ -413,7 +413,7 @@ def print_interaction_stats(df: pd.DataFrame, user_id_col: str = Constants.user_ Returns ------- - No return. + Returns nothing. """ print(f"Number of rows: {len(df):,}") @@ -445,7 +445,7 @@ def merge_user_features(responses_df: pd.DataFrame, user_features_df: pd.DataFra return responses_df.merge(df, on=user_id_col, how="left") -def save_json(obj, json_file) -> NoReturn: +def save_json(obj, json_file) -> None: """ Save obj as json file. """ @@ -453,7 +453,7 @@ def save_json(obj, json_file) -> NoReturn: json.dump(obj, f) -def save_pickle(obj, pickle_file) -> NoReturn: +def save_pickle(obj, pickle_file) -> None: """ Save serializable object as pickle file. """