From 06f85feeb730aa36532bf0791529ecf49f316372 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 22 Sep 2023 12:28:25 -0400 Subject: [PATCH 01/25] initial commit for binary outcome, warn when clf passed but disc_treat=False Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 56 +++++++++++++++++++++-------- econml/dml/_rlearner.py | 5 +-- econml/dml/causal_forest.py | 6 +++- econml/dml/dml.py | 44 ++++++++++++++++++----- econml/iv/dml/_dml.py | 58 ++++++++++++++++++++---------- econml/iv/dr/_dr.py | 55 ++++++++++++++++++++-------- econml/panel/dml/_dml.py | 14 +++++--- econml/tests/test_ortho_learner.py | 18 +++++----- 8 files changed, 184 insertions(+), 72 deletions(-) diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index becff2499..498376f1d 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -432,10 +432,17 @@ def _gen_ortho_learner_model_final(self): """ def __init__(self, *, - discrete_treatment, treatment_featurizer, - discrete_instrument, categories, cv, random_state, - mc_iters=None, mc_agg='mean'): + binary_outcome, + discrete_treatment, + treatment_featurizer, + discrete_instrument, + categories, + cv, + random_state, + mc_iters=None, + mc_agg='mean'): self.cv = cv + self.binary_outcome = binary_outcome self.discrete_treatment = discrete_treatment self.treatment_featurizer = treatment_featurizer self.discrete_instrument = discrete_instrument @@ -525,21 +532,42 @@ def _subinds_check_none(self, var, inds): def _strata(self, Y, T, X=None, W=None, Z=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None, cache_values=False, only_final=False, check_input=True): + arrs = [] + if self.binary_outcome: + arrs.append(Y) + if self.discrete_treatment: + arrs.append(T) if self.discrete_instrument: - Z = LabelEncoder().fit_transform(np.ravel(Z)) + arrs.append(Z) - if self.discrete_treatment: - enc = LabelEncoder() - T = enc.fit_transform(np.ravel(T)) - if self.discrete_instrument: - return T + Z * len(enc.classes_) - else: - return T - elif self.discrete_instrument: - return Z - else: + return self._single_strata_from_discrete_arrays(arrs) + + """ + Combine multiple discrete arrays into a single array for stratification purposes: + + e.g. if arrs are + [0 1 2 0 1 2 0 1 2 0 1 2], + [0 1 0 1 0 1 0 1 0 1 0 1], + [0 0 0 0 0 0 1 1 1 1 1 1] + then output will be + [0 8 4 6 2 10 1 9 5 7 3 11] + + Every distinct combination of these discrete arrays will have it's own label. + """ + + def _single_strata_from_discrete_arrays(self, arrs): + if not arrs: return None + curr_array = np.zeros(shape=arrs[0].ravel().shape, dtype='int') + + for arr in arrs: + enc = LabelEncoder() + temp = enc.fit_transform(arr.ravel()) + curr_array = temp + curr_array * len(enc.classes_) + + return curr_array + def _prefit(self, Y, T, *args, only_final=False, **kwargs): # generate an instance of the final model diff --git a/econml/dml/_rlearner.py b/econml/dml/_rlearner.py index d13b99a2f..f5362f288 100644 --- a/econml/dml/_rlearner.py +++ b/econml/dml/_rlearner.py @@ -271,9 +271,10 @@ def _gen_rlearner_model_final(self): is multidimensional, then the average of the MSEs for each dimension of Y is returned. 
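As an aside on the _single_strata_from_discrete_arrays helper added to _ortho_learner.py above: it folds any number of discrete arrays (encoded outcome, treatment, instrument) into one integer label per row via repeated LabelEncoder passes, so a single stratified splitter can balance every combination at once. The exact label values depend on the order in which the arrays are processed; what matters for stratification is only that each distinct combination receives its own label. A minimal standalone sketch of the same encoding (the combine_discrete name is illustrative and not part of the patch):

import numpy as np
from sklearn.preprocessing import LabelEncoder

def combine_discrete(arrs):
    # illustrative standalone helper (not in econml):
    # fold each array into the running code: new = enc(arr) + old * n_classes(arr)
    curr = np.zeros(arrs[0].ravel().shape, dtype=int)
    for arr in arrs:
        enc = LabelEncoder()
        curr = enc.fit_transform(arr.ravel()) + curr * len(enc.classes_)
    return curr

y = np.array([0, 1, 0, 1])
t = np.array(['a', 'a', 'b', 'b'])     # LabelEncoder also accepts string categories
combine_discrete([y, t])               # -> array([0, 2, 1, 3]); one label per (y, t) pair
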
""" - def __init__(self, *, discrete_treatment, treatment_featurizer, categories, + def __init__(self, *, binary_outcome, discrete_treatment, treatment_featurizer, categories, cv, random_state, mc_iters=None, mc_agg='mean'): - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, discrete_instrument=False, # no instrument, so doesn't matter categories=categories, diff --git a/econml/dml/causal_forest.py b/econml/dml/causal_forest.py index 07a356d75..8faf157c2 100644 --- a/econml/dml/causal_forest.py +++ b/econml/dml/causal_forest.py @@ -649,7 +649,11 @@ def _gen_featurizer(self): def _gen_model_y(self): if self.model_y == 'auto': - model_y = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y = clone(self.model_y, safe=False) return _FirstStageWrapper(model_y, True, self._gen_featurizer(), False, self.discrete_treatment) diff --git a/econml/dml/dml.py b/econml/dml/dml.py index 713554dad..286702a16 100644 --- a/econml/dml/dml.py +++ b/econml/dml/dml.py @@ -37,12 +37,20 @@ class _FirstStageWrapper: - def __init__(self, model, is_Y, featurizer, linear_first_stages, discrete_treatment): + def __init__( + self, + model, + is_Y, + featurizer, + linear_first_stages, + discrete_treatment, + binary_outcome): self._model = clone(model, safe=False) self._featurizer = clone(featurizer, safe=False) self._is_Y = is_Y self._linear_first_stages = linear_first_stages self._discrete_treatment = discrete_treatment + self._binary_outcome = binary_outcome def _combine(self, X, W, n_samples, fitting=True): if X is None: @@ -77,9 +85,13 @@ def fit(self, X, W, Target, sample_weight=None, groups=None): def predict(self, X, W): n_samples = X.shape[0] if X is not None else (W.shape[0] if W is not None else 1) - if (not self._is_Y) and self._discrete_treatment: + if (not self._is_Y and self._discrete_treatment) or (self._is_Y and self._binary_outcome): return self._model.predict_proba(self._combine(X, W, n_samples, fitting=False))[:, 1:] else: + if (not self._is_Y) and (not self._discrete_treatment) and hasattr(self._model, 'predict_proba'): + warn("A treatment model has a predict_proba method, but discrete_treatment=False. " + "If your treatment is discrete, consider setting discrete_treatment=True. 
" + "Otherwise, if your treatment is not discrete, use a regressor instead.", UserWarning) return self._model.predict(self._combine(X, W, n_samples, fitting=False)) def score(self, X, W, Target, sample_weight=None): @@ -461,6 +473,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages=False, + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -475,7 +488,8 @@ def __init__(self, *, self.model_y = clone(model_y, safe=False) self.model_t = clone(model_t, safe=False) self.model_final = clone(model_final, safe=False) - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, cv=cv, @@ -488,11 +502,15 @@ def _gen_featurizer(self): def _gen_model_y(self): if self.model_y == 'auto': - model_y = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y = clone(self.model_y, safe=False) return _FirstStageWrapper(model_y, True, self._gen_featurizer(), - self.linear_first_stages, self.discrete_treatment) + self.linear_first_stages, self.discrete_treatment, self.binary_outcome) def _gen_model_t(self): if self.model_t == 'auto': @@ -504,7 +522,7 @@ def _gen_model_t(self): else: model_t = clone(self.model_t, safe=False) return _FirstStageWrapper(model_t, False, self._gen_featurizer(), - self.linear_first_stages, self.discrete_treatment) + self.linear_first_stages, self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return clone(self.model_final, safe=False) @@ -687,6 +705,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages=True, + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -700,6 +719,7 @@ def __init__(self, *, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, linear_first_stages=linear_first_stages, + binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -927,6 +947,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages=True, + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -947,6 +968,7 @@ def __init__(self, *, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, linear_first_stages=linear_first_stages, + binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -1131,6 +1153,7 @@ class KernelDML(DML): """ def __init__(self, model_y='auto', model_t='auto', + binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, categories='auto', @@ -1148,6 +1171,7 @@ def __init__(self, model_y='auto', model_t='auto', featurizer=None, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, + binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -1320,6 +1344,7 @@ class NonParamDML(_BaseDML): def __init__(self, *, model_y, model_t, model_final, featurizer=None, + binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, categories='auto', @@ -1334,7 +1359,8 @@ def __init__(self, *, self.model_t = clone(model_t, safe=False) 
self.featurizer = clone(featurizer, safe=False) self.model_final = clone(model_final, safe=False) - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, cv=cv, @@ -1353,11 +1379,11 @@ def _gen_featurizer(self): def _gen_model_y(self): return _FirstStageWrapper(clone(self.model_y, safe=False), True, - self._gen_featurizer(), False, self.discrete_treatment) + self._gen_featurizer(), False, self.discrete_treatment, self.binary_outcome) def _gen_model_t(self): return _FirstStageWrapper(clone(self.model_t, safe=False), False, - self._gen_featurizer(), False, self.discrete_treatment) + self._gen_featurizer(), False, self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return clone(self.model_final, safe=False) diff --git a/econml/iv/dml/_dml.py b/econml/iv/dml/_dml.py index af0134ba3..f8b6c3df4 100644 --- a/econml/iv/dml/_dml.py +++ b/econml/iv/dml/_dml.py @@ -343,6 +343,7 @@ def __init__(self, *, projection=False, featurizer=None, fit_cate_intercept=True, + binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -359,7 +360,8 @@ def __init__(self, *, self.featurizer = clone(featurizer, safe=False) self.fit_cate_intercept = fit_cate_intercept - super().__init__(discrete_instrument=discrete_instrument, + super().__init__(binary_outcome=binary_outcome, + discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, @@ -379,7 +381,11 @@ def _gen_ortho_learner_model_final(self): def _gen_ortho_learner_model_nuisance(self): if self.model_y_xw == 'auto': - model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y_xw = clone(self.model_y_xw, safe=False) @@ -404,11 +410,13 @@ def _gen_ortho_learner_model_nuisance(self): model_t_xwz = clone(self.model_t_xwz, safe=False) return _OrthoIVModelNuisance(_FirstStageWrapper(clone(model_y_xw, safe=False), True, - self._gen_featurizer(), False, False), + self._gen_featurizer(), False, False, self.binary_outcome), _FirstStageWrapper(clone(model_t_xw, safe=False), False, - self._gen_featurizer(), False, self.discrete_treatment), + self._gen_featurizer(), False, + self.discrete_treatment, self.binary_outcome), _FirstStageWrapper(clone(model_t_xwz, safe=False), False, - self._gen_featurizer(), False, self.discrete_treatment), + self._gen_featurizer(), False, + self.discrete_treatment, self.binary_outcome), self.projection) else: @@ -423,11 +431,13 @@ def _gen_ortho_learner_model_nuisance(self): model_z_xw = clone(self.model_z_xw, safe=False) return _OrthoIVModelNuisance(_FirstStageWrapper(clone(model_y_xw, safe=False), True, - self._gen_featurizer(), False, False), + self._gen_featurizer(), False, False, self.binary_outcome), _FirstStageWrapper(clone(model_t_xw, safe=False), False, - self._gen_featurizer(), False, self.discrete_treatment), + self._gen_featurizer(), False, + self.discrete_treatment, self.binary_outcome), _FirstStageWrapper(clone(model_z_xw, safe=False), False, - self._gen_featurizer(), False, self.discrete_instrument), + self._gen_featurizer(), False, + self.discrete_instrument, 
self.binary_outcome), self.projection) def fit(self, Y, T, *, Z, X=None, W=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None, @@ -1142,6 +1152,7 @@ def __init__(self, *, model_final=StatsModelsLinearRegression(fit_intercept=False), featurizer=None, fit_cate_intercept=True, + binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -1156,7 +1167,8 @@ def __init__(self, *, self.model_final = clone(model_final, safe=False) self.featurizer = clone(featurizer, safe=False) self.fit_cate_intercept = fit_cate_intercept - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, discrete_instrument=discrete_instrument, categories=categories, @@ -1170,11 +1182,15 @@ def _gen_featurizer(self): def _gen_model_y_xw(self): if self.model_y_xw == 'auto': - model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y_xw = clone(self.model_y_xw, safe=False) return _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), - False, False) + False, False, self.binary_outcome) def _gen_model_t_xw(self): if self.model_t_xw == 'auto': @@ -1186,7 +1202,7 @@ def _gen_model_t_xw(self): else: model_t_xw = clone(self.model_t_xw, safe=False) return _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), - False, self.discrete_treatment) + False, self.discrete_treatment, self.binary_outcome) def _gen_model_t_xwz(self): if self.model_t_xwz == 'auto': @@ -1198,7 +1214,7 @@ def _gen_model_t_xwz(self): else: model_t_xwz = clone(self.model_t_xwz, safe=False) return _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(), - False, self.discrete_treatment) + False, self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return clone(self.model_final, safe=False) @@ -1532,6 +1548,7 @@ def __init__(self, *, model_t_xw="auto", model_t_xwz="auto", model_final, + binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -1546,7 +1563,8 @@ def __init__(self, *, self.model_t_xwz = clone(model_t_xwz, safe=False) self.model_final = clone(model_final, safe=False) self.featurizer = clone(featurizer, safe=False) - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument, treatment_featurizer=treatment_featurizer, categories=categories, @@ -1560,11 +1578,15 @@ def _gen_featurizer(self): def _gen_model_y_xw(self): if self.model_y_xw == 'auto': - model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y_xw = clone(self.model_y_xw, safe=False) return _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), - False, False) + False, False, self.binary_outcome) def _gen_model_t_xw(self): if self.model_t_xw == 'auto': @@ -1576,7 +1598,7 @@ def _gen_model_t_xw(self): else: model_t_xw = clone(self.model_t_xw, safe=False) return 
_FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), - False, self.discrete_treatment) + False, self.discrete_treatment, self.binary_outcome) def _gen_model_t_xwz(self): if self.model_t_xwz == 'auto': @@ -1588,7 +1610,7 @@ def _gen_model_t_xwz(self): else: model_t_xwz = clone(self.model_t_xwz, safe=False) return _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(), - False, self.discrete_treatment) + False, self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return clone(self.model_final, safe=False) diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index 6e3689453..b704e828c 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -301,6 +301,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -314,7 +315,8 @@ def __init__(self, *, self.fit_cate_intercept = fit_cate_intercept self.cov_clip = cov_clip self.opt_reweighted = opt_reweighted - super().__init__(discrete_instrument=discrete_instrument, + super().__init__(binary_outcome=binary_outcome, + discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, @@ -550,6 +552,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -570,6 +573,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, + binary_outcome=binary_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -584,7 +588,11 @@ def _gen_prel_model_effect(self): def _gen_ortho_learner_model_nuisance(self): if self.model_y_xw == 'auto': - model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y_xw = clone(self.model_y_xw, safe=False) @@ -614,14 +622,15 @@ def _gen_ortho_learner_model_nuisance(self): model_t_xwz = clone(self.model_t_xwz, safe=False) return _BaseDRIVModelNuisance(self._gen_prel_model_effect(), - _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), False, False), + _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), + False, False, self.binary_outcome), _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), - False, self.discrete_treatment), - # outcome is continuous since proj_t is probability + False, self.discrete_treatment, self.binary_outcome), + # target is continuous since proj_t is probability _FirstStageWrapper(model_tz_xw, False, self._gen_featurizer(), False, - False), + False, self.binary_outcome), _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(), - False, self.discrete_treatment), + False, self.discrete_treatment, self.binary_outcome), self.projection, self.discrete_treatment, self.discrete_instrument) else: @@ -644,13 +653,15 @@ def _gen_ortho_learner_model_nuisance(self): model_z_xw = clone(self.model_z_xw, safe=False) return _BaseDRIVModelNuisance(self._gen_prel_model_effect(), - _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), False, False), + _FirstStageWrapper(model_y_xw, True, 
self._gen_featurizer(), False, False, + self.binary_outcome), _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), - False, self.discrete_treatment), + False, self.discrete_treatment, self.binary_outcome), _FirstStageWrapper(model_tz_xw, False, self._gen_featurizer(), False, - self.discrete_treatment and self.discrete_instrument), + self.discrete_treatment and self.discrete_instrument, + self.binary_outcome), _FirstStageWrapper(model_z_xw, False, self._gen_featurizer(), - False, self.discrete_instrument), + False, self.discrete_instrument, self.binary_outcome), self.projection, self.discrete_treatment, self.discrete_instrument) @@ -838,6 +849,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -866,6 +878,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, + binary_outcome=binary_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -1301,6 +1314,7 @@ def __init__(self, *, fit_cate_intercept=True, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -1324,6 +1338,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, + binary_outcome=binary_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -1632,6 +1647,7 @@ def __init__(self, *, n_jobs=None, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -1662,6 +1678,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, + binary_outcome=binary_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -2040,6 +2057,7 @@ def __init__(self, *, verbose=0, cov_clip=1e-3, opt_reweighted=False, + binary_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -2076,6 +2094,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=cov_clip, opt_reweighted=opt_reweighted, + binary_outcome=binary_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -2294,7 +2313,11 @@ def _gen_prel_model_effect(self): def _gen_ortho_learner_model_nuisance(self): if self.model_y_xw == 'auto': - model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y_xw = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y_xw = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y_xw = clone(self.model_y_xw, safe=False) @@ -2312,11 +2335,13 @@ def _gen_ortho_learner_model_nuisance(self): raise ValueError("Only 'auto' or float is allowed!") return _IntentToTreatDRIVModelNuisance(_FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), - False, False), + False, False, self.binary_outcome), _FirstStageWrapper(model_t_xwz, False, - self._gen_featurizer(), False, True), + self._gen_featurizer(), False, True, + self.binary_outcome), _FirstStageWrapper(dummy_z, False, - 
self._gen_featurizer(), False, True), + self._gen_featurizer(), False, True, + self.binary_outcome), self._gen_prel_model_effect() ) diff --git a/econml/panel/dml/_dml.py b/econml/panel/dml/_dml.py index a12385e36..2dde6dbe0 100644 --- a/econml/panel/dml/_dml.py +++ b/econml/panel/dml/_dml.py @@ -458,6 +458,7 @@ def __init__(self, *, featurizer=None, fit_cate_intercept=True, linear_first_stages=False, + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -469,7 +470,8 @@ def __init__(self, *, self.featurizer = clone(featurizer, safe=False) self.model_y = clone(model_y, safe=False) self.model_t = clone(model_t, safe=False) - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=None, discrete_instrument=False, categories=categories, @@ -526,11 +528,15 @@ def _gen_featurizer(self): def _gen_model_y(self): if self.model_y == 'auto': - model_y = WeightedLassoCVWrapper(random_state=self.random_state) + if self.binary_outcome: + model_y = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_y = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y = clone(self.model_y, safe=False) return _FirstStageWrapper(model_y, True, self._gen_featurizer(), - self.linear_first_stages, self.discrete_treatment) + self.linear_first_stages, self.discrete_treatment, self.binary_outcome) def _gen_model_t(self): if self.model_t == 'auto': @@ -542,7 +548,7 @@ def _gen_model_t(self): else: model_t = clone(self.model_t, safe=False) return _FirstStageWrapper(model_t, False, self._gen_featurizer(), - self.linear_first_stages, self.discrete_treatment) + self.linear_first_stages, self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return StatsModelsLinearRegression(fit_intercept=False) diff --git a/econml/tests/test_ortho_learner.py b/econml/tests/test_ortho_learner.py index 846d9facd..7927042d4 100644 --- a/econml/tests/test_ortho_learner.py +++ b/econml/tests/test_ortho_learner.py @@ -170,7 +170,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) @@ -187,8 +187,8 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None) # test non-array inputs est.fit(list(y), list(X[:, 0]), X=None, W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) @@ -268,8 +268,8 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_treatment=False, 
treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) @@ -318,8 +318,8 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) @@ -380,8 +380,8 @@ def _gen_ortho_learner_model_final(self): T = np.random.binomial(1, scipy.special.expit(X[:, 0])) sigma = 0.01 y = T + X[:, 0] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_treatment=True, treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=True, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None) est.fit(y, T, W=X) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) From 6bc066079f42c2bd56859f99d225b4224712e721 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Mon, 25 Sep 2023 12:00:08 -0400 Subject: [PATCH 02/25] add init args to drlearner, causalforestdml Signed-off-by: Fabio Vera --- econml/dml/causal_forest.py | 4 +++- econml/dr/_drlearner.py | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/econml/dml/causal_forest.py b/econml/dml/causal_forest.py index 8faf157c2..bc30aa1d4 100644 --- a/econml/dml/causal_forest.py +++ b/econml/dml/causal_forest.py @@ -577,6 +577,7 @@ def __init__(self, *, model_t='auto', featurizer=None, treatment_featurizer=None, + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -630,7 +631,8 @@ def __init__(self, *, self.subforest_size = subforest_size self.n_jobs = n_jobs self.verbose = verbose - super().__init__(discrete_treatment=discrete_treatment, + super().__init__(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, cv=cv, diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py index 3ca702a0c..e4e264113 100644 --- a/econml/dr/_drlearner.py +++ b/econml/dr/_drlearner.py @@ -402,6 +402,7 @@ def __init__(self, *, model_propensity='auto', model_regression='auto', model_final=StatsModelsLinearRegression(), + binary_outcome=False, multitask_model_final=False, featurizer=None, min_propensity=1e-6, @@ -419,6 +420,7 @@ def __init__(self, *, super().__init__(cv=cv, mc_iters=mc_iters, mc_agg=mc_agg, + binary_outcome=binary_outcome, discrete_treatment=True, treatment_featurizer=None, # treatment featurization not supported with discrete treatment discrete_instrument=False, # no instrument, so doesn't matter @@ -864,6 +866,7 @@ class 
LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): def __init__(self, *, model_propensity='auto', model_regression='auto', + binary_outcome=False, featurizer=None, fit_cate_intercept=True, min_propensity=1e-6, @@ -876,6 +879,7 @@ def __init__(self, *, super().__init__(model_propensity=model_propensity, model_regression=model_regression, model_final=None, + binary_outcome=binary_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, @@ -1137,6 +1141,7 @@ def __init__(self, *, model_regression='auto', featurizer=None, fit_cate_intercept=True, + binary_outcome=False, alpha='auto', n_alphas=100, alpha_cov='auto', @@ -1161,6 +1166,7 @@ def __init__(self, *, super().__init__(model_propensity=model_propensity, model_regression=model_regression, model_final=None, + binary_outcome=binary_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, @@ -1413,6 +1419,7 @@ class ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner): def __init__(self, *, model_regression="auto", model_propensity="auto", + binary_outcome=False, featurizer=None, min_propensity=1e-6, categories='auto', @@ -1449,6 +1456,7 @@ def __init__(self, *, super().__init__(model_regression=model_regression, model_propensity=model_propensity, model_final=None, + binary_outcome=binary_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, From 058c3e8b26cf5bc6e05f76b5d402bac9645dcb18 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Mon, 25 Sep 2023 12:14:57 -0400 Subject: [PATCH 03/25] modify bootstrap test to use np array Signed-off-by: Fabio Vera --- econml/tests/test_bootstrap.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/econml/tests/test_bootstrap.py b/econml/tests/test_bootstrap.py index df15f9cdb..3145ef07b 100644 --- a/econml/tests/test_bootstrap.py +++ b/econml/tests/test_bootstrap.py @@ -270,8 +270,8 @@ def test_internal_options(self): def test_stratify(self): """Test that we can properly stratify by treatment""" - T = [1, 0, 1, 2, 0, 2] - Y = [1, 2, 3, 4, 5, 6] + T = np.array([1, 0, 1, 2, 0, 2]) + Y = np.array([1, 2, 3, 4, 5, 6]) X = np.array([1, 1, 2, 2, 1, 2]).reshape(-1, 1) est = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(), discrete_treatment=True) inference = BootstrapInference(n_bootstrap_samples=5, n_jobs=-1, verbose=0) @@ -286,9 +286,9 @@ def test_stratify(self): def test_stratify_orthoiv(self): """Test that we can properly stratify by treatment/instrument pair""" - T = [1, 0, 1, 1, 0, 0, 1, 0] - Z = [1, 0, 0, 1, 0, 1, 0, 1] - Y = [1, 2, 3, 4, 5, 6, 7, 8] + T = np.array([1, 0, 1, 1, 0, 0, 1, 0]) + Z = np.array([1, 0, 0, 1, 0, 1, 0, 1]) + Y = np.array([1, 2, 3, 4, 5, 6, 7, 8]) X = np.array([1, 1, 2, 2, 1, 2, 1, 2]).reshape(-1, 1) est = LinearIntentToTreatDRIV(model_y_xw=LinearRegression(), model_t_xwz=LogisticRegression(), flexible_model_effect=LinearRegression(), cv=2) @@ -297,8 +297,8 @@ def test_stratify_orthoiv(self): est.const_marginal_effect_interval(X) def test_all_kinds(self): - T = [1, 0, 1, 2, 0, 2] * 5 - Y = [1, 2, 3, 4, 5, 6] * 5 + T = np.array([1, 0, 1, 2, 0, 2] * 5) + Y = np.array([1, 2, 3, 4, 5, 6] * 5) X = np.array([1, 1, 2, 2, 1, 2] * 5).reshape(-1, 1) est = LinearDML(cv=2) for kind in ['percentile', 'pivot', 'normal']: From a92d140cbd484b9a5d9a6bb247e45316fb93b05d Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Mon, 25 Sep 2023 15:47:26 -0400 Subject: [PATCH 04/25] bugfix causalforest firststagewrapper 
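After the signature change in PATCH 01, _FirstStageWrapper requires a binary_outcome argument; CausalForestDML._gen_model_y and _gen_model_t were still calling it with the old five-argument form, so thread self.binary_outcome through both calls.
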
Signed-off-by: Fabio Vera --- econml/dml/causal_forest.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/econml/dml/causal_forest.py b/econml/dml/causal_forest.py index bc30aa1d4..1b91d0e03 100644 --- a/econml/dml/causal_forest.py +++ b/econml/dml/causal_forest.py @@ -658,7 +658,8 @@ def _gen_model_y(self): model_y = WeightedLassoCVWrapper(random_state=self.random_state) else: model_y = clone(self.model_y, safe=False) - return _FirstStageWrapper(model_y, True, self._gen_featurizer(), False, self.discrete_treatment) + return _FirstStageWrapper(model_y, True, self._gen_featurizer(), False, + self.discrete_treatment, self.binary_outcome) def _gen_model_t(self): if self.model_t == 'auto': @@ -669,7 +670,8 @@ def _gen_model_t(self): model_t = WeightedLassoCVWrapper(random_state=self.random_state) else: model_t = clone(self.model_t, safe=False) - return _FirstStageWrapper(model_t, False, self._gen_featurizer(), False, self.discrete_treatment) + return _FirstStageWrapper(model_t, False, self._gen_featurizer(), False, + self.discrete_treatment, self.binary_outcome) def _gen_model_final(self): return MultiOutputGRF(CausalForest(n_estimators=self.n_estimators, From 8929eab4c9162b40f390e3d955212329ca519f1b Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Mon, 25 Sep 2023 16:17:20 -0400 Subject: [PATCH 05/25] fix test bug ortholearner Signed-off-by: Fabio Vera --- econml/tests/test_ortho_learner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/econml/tests/test_ortho_learner.py b/econml/tests/test_ortho_learner.py index 7927042d4..6db27cc37 100644 --- a/econml/tests/test_ortho_learner.py +++ b/econml/tests/test_ortho_learner.py @@ -203,7 +203,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=KFold(n_splits=3), + est = OrthoLearner(cv=KFold(n_splits=3), binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], X=None, W=X[:, 1:]) @@ -220,7 +220,7 @@ def _gen_ortho_learner_model_final(self): sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] - est = OrthoLearner(cv=folds, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=folds, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], X=None, W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=2) From 1540a081ca6987c525f38f7df3d61a1a3d6577b4 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Mon, 25 Sep 2023 17:08:22 -0400 Subject: [PATCH 06/25] fix test bugs treatfeat OL doctest Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 4 ++-- econml/tests/test_treatment_featurization.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index 498376f1d..84fde5f6e 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -342,7 +342,7 @@ def _gen_ortho_learner_model_final(self): np.random.seed(123) X = np.random.normal(size=(100, 3)) y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.1, size=(100,)) - est = OrthoLearner(cv=2, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=2, binary_outcome=False, 
discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) @@ -400,7 +400,7 @@ def _gen_ortho_learner_model_final(self): import scipy.special T = np.random.binomial(1, scipy.special.expit(W[:, 0])) y = T + W[:, 0] + np.random.normal(0, 0.01, size=(100,)) - est = OrthoLearner(cv=2, discrete_treatment=True, discrete_instrument=False, + est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=True, discrete_instrument=False, treatment_featurizer=None, categories='auto', random_state=None) est.fit(y, T, W=W) diff --git a/econml/tests/test_treatment_featurization.py b/econml/tests/test_treatment_featurization.py index a58bf5754..4310ac413 100644 --- a/econml/tests/test_treatment_featurization.py +++ b/econml/tests/test_treatment_featurization.py @@ -504,6 +504,7 @@ def _gen_ortho_learner_model_final(self): 'estimator': OrthoLearner, 'params': { 'cv': 2, + 'binary_outcome': False, 'discrete_treatment': False, 'treatment_featurizer': None, 'discrete_instrument': False, From d39a0910696481107f9c27ebd02e52b2a648b18e Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Wed, 11 Oct 2023 11:05:23 -0400 Subject: [PATCH 07/25] add tests, allow str y, add warnings/errors Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 19 +++- econml/dml/dml.py | 13 ++- econml/dr/_drlearner.py | 24 +++-- econml/tests/test_binary_outcome.py | 161 ++++++++++++++++++++++++++++ 4 files changed, 206 insertions(+), 11 deletions(-) create mode 100644 econml/tests/test_binary_outcome.py diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index 84fde5f6e..3536c4925 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -639,6 +639,18 @@ def fit(self, Y, T, *, X=None, W=None, Z=None, sample_weight=None, freq_weight=N if not only_final: + if self.binary_outcome: + self.outcome_transformer = LabelEncoder() + self.outcome_transformer.fit(Y) + if Y.shape[1:] and Y.shape[1] > 1: + raise ValueError( + f"Only one outcome variable is supported when binary_outcome=True. Got Y of shape {Y.shape}") + if len(self.outcome_transformer.classes_) > 2: + raise AttributeError( + "More than 2 outcome classes detected. 
This method currently only supports binary outcomes") + else: + self.outcome_transformer = None + if self.discrete_treatment: categories = self.categories if categories != 'auto': @@ -781,7 +793,7 @@ def refit_final(self, inference=None): def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): # use a binary array to get stratified split in case of discrete treatment - stratify = self.discrete_treatment or self.discrete_instrument + stratify = self.discrete_treatment or self.discrete_instrument or self.binary_outcome strata = self._strata(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight, groups=groups) if strata is None: strata = T # always safe to pass T as second arg to split even if we're not actually stratifying @@ -794,6 +806,9 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group if self.discrete_instrument: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) + if self.binary_outcome: + Y = self.outcome_transformer.transform(Y) + if self.cv == 1: # special case, no cross validation folds = None else: @@ -912,6 +927,8 @@ def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): X, T = self._expand_treatments(X, T) if self.z_transformer is not None: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) + if self.binary_outcome: + Y = self.outcome_transformer.transform(Y) n_iters = len(self._models_nuisance) n_splits = len(self._models_nuisance[0]) diff --git a/econml/dml/dml.py b/econml/dml/dml.py index 286702a16..f67b01164 100644 --- a/econml/dml/dml.py +++ b/econml/dml/dml.py @@ -88,10 +88,15 @@ def predict(self, X, W): if (not self._is_Y and self._discrete_treatment) or (self._is_Y and self._binary_outcome): return self._model.predict_proba(self._combine(X, W, n_samples, fitting=False))[:, 1:] else: - if (not self._is_Y) and (not self._discrete_treatment) and hasattr(self._model, 'predict_proba'): - warn("A treatment model has a predict_proba method, but discrete_treatment=False. " - "If your treatment is discrete, consider setting discrete_treatment=True. " - "Otherwise, if your treatment is not discrete, use a regressor instead.", UserWarning) + if hasattr(self._model, 'predict_proba'): + if (not self._is_Y): + warn("A treatment model has a predict_proba method, but discrete_treatment=False. " + "If your treatment is discrete, consider setting discrete_treatment=True. " + "Otherwise, if your treatment is not discrete, use a regressor instead.", UserWarning) + elif (self._is_Y): + warn("An outcome model has a predict_proba method, but binary_outcome=False. " + "If your outcome is binary, consider setting binary_outcome=True. 
" + "Otherwise, if your outcome is not binary, use a regressor instead.", UserWarning) return self._model.predict(self._combine(X, W, n_samples, fitting=False)) def score(self, X, W, Target, sample_weight=None): diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py index e4e264113..d42a32f40 100644 --- a/econml/dr/_drlearner.py +++ b/econml/dr/_drlearner.py @@ -51,16 +51,18 @@ from ..grf import RegressionForest from ..sklearn_extensions.linear_model import ( DebiasedLasso, StatsModelsLinearRegression, WeightedLassoCVWrapper) +from ..sklearn_extensions.model_selection import WeightedStratifiedKFold from ..utilities import (_deprecate_positional, check_high_dimensional, filter_none_kwargs, fit_with_groups, inverse_onehot, get_feature_names_or_default) from .._shap import _shap_explain_multitask_model_cate, _shap_explain_model_cate class _ModelNuisance: - def __init__(self, model_propensity, model_regression, min_propensity): + def __init__(self, model_propensity, model_regression, min_propensity, binary_outcome): self._model_propensity = model_propensity self._model_regression = model_regression self._min_propensity = min_propensity + self._binary_outcome = binary_outcome def _combine(self, X, W): return np.hstack([arr for arr in [X, W] if arr is not None]) @@ -102,12 +104,18 @@ def predict(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None): n = T.shape[0] Y_pred = np.zeros((T.shape[0], T.shape[1] + 1)) T_counter = np.zeros(T.shape) - Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n) + if self._binary_outcome and hasattr(self._model_regression, 'predict_proba'): + Y_pred[:, 0] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n) + else: + Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n) Y_pred[:, 0] += (Y.reshape(n) - Y_pred[:, 0]) * np.all(T == 0, axis=1) / propensities[:, 0] for t in np.arange(T.shape[1]): T_counter = np.zeros(T.shape) T_counter[:, t] = 1 - Y_pred[:, t + 1] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n) + if self._binary_outcome and hasattr(self._model_regression, 'predict_proba'): + Y_pred[:, t + 1] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n) + else: + Y_pred[:, t + 1] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n) Y_pred[:, t + 1] += (Y.reshape(n) - Y_pred[:, t + 1]) * (T[:, t] == 1) / propensities[:, t + 1] T_complete = np.hstack(((np.all(T == 0, axis=1) * 1).reshape(-1, 1), T)) propensities_weight = np.sum(propensities * T_complete, axis=1) @@ -486,11 +494,15 @@ def _gen_ortho_learner_model_nuisance(self): model_propensity = clone(self.model_propensity, safe=False) if self.model_regression == 'auto': - model_regression = WeightedLassoCVWrapper(cv=3, random_state=self.random_state) + if self.binary_outcome: + model_regression = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state), + random_state=self.random_state) + else: + model_regression = WeightedLassoCVWrapper(cv=3, random_state=self.random_state) else: - model_regression = clone(self.model_regression, safe=False) + model_regression = clone(self.model_y, safe=False) - return _ModelNuisance(model_propensity, model_regression, self.min_propensity) + return _ModelNuisance(model_propensity, model_regression, self.min_propensity, self.binary_outcome) def _gen_featurizer(self): return clone(self.featurizer, safe=False) diff --git a/econml/tests/test_binary_outcome.py 
b/econml/tests/test_binary_outcome.py new file mode 100644 index 000000000..8188e92d1 --- /dev/null +++ b/econml/tests/test_binary_outcome.py @@ -0,0 +1,161 @@ +# Copyright (c) PyWhy contributors. All rights reserved. +# Licensed under the MIT License. +import pytest +import unittest +import numpy as np +from sklearn.preprocessing import PolynomialFeatures +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.ensemble import RandomForestRegressor +from joblib import Parallel, delayed + +from econml._ortho_learner import _OrthoLearner +from econml.dml import LinearDML, SparseLinearDML, KernelDML, CausalForestDML, NonParamDML +from econml.dr import LinearDRLearner +from econml.iv.dml import OrthoIV, DMLIV, NonParamDMLIV +from econml.iv.dr import DRIV, LinearDRIV, SparseLinearDRIV, ForestDRIV +from econml.orf import DMLOrthoForest + +from econml.utilities import filter_none_kwargs +from copy import deepcopy + + +class TestBinaryOutcome(unittest.TestCase): + # accuracy test + def test_accuracy(): + n = 1000 + binary_outcome = True + discrete_treatment = True + true_ate = 0.3 + W = np.random.uniform(-1, 1, size=(n, 1)) + D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,)) + Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) + + ests = [ + LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + LinearDRLearner(binary_outcome=binary_outcome) + ] + + for est in ests: + + if isinstance(est, CausalForestDML): + est.fit(Y, D, X=W) + ate = est.ate(X=W) + ate_lb, ate_ub = est.ate_interval(X=W) + + else: + est.fit(Y, D, W=W) + ate = est.ate() + ate_lb, ate_ub = est.ate_interval() + + if isinstance(est, LinearDRLearner): + est.summary(T=1) + else: + est.summary() + + proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() + np.testing.assert_array_less(0.50, proportion_in_interval) + + +# accuracy test, DML +def test_accuracy_iv(): + n = 10000 + binary_outcome = True + discrete_treatment = True + true_ate = 0.3 + W = np.random.uniform(-1, 1, size=(n, 1)) + Z = np.random.uniform(-1, 1, size=(n, 1)) + D = np.random.binomial(1, .5 + .1 * W[:, 0] + .1 * Z[:, 0], size=(n,)) + Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) + + ests = [ + OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + ] + + for est in ests: + + est.fit(Y, D, W=W, Z=Z) + ate = est.ate() + ate_lb, ate_ub = est.ate_interval() + + est.summary() + + proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() + np.testing.assert_array_less(0.50, proportion_in_interval) + + +def test_string_outcome(): + n = 100 + true_ate = 0.3 + W = np.random.uniform(-1, 1, size=(n, 1)) + D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,)) + Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) + Y_str = pd.Series(Y).replace(0, 'a').replace(1, 'b').values + est = LinearDML(binary_outcome=True, discrete_treatment=True) + est.fit(Y_str, D, X=W) + + +def test_basic_functionality(): + n = 100 + binary_outcome = True + d_x = 3 + + def gen_array(n, is_binary, d): + sz = (n, d) if d > 0 else (n,) + + if is_binary: + return np.random.choice([0, 1], size=sz) + else: + return np.random.normal(size=sz) + + for discrete_treatment in [True, False]: + for discrete_instrument in [True, False, None]: + + Y = 
gen_array(n, binary_outcome, d=0) + T = gen_array(n, discrete_treatment, d=0) + Z = None + if discrete_instrument is not None: + Z = gen_array(n, discrete_instrument, d=0) + X = gen_array(n, is_binary=False, d=3) + + if Z is not None: + est_list = [ + DRIV(binary_outcome=binary_outcome), + DMLIV(binary_outcome=binary_outcome), + OrthoIV(binary_outcome=binary_outcome), + ] + + else: + est_list = [ + LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment) + ] + + if discrete_treatment: + est_list += [ + LinearDRLearner(binary_outcome=binary_outcome), + ] + + for est in est_list: + print(est) + est.fit(Y, T, **filter_none_kwargs(X=X, Z=Z)) + est.score(Y, T, **filter_none_kwargs(X=X, Z=Z)) + est.effect(X=X) + est.const_marginal_effect(X=X) + est.marginal_effect(T, X=X) + est.ate(X=X) + + # make sure the auto outcome model is a classifier + if hasattr(est, 'model_y'): + outcome_model_attr = 'models_y' + elif hasattr(est, 'model_regression'): + outcome_model_attr = 'models_regression' + elif hasattr(est, 'model_y_xw'): + outcome_model_attr = 'models_y_xw' + assert ( + hasattr( + getattr(est, outcome_model_attr)[0][0], + 'predict_proba' + ) + ), 'Auto outcome model is not a classifier!' From ee64b0e4005bc7c867cec747f8f82e0ba27c08c3 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 27 Oct 2023 11:41:03 -0400 Subject: [PATCH 08/25] bugfixes Signed-off-by: Fabio Vera --- econml/dr/_drlearner.py | 2 +- econml/tests/test_binary_outcome.py | 9 +++++---- econml/tests/test_missing_values.py | 5 +++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py index d61f3e698..badb94a33 100644 --- a/econml/dr/_drlearner.py +++ b/econml/dr/_drlearner.py @@ -509,7 +509,7 @@ def _gen_ortho_learner_model_nuisance(self): else: model_regression = WeightedLassoCVWrapper(cv=3, random_state=self.random_state) else: - model_regression = clone(self.model_y, safe=False) + model_regression = clone(self.model_regression, safe=False) return _ModelNuisance(model_propensity, model_regression, self.min_propensity, self.binary_outcome) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index 8188e92d1..a69b008e0 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -3,6 +3,7 @@ import pytest import unittest import numpy as np +import pandas as pd from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression, LogisticRegression from sklearn.ensemble import RandomForestRegressor @@ -21,7 +22,7 @@ class TestBinaryOutcome(unittest.TestCase): # accuracy test - def test_accuracy(): + def test_accuracy(self): n = 1000 binary_outcome = True discrete_treatment = True @@ -58,7 +59,7 @@ def test_accuracy(): # accuracy test, DML -def test_accuracy_iv(): +def test_accuracy_iv(self): n = 10000 binary_outcome = True discrete_treatment = True @@ -85,7 +86,7 @@ def test_accuracy_iv(): np.testing.assert_array_less(0.50, proportion_in_interval) -def test_string_outcome(): +def test_string_outcome(self): n = 100 true_ate = 0.3 W = np.random.uniform(-1, 1, size=(n, 1)) @@ -96,7 +97,7 @@ def test_string_outcome(): est.fit(Y_str, D, X=W) -def test_basic_functionality(): +def test_basic_functionality(self): n = 100 binary_outcome = True d_x = 3 diff --git a/econml/tests/test_missing_values.py b/econml/tests/test_missing_values.py index 2ae355983..569a6d977 100644 --- 
a/econml/tests/test_missing_values.py +++ b/econml/tests/test_missing_values.py @@ -123,8 +123,9 @@ def test_missing(self): # model that can handle missing values nuisance_model = make_pipeline(SimpleImputer(strategy='mean'), LinearRegression()) - OrthoLearner(discrete_treatment=False, treatment_featurizer=None, discrete_instrument=None, - categories='auto', cv=3, random_state=1, allow_missing=True).fit(y, T, W=W_missing) + OrthoLearner(binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=None, categories='auto', cv=3, random_state=1, + allow_missing=True).fit(y, T, W=W_missing) CausalForestDML(model_y=nuisance_model, model_t=nuisance_model, allow_missing=True).fit(y, T, X=X, W=W_missing) From 5aaee9d43d9eac5e7342ef49d7d307008712fd6b Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 27 Oct 2023 11:56:26 -0400 Subject: [PATCH 09/25] linting Signed-off-by: Fabio Vera --- econml/iv/dr/_dr.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index 76a6c419c..6d37f5764 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -679,21 +679,9 @@ def _gen_ortho_learner_model_nuisance(self): else: model_t_xwz = clone(self.model_t_xwz, safe=False) - return _BaseDRIVModelNuisance(self._gen_prel_model_effect(), - _FirstStageWrapper(model_y_xw, True, self._gen_featurizer(), - False, False, self.binary_outcome), - _FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), - False, self.discrete_treatment, self.binary_outcome), - # target is continuous since proj_t is probability - _FirstStageWrapper(model_tz_xw, False, self._gen_featurizer(), False, - False, self.binary_outcome), - _FirstStageWrapper(model_t_xwz, False, self._gen_featurizer(), - False, self.discrete_treatment, self.binary_outcome), - self.projection, self.discrete_treatment, self.discrete_instrument) - return _BaseDRIVModelNuisance(prel_model_effect=self._gen_prel_model_effect(), model_y_xw=_FirstStageWrapper( - model_y_xw, True, self._gen_featurizer(), + model_y_xw, True, self._gen_featurizer(), False, False, self.binary_outcome), model_t_xw=_FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), False, self.discrete_treatment, @@ -730,7 +718,7 @@ def _gen_ortho_learner_model_nuisance(self): return _BaseDRIVModelNuisance(prel_model_effect=self._gen_prel_model_effect(), model_y_xw=_FirstStageWrapper( - model_y_xw, True, self._gen_featurizer(), + model_y_xw, True, self._gen_featurizer(), False, False, self.binary_outcome), model_t_xw=_FirstStageWrapper(model_t_xw, False, self._gen_featurizer(), False, self.discrete_treatment, From 9064f8ba411f5278456cc5f5c475a4e7182c8ec6 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 27 Oct 2023 12:56:00 -0400 Subject: [PATCH 10/25] indent Signed-off-by: Fabio Vera --- econml/tests/test_binary_outcome.py | 196 ++++++++++++++-------------- 1 file changed, 98 insertions(+), 98 deletions(-) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index a69b008e0..d68eb0b30 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -58,105 +58,105 @@ def test_accuracy(self): np.testing.assert_array_less(0.50, proportion_in_interval) -# accuracy test, DML -def test_accuracy_iv(self): - n = 10000 - binary_outcome = True - discrete_treatment = True - true_ate = 0.3 - W = np.random.uniform(-1, 1, size=(n, 1)) - Z = np.random.uniform(-1, 1, size=(n, 1)) - D = np.random.binomial(1, .5 + .1 * W[:, 0] + .1 * Z[:, 
0], size=(n,)) - Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) - - ests = [ - OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - ] - - for est in ests: - - est.fit(Y, D, W=W, Z=Z) - ate = est.ate() - ate_lb, ate_ub = est.ate_interval() - - est.summary() - - proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() - np.testing.assert_array_less(0.50, proportion_in_interval) - - -def test_string_outcome(self): - n = 100 - true_ate = 0.3 - W = np.random.uniform(-1, 1, size=(n, 1)) - D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,)) - Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) - Y_str = pd.Series(Y).replace(0, 'a').replace(1, 'b').values - est = LinearDML(binary_outcome=True, discrete_treatment=True) - est.fit(Y_str, D, X=W) - - -def test_basic_functionality(self): - n = 100 - binary_outcome = True - d_x = 3 - - def gen_array(n, is_binary, d): - sz = (n, d) if d > 0 else (n,) - - if is_binary: - return np.random.choice([0, 1], size=sz) - else: - return np.random.normal(size=sz) - - for discrete_treatment in [True, False]: - for discrete_instrument in [True, False, None]: - - Y = gen_array(n, binary_outcome, d=0) - T = gen_array(n, discrete_treatment, d=0) - Z = None - if discrete_instrument is not None: - Z = gen_array(n, discrete_instrument, d=0) - X = gen_array(n, is_binary=False, d=3) - - if Z is not None: - est_list = [ - DRIV(binary_outcome=binary_outcome), - DMLIV(binary_outcome=binary_outcome), - OrthoIV(binary_outcome=binary_outcome), - ] + # accuracy test, DML + def test_accuracy_iv(self): + n = 10000 + binary_outcome = True + discrete_treatment = True + true_ate = 0.3 + W = np.random.uniform(-1, 1, size=(n, 1)) + Z = np.random.uniform(-1, 1, size=(n, 1)) + D = np.random.binomial(1, .5 + .1 * W[:, 0] + .1 * Z[:, 0], size=(n,)) + Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) + + ests = [ + OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + ] + + for est in ests: + + est.fit(Y, D, W=W, Z=Z) + ate = est.ate() + ate_lb, ate_ub = est.ate_interval() + est.summary() + + proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() + np.testing.assert_array_less(0.50, proportion_in_interval) + + + def test_string_outcome(self): + n = 100 + true_ate = 0.3 + W = np.random.uniform(-1, 1, size=(n, 1)) + D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,)) + Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) + Y_str = pd.Series(Y).replace(0, 'a').replace(1, 'b').values + est = LinearDML(binary_outcome=True, discrete_treatment=True) + est.fit(Y_str, D, X=W) + + + def test_basic_functionality(self): + n = 100 + binary_outcome = True + d_x = 3 + + def gen_array(n, is_binary, d): + sz = (n, d) if d > 0 else (n,) + + if is_binary: + return np.random.choice([0, 1], size=sz) else: - est_list = [ - LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment) - ] - - if discrete_treatment: - est_list += [ - LinearDRLearner(binary_outcome=binary_outcome), + return np.random.normal(size=sz) + + for discrete_treatment in [True, False]: + for discrete_instrument in [True, False, None]: + + Y = gen_array(n, binary_outcome, d=0) + T = 
gen_array(n, discrete_treatment, d=0) + Z = None + if discrete_instrument is not None: + Z = gen_array(n, discrete_instrument, d=0) + X = gen_array(n, is_binary=False, d=3) + + if Z is not None: + est_list = [ + DRIV(binary_outcome=binary_outcome), + DMLIV(binary_outcome=binary_outcome), + OrthoIV(binary_outcome=binary_outcome), + ] + + else: + est_list = [ + LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment) ] - for est in est_list: - print(est) - est.fit(Y, T, **filter_none_kwargs(X=X, Z=Z)) - est.score(Y, T, **filter_none_kwargs(X=X, Z=Z)) - est.effect(X=X) - est.const_marginal_effect(X=X) - est.marginal_effect(T, X=X) - est.ate(X=X) - - # make sure the auto outcome model is a classifier - if hasattr(est, 'model_y'): - outcome_model_attr = 'models_y' - elif hasattr(est, 'model_regression'): - outcome_model_attr = 'models_regression' - elif hasattr(est, 'model_y_xw'): - outcome_model_attr = 'models_y_xw' - assert ( - hasattr( - getattr(est, outcome_model_attr)[0][0], - 'predict_proba' - ) - ), 'Auto outcome model is not a classifier!' + if discrete_treatment: + est_list += [ + LinearDRLearner(binary_outcome=binary_outcome), + ] + + for est in est_list: + print(est) + est.fit(Y, T, **filter_none_kwargs(X=X, Z=Z)) + est.score(Y, T, **filter_none_kwargs(X=X, Z=Z)) + est.effect(X=X) + est.const_marginal_effect(X=X) + est.marginal_effect(T, X=X) + est.ate(X=X) + + # make sure the auto outcome model is a classifier + if hasattr(est, 'model_y'): + outcome_model_attr = 'models_y' + elif hasattr(est, 'model_regression'): + outcome_model_attr = 'models_regression' + elif hasattr(est, 'model_y_xw'): + outcome_model_attr = 'models_y_xw' + assert ( + hasattr( + getattr(est, outcome_model_attr)[0][0], + 'predict_proba' + ) + ), 'Auto outcome model is not a classifier!' 
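The tests above exercise the new flag end to end. As a compact illustration of the same pattern — a minimal sketch only, assuming the LinearDML interface exercised in test_string_outcome and test_basic_functionality, with a data-generating process mirroring the one used in these tests:

import numpy as np
from econml.dml import LinearDML

# Synthetic data with a binary treatment and a binary outcome,
# mirroring the DGP used in the tests above.
n = 10000
true_ate = 0.3
W = np.random.uniform(-1, 1, size=(n, 1))
D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,))
Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,))

# binary_outcome=True routes the first-stage outcome model through a
# classifier (its predict_proba is used) rather than a regressor.
est = LinearDML(binary_outcome=True, discrete_treatment=True)
est.fit(Y, D, X=W)

print(est.ate(X=W))           # point estimate of the ATE
print(est.ate_interval(X=W))  # confidence interval around the ATE

The accuracy tests above assert only that the true ATE lands inside the reported interval in more than half of the rows, which is why the interval call is the part worth exercising here.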
From c98edbc4f56fa6b1b0f018bcd5f0dd23e03c04fb Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 27 Oct 2023 12:56:40 -0400 Subject: [PATCH 11/25] linting Signed-off-by: Fabio Vera --- econml/tests/test_binary_outcome.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index d68eb0b30..ccf42bd24 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -57,7 +57,6 @@ def test_accuracy(self): proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() np.testing.assert_array_less(0.50, proportion_in_interval) - # accuracy test, DML def test_accuracy_iv(self): n = 10000 @@ -85,7 +84,6 @@ def test_accuracy_iv(self): proportion_in_interval = ((ate_lb < true_ate) & (true_ate < ate_ub)).mean() np.testing.assert_array_less(0.50, proportion_in_interval) - def test_string_outcome(self): n = 100 true_ate = 0.3 @@ -96,7 +94,6 @@ def test_string_outcome(self): est = LinearDML(binary_outcome=True, discrete_treatment=True) est.fit(Y_str, D, X=W) - def test_basic_functionality(self): n = 100 binary_outcome = True From 1ff950527ba19d7dd1f763d986678226e2ad3a9b Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Thu, 9 Nov 2023 13:46:30 -0500 Subject: [PATCH 12/25] rlearner doctest Signed-off-by: Fabio Vera --- econml/dml/_rlearner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/econml/dml/_rlearner.py b/econml/dml/_rlearner.py index 02b6ec879..48a703a25 100644 --- a/econml/dml/_rlearner.py +++ b/econml/dml/_rlearner.py @@ -227,7 +227,7 @@ def _gen_rlearner_model_final(self): np.random.seed(123) X = np.random.normal(size=(1000, 3)) y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.01, size=(1000,)) - est = RLearner(cv=2, discrete_treatment=False, + est = RLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, categories='auto', random_state=None) est.fit(y, X[:, 0], X=np.ones((X.shape[0], 1)), W=X[:, 1:]) From a67eb544d1698cccff444d886a7355ae53024080 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Thu, 7 Dec 2023 14:09:42 -0500 Subject: [PATCH 13/25] linting Signed-off-by: Fabio Vera --- econml/dml/_rlearner.py | 27 +++++++++++++-------------- econml/iv/dml/_dml.py | 3 --- econml/iv/dr/_dr.py | 4 +++- econml/panel/dml/_dml.py | 4 +++- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/econml/dml/_rlearner.py b/econml/dml/_rlearner.py index 889409bb6..27932fd2c 100644 --- a/econml/dml/_rlearner.py +++ b/econml/dml/_rlearner.py @@ -290,20 +290,19 @@ def _gen_rlearner_model_final(self): is multidimensional, then the average of the MSEs for each dimension of Y is returned. 
""" - def __init__( - self, - *, - binary_outcome, - discrete_treatment, - treatment_featurizer, - categories, - cv, - random_state, - mc_iters=None, - mc_agg='mean', - allow_missing=False, - use_ray=False, - ray_remote_func_options=None): + def __init__(self, + *, + binary_outcome, + discrete_treatment, + treatment_featurizer, + categories, + cv, + random_state, + mc_iters=None, + mc_agg='mean', + allow_missing=False, + use_ray=False, + ray_remote_func_options=None): super().__init__(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, diff --git a/econml/iv/dml/_dml.py b/econml/iv/dml/_dml.py index 9856b65d1..21d581edb 100644 --- a/econml/iv/dml/_dml.py +++ b/econml/iv/dml/_dml.py @@ -417,7 +417,6 @@ def _gen_ortho_learner_model_nuisance(self): is_discrete=self.discrete_instrument, random_state=self.random_state) - return _OrthoIVNuisanceSelector(model_y, model_t, model_z, self.projection) @@ -1170,8 +1169,6 @@ def _gen_model_t_xw(self): def _gen_model_t_xwz(self): return _make_first_stage_selector(self.model_t_xwz, self.discrete_treatment, self.random_state) - > main - def _gen_model_final(self): return clone(self.model_final, safe=False) diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index f04b5ffe7..458cd7ef6 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -2464,7 +2464,9 @@ def _gen_prel_model_effect(self): return clone(self.prel_model_effect, safe=False) def _gen_ortho_learner_model_nuisance(self): - model_y_xw = _make_first_stage_selector(self.model_y_xw, is_discrete=self.binary_outcome, random_state=self.random_state) + model_y_xw = _make_first_stage_selector(self.model_y_xw, + is_discrete=self.binary_outcome, + random_state=self.random_state) model_t_xwz = _make_first_stage_selector(self.model_t_xwz, is_discrete=True, random_state=self.random_state) if self.z_propensity == "auto": diff --git a/econml/panel/dml/_dml.py b/econml/panel/dml/_dml.py index 366475256..8235e6422 100644 --- a/econml/panel/dml/_dml.py +++ b/econml/panel/dml/_dml.py @@ -542,7 +542,9 @@ def _gen_featurizer(self): return clone(self.featurizer, safe=False) def _gen_model_y(self): - return _make_first_stage_selector(self.model_y, is_discrete=self.binary_outcome, random_state=self.random_state) + return _make_first_stage_selector(self.model_y, + is_discrete=self.binary_outcome, + random_state=self.random_state) def _gen_model_t(self): return _make_first_stage_selector(self.model_t, From e104d73110807d89f35b2067d408626aa7f595b1 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Thu, 7 Dec 2023 14:21:54 -0500 Subject: [PATCH 14/25] more typos Signed-off-by: Fabio Vera --- econml/dml/dml.py | 1 - econml/iv/dr/_dr.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/econml/dml/dml.py b/econml/dml/dml.py index cdeacaf87..148d55451 100644 --- a/econml/dml/dml.py +++ b/econml/dml/dml.py @@ -722,7 +722,6 @@ def __init__(self, *, featurizer=None, treatment_featurizer=None, fit_cate_intercept=True, - linear_first_stages="deprecated, binary_outcome=False, linear_first_stages="deprecated", discrete_treatment=False, diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index 458cd7ef6..589f914e0 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -2464,7 +2464,7 @@ def _gen_prel_model_effect(self): return clone(self.prel_model_effect, safe=False) def _gen_ortho_learner_model_nuisance(self): - model_y_xw = _make_first_stage_selector(self.model_y_xw, + model_y_xw = _make_first_stage_selector(self.model_y_xw, 
is_discrete=self.binary_outcome, random_state=self.random_state) model_t_xwz = _make_first_stage_selector(self.model_t_xwz, is_discrete=True, random_state=self.random_state) @@ -2479,7 +2479,7 @@ def _gen_ortho_learner_model_nuisance(self): dummy_z = _make_first_stage_selector(dummy_z, is_discrete=True, random_state=self.random_state) return _IntentToTreatDRIVNuisanceSelector(model_y_xw, model_t_xwz, dummy_z, self._gen_prel_model_effect()) - + class _DummyCATE: """ From edc0b485a64f48654b551c05fa7fd3a28c8fe545 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 15 Dec 2023 15:14:05 -0500 Subject: [PATCH 15/25] bugfixes, docstrings, enable for intenttotreatdrivs Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 7 +- econml/dml/_rlearner.py | 3 + econml/dml/causal_forest.py | 43 ++- econml/dml/dml.py | 219 ++++++++---- econml/dr/_drlearner.py | 152 +++++++-- econml/dynamic/dml/__init__.py | 40 ++- econml/iv/dml/_dml.py | 216 ++++++++---- econml/iv/dr/_dr.py | 512 ++++++++++++++++++++-------- econml/panel/dml/_dml.py | 43 ++- econml/policy/_drlearner.py | 68 +++- econml/tests/test_binary_outcome.py | 32 +- 11 files changed, 983 insertions(+), 352 deletions(-) diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index 84fdf3870..3b0d34e98 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -327,6 +327,9 @@ class _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator): Parameters ---------- + binary_outcome: bool + Whether the outcome should be treated as binary + discrete_treatment: bool Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -921,7 +924,7 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group Z = self.z_transformer.transform(reshape(Z, (-1, 1))) if self.binary_outcome: - Y = self.outcome_transformer.transform(Y) + Y = self.outcome_transformer.transform(Y).reshape(Y.shape) if self.cv == 1: # special case, no cross validation folds = None @@ -1054,7 +1057,7 @@ def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): if self.z_transformer is not None: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) if self.binary_outcome: - Y = self.outcome_transformer.transform(Y) + Y = self.outcome_transformer.transform(Y).reshape(Y.shape) n_iters = len(self._models_nuisance) n_splits = len(self._models_nuisance[0]) diff --git a/econml/dml/_rlearner.py b/econml/dml/_rlearner.py index 27932fd2c..2120c6e17 100644 --- a/econml/dml/_rlearner.py +++ b/econml/dml/_rlearner.py @@ -137,6 +137,9 @@ class _RLearner(_OrthoLearner): Parameters ---------- + binary_outcome: bool + Whether the outcome should be treated as binary + discrete_treatment: bool Whether the treatment values should be treated as categorical, rather than continuous, quantities diff --git a/econml/dml/causal_forest.py b/econml/dml/causal_forest.py index 4e525561e..6b8ea1bd2 100644 --- a/econml/dml/causal_forest.py +++ b/econml/dml/causal_forest.py @@ -268,17 +268,35 @@ class CausalForestDML(_BaseDML): Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. 
- If estimator, it must implement `fit` and `predict` methods; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Determines how to fit the treatment to the features. str in a sentence + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -290,6 +308,9 @@ class CausalForestDML(_BaseDML): The final CATE will be trained on the outcome of featurizer.fit_transform(T). If featurizer=None, then CATE is trained on T. + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities diff --git a/econml/dml/dml.py b/econml/dml/dml.py index 148d55451..c4fa78038 100644 --- a/econml/dml/dml.py +++ b/econml/dml/dml.py @@ -348,20 +348,35 @@ class takes as input the parameter `model_t`, which is an arbitrary scikit-learn Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto' (default is 'auto') - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods. Must be a linear model for correctness - when linear_first_stages is ``True``; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/ - :class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. 
+ + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. model_final: estimator The estimator for fitting the response residuals to the treatment residuals. Must implement @@ -384,7 +399,10 @@ class takes as input the parameter `model_t`, which is an arbitrary scikit-learn Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) - discrete_treatment: bool, default False + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities categories: 'auto' or list, default 'auto' @@ -483,7 +501,9 @@ class takes as input the parameter `model_t`, which is an arbitrary scikit-learn """ def __init__(self, *, - model_y, model_t, model_final, + model_y, + model_t, + model_final, featurizer=None, treatment_featurizer=None, fit_cate_intercept=True, @@ -602,17 +622,35 @@ class LinearDML(StatsModelsCateEstimatorMixin, DML): Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. 
+ + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -631,6 +669,9 @@ class LinearDML(StatsModelsCateEstimatorMixin, DML): Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -722,8 +763,8 @@ def __init__(self, *, featurizer=None, treatment_featurizer=None, fit_cate_intercept=True, - binary_outcome=False, linear_first_stages="deprecated", + binary_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -826,20 +867,35 @@ class SparseLinearDML(DebiasedLassoCateEstimatorMixin, DML): Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods, and must be a - linear model for correctness; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/ - :class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. 
+ - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. alpha: str or float, default 'auto' CATE L1 regularization applied through the debiased lasso in the final model. @@ -888,6 +944,9 @@ class SparseLinearDML(DebiasedLassoCateEstimatorMixin, DML): Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -1111,19 +1170,32 @@ class KernelDML(DML): Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/ - :class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. 
+ - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. fit_cate_intercept : bool, default True Whether the linear CATE model should have a constant term. @@ -1134,6 +1206,9 @@ class KernelDML(DML): bw: float, default 1.0 The bandwidth of the Gaussian used to generate features + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -1320,13 +1395,32 @@ class NonParamDML(_BaseDML): Parameters ---------- - model_y: estimator - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. Must be a linear model for correctness when linear_first_stages is ``True``. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models - model_t: estimator - The estimator for fitting the treatment to the features. Must implement - `fit` and `predict` methods. Must be a linear model for correctness when linear_first_stages is ``True``. + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. model_final: estimator The estimator for fitting the response residuals to the treatment residuals. Must implement @@ -1337,6 +1431,9 @@ class NonParamDML(_BaseDML): The transformer used to featurize the raw features when fitting the final model. Must implement a `fit_transform` method. + binary_outcome: bool, default ``False`` + Whether the outcome should be treated as binary + discrete_treatment: bool, default ``False`` Whether the treatment values should be treated as categorical, rather than continuous, quantities diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py index 59883cbcd..c03f8b7ac 100644 --- a/econml/dr/_drlearner.py +++ b/econml/dr/_drlearner.py @@ -241,18 +241,35 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc Parameters ---------- - model_propensity : scikit-learn classifier or 'auto', default 'auto' + model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated. 
- Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T, - where T is a shape (n, ) array. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen. - model_regression : scikit-learn regressor or 'auto', default 'auto' + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV + - 'forest' - RandomForestClassifier + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'. + + model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments) - concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and - `predict` methods. If different models per treatment arm are desired, see the - :class:`.MultiModelWrapper` helper class. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. + concatenated. The one-hot-encoding excludes the baseline treatment. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. model_final : estimator for the final cate model. Trained on regressing the doubly robust potential outcomes @@ -268,6 +285,9 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc mono-task model and a separate clone of the model is trained for each outcome. Then predict(X) of the t-th clone will be the CATE of the t-th lexicographically ordered treatment compared to the baseline. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + multitask_model_final : bool, default False Whether the model_final should be treated as a multi-task model. See description of model_final. @@ -783,18 +803,35 @@ class LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): Parameters ---------- - model_propensity : scikit-learn classifier or 'auto', default 'auto' + model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated. - Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T, - where T is a shape (n, ) array. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen. - model_regression : scikit-learn regressor or 'auto', default 'auto' + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. 
+ + - 'linear' - LogisticRegressionCV + - 'forest' - RandomForestClassifier + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'. + + model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments) - concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and - `predict` methods. If different models per treatment arm are desired, see the - :class:`.MultiModelWrapper` helper class. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. + concatenated. The one-hot-encoding excludes the baseline treatment. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -804,6 +841,9 @@ class LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): fit_cate_intercept : bool, default True Whether the linear CATE model should have a constant term. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + min_propensity : float, default ``1e-6`` The minimum propensity at which to clip propensity estimates to avoid dividing by zero. @@ -903,9 +943,9 @@ class LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): def __init__(self, *, model_propensity='auto', model_regression='auto', - binary_outcome=False, featurizer=None, fit_cate_intercept=True, + binary_outcome=False, min_propensity=1e-6, categories='auto', cv=2, @@ -1048,18 +1088,35 @@ class SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner): Parameters ---------- - model_propensity : scikit-learn classifier or 'auto', default 'auto' + model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated. - Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T, - where T is a shape (n, ) array. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen. - model_regression : scikit-learn regressor or 'auto', default 'auto' + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV + - 'forest' - RandomForestClassifier + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. 
+ - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'. + + model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments) - concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and - `predict` methods. If different models per treatment arm are desired, see the - :class:`.MultiModelWrapper` helper class. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. + concatenated. The one-hot-encoding excludes the baseline treatment. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -1069,6 +1126,9 @@ class SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner): fit_cate_intercept : bool, default True Whether the linear CATE model should have a constant term. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + alpha: str | float, optional., default 'auto'. CATE L1 regularization applied through the debiased lasso in the final model. 'auto' corresponds to a CV form of the :class:`DebiasedLasso`. @@ -1330,16 +1390,38 @@ class ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner): Parameters ---------- - model_propensity : scikit-learn classifier + model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated. - Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T, - where T is a shape (n, ) array. - model_regression : scikit-learn regressor + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV + - 'forest' - RandomForestClassifier + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'. + + model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments) - concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and - `predict` methods. If different models per treatment arm are desired, see the - :class:`~econml.utilities.MultiModelWrapper` helper class. + concatenated. The one-hot-encoding excludes the baseline treatment. 
+ + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + binary_outcome: bool, default False + Whether the outcome should be treated as binary min_propensity : float, default ``1e-6`` The minimum propensity at which to clip propensity estimates to avoid dividing by zero. diff --git a/econml/dynamic/dml/__init__.py b/econml/dynamic/dml/__init__.py index 0185ea702..6d9949296 100755 --- a/econml/dynamic/dml/__init__.py +++ b/econml/dynamic/dml/__init__.py @@ -44,17 +44,35 @@ def DynamicDML(*, Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. 
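The first-stage selection vocabulary documented above (an estimator instance, a 'linear' or 'forest' keyword, a list of candidates, or 'auto') is shared across the estimators touched by this patch. A minimal sketch of how those options combine, assuming the keyword and list semantics exactly as stated in these docstrings; GradientBoostingClassifier is just an arbitrary user-supplied classifier used for illustration:

import numpy as np
from econml.dml import LinearDML
from sklearn.ensemble import GradientBoostingClassifier

n = 2000
X = np.random.normal(size=(n, 3))
T = np.random.binomial(1, 0.5, size=(n,))
Y = np.random.binomial(1, 0.4 + 0.2 * T, size=(n,))

est = LinearDML(
    # 'forest' keyword: per the docstrings above, resolves to
    # RandomForestClassifier because binary_outcome=True.
    model_y='forest',
    # A list mixes keywords and user-supplied estimators; the best candidate
    # is selected and then used for fitting. The user-supplied classifier
    # must expose predict_proba since discrete_treatment=True.
    model_t=['linear', GradientBoostingClassifier()],
    binary_outcome=True,
    discrete_treatment=True,
)
est.fit(Y, T, X=X)
est.const_marginal_effect(X)  # per-row CATE estimates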
diff --git a/econml/iv/dml/_dml.py b/econml/iv/dml/_dml.py index 21d581edb..cc22cfdbc 100644 --- a/econml/iv/dml/_dml.py +++ b/econml/iv/dml/_dml.py @@ -204,30 +204,65 @@ class OrthoIV(LinearModelFinalCateEstimatorMixin, _OrthoLearner): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_z_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous instrument. + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. 
+ + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_z_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Z | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_instrument=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_instrument=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_instrument=True. projection: bool, default False If True, we fit a slight variant of OrthoIV where we use E[T|X, W, Z] as the instrument as opposed to Z, @@ -241,6 +276,9 @@ class OrthoIV(LinearModelFinalCateEstimatorMixin, _OrthoLearner): fit_cate_intercept : bool, default False Whether the linear CATE model should have a constant term. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_treatment: bool, default False Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -996,23 +1034,50 @@ class DMLIV(_BaseDMLIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. 
+ - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. model_final : estimator (default is :class:`.StatsModelsLinearRegression`) final model that at fit time takes as input :math:`(Y-\\E[Y|X])`, :math:`(\\E[T|X,Z]-\\E[T|X])` and X @@ -1028,6 +1093,9 @@ class DMLIV(_BaseDMLIV): discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_treatment: bool, default False Whether the treatment values should be treated as categorical, rather than continuous, quantities @@ -1374,25 +1442,50 @@ class NonParamDMLIV(_BaseDMLIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and either `predict` or `predict_proba` methods, - depending on whether the treatment is discrete. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and either `predict` or `predict_proba` - methods, depending on whether the treatment is discrete. 
- If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. model_final : estimator final model for predicting :math:`\\tilde{Y}` from X with sample weights V(X) @@ -1401,6 +1494,9 @@ class NonParamDMLIV(_BaseDMLIV): The transformer used to featurize the raw features when fitting the final model. Must implement a `fit_transform` method. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_treatment: bool, default False Whether the treatment values should be treated as categorical, rather than continuous, quantities diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index 589f914e0..54ff0c523 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -711,39 +711,83 @@ class DRIV(_DRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. 
- - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_z_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous instrument. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_tz_xw : estimator or 'auto' (default is 'auto') + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_z_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Z | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_instrument=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_instrument=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_instrument=True. 
+ + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_tz_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T*Z | X, W]` or :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` depending on `fit_cov_directly`. - Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument and discrete treatment with `fit_cov_directly=False`, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied otherwise. + Target will be discrete if discrete instrument and discrete treatment with `fit_cov_directly=False`, + else target will be continuous. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete target else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete target else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete target. fit_cov_directly : bool, default True Whether to fit :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` instead of :math:`\\E[T*Z | X, W]`. @@ -790,6 +834,9 @@ class DRIV(_DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities @@ -1197,39 +1244,83 @@ class LinearDRIV(StatsModelsCateEstimatorMixin, DRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_z_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods. 
- If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous instrument. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_tz_xw : estimator or 'auto' (default is 'auto') + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_z_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Z | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_instrument=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_instrument=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_instrument=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. 
+ + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_tz_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T*Z | X, W]` or :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` depending on `fit_cov_directly`. - Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument and discrete treatment with `fit_cov_directly=False`, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied otherwise. + Target will be discrete if discrete instrument and discrete treatment with `fit_cov_directly=False`, + else target will be continuous. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete target else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete target else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete target. fit_cov_directly : bool, default True Whether to fit :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` instead of :math:`\\E[T*Z | X, W]`. @@ -1273,6 +1364,9 @@ class LinearDRIV(StatsModelsCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities @@ -1520,39 +1614,83 @@ class SparseLinearDRIV(DebiasedLassoCateEstimatorMixin, DRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_z_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous instrument. 
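The target-type rule for model_tz_xw described above can be seen with a toy calculation (a sketch only; the mean-centering below merely stands in for the residualization that the estimator actually performs with its fitted first-stage models):

    import numpy as np

    rng = np.random.default_rng(2)
    T = rng.binomial(1, 0.5, size=8)
    Z = rng.binomial(1, 0.5, size=8)

    # fit_cov_directly=False with discrete T and Z: the target T*Z only takes values in {0, 1},
    # so a classifier such as LogisticRegressionCV is the natural 'linear' choice.
    print(np.unique(T * Z))

    # fit_cov_directly=True: the target is a product of residualized quantities, which is
    # continuous even when T and Z are discrete, so a regressor is used instead.
    T_res = T - T.mean()   # stand-in for the treatment residual
    Z_res = Z - Z.mean()   # stand-in for the instrument residual
    print(np.unique(T_res * Z_res))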
- - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_tz_xw : estimator or 'auto' (default is 'auto') + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_z_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Z | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_instrument=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_instrument=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_instrument=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + Model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. 
+ - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_tz_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T*Z | X, W]` or :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` depending on `fit_cov_directly`. - Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument and discrete treatment with `fit_cov_directly=False`, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied otherwise. + Target will be discrete if discrete instrument and discrete treatment with `fit_cov_directly=False`, + else target will be continuous. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete target else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete target else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete target. fit_cov_directly : bool, default True Whether to fit :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` instead of :math:`\\E[T*Z | X, W]`. @@ -1626,6 +1764,9 @@ class SparseLinearDRIV(DebiasedLassoCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities @@ -1889,39 +2030,83 @@ class ForestDRIV(ForestModelFinalCateEstimatorMixin, DRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. - - model_z_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Z | X, W]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous instrument. - - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. 
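A hedged sketch of the high-dimensional-controls setting that SparseLinearDRIV, whose parameters are documented above, is aimed at (the data, dimensions, and the interval call are illustrative assumptions, not part of the patch):

    import numpy as np
    import scipy.special
    from econml.iv.dr import SparseLinearDRIV

    rng = np.random.default_rng(3)
    n = 2000
    X = rng.normal(size=(n, 5))      # features that drive effect heterogeneity
    W = rng.normal(size=(n, 50))     # many controls, absorbed by the first-stage models
    Z = rng.binomial(1, 0.5, size=n)
    T = rng.binomial(1, scipy.special.expit(Z + W[:, 0]))
    Y = rng.binomial(1, scipy.special.expit(T * (1 + X[:, 0]) + W[:, 1]))

    est = SparseLinearDRIV(
        model_y_xw='auto',
        model_t_xw='auto',
        model_z_xw='auto',
        binary_outcome=True,
        discrete_treatment=True,
        discrete_instrument=True,
    )
    est.fit(Y, T, Z=Z, X=X, W=W)
    print(est.effect(X[:3]))
    print(est.effect_interval(X[:3]))   # debiased-lasso intervals, assuming the default inference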
- - model_tz_xw : estimator or 'auto' (default is 'auto') + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_z_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Z | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_instrument=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_instrument=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_instrument=True. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. + + model_tz_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T*Z | X, W]` or :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` depending on `fit_cov_directly`. - Must support `fit` and `predict` methods. 
- If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete instrument and discrete treatment with `fit_cov_directly=False`, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied otherwise. + Target will be discrete if discrete instrument and discrete treatment with `fit_cov_directly=False`, + else target will be continuous. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete target else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete target else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete target. fit_cov_directly : bool, default True Whether to fit :math:`\\E[\\tilde{T}*\\tilde{Z} | X, W]` instead of :math:`\\E[T*Z | X, W]`. @@ -2067,6 +2252,9 @@ class ForestDRIV(ForestModelFinalCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. + binary_outcome: bool, default False + Whether the outcome should be treated as binary + discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities @@ -2429,6 +2617,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=False, + binary_outcome=False, cov_clip=1e-3, opt_reweighted=False, categories='auto', @@ -2447,6 +2636,7 @@ def __init__(self, *, super().__init__(model_final=model_final, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, + binary_outcome=binary_outcome, cov_clip=cov_clip, cv=cv, mc_iters=mc_iters, @@ -2504,14 +2694,35 @@ class IntentToTreatDRIV(_IntentToTreatDRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict_proba` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment. + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W, Z]`. 
+ + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. flexible_model_effect : estimator or 'auto' (default is 'auto') a flexible model for a preliminary version of the CATE, must accept sample_weight at fit time. @@ -2655,6 +2866,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=False, + binary_outcome=True, cov_clip=1e-3, cv=3, mc_iters=None, @@ -2681,6 +2893,7 @@ def __init__(self, *, z_propensity=z_propensity, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, + binary_outcome=binary_outcome, cov_clip=cov_clip, opt_reweighted=opt_reweighted, categories=categories, @@ -2800,15 +3013,36 @@ class LinearIntentToTreatDRIV(StatsModelsCateEstimatorMixin, IntentToTreatDRIV): Parameters ---------- - model_y_xw : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[Y | X, W]`. Must support `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. + model_y_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. - model_t_xwz : estimator or 'auto' (default is 'auto') - model to estimate :math:`\\E[T | X, W, Z]`. Must support `fit` and `predict_proba` methods. - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` - will be applied for discrete treatment. + + model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' + model to estimate :math:`\\E[T | X, W, Z]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. 
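A hedged sketch of the intent-to-treat setting these classes target, using the parameters documented above (the assignment, compliance, and outcome construction below are synthetic assumptions for illustration):

    import numpy as np
    import scipy.special
    from econml.iv.dr import LinearIntentToTreatDRIV

    rng = np.random.default_rng(4)
    n = 3000
    X = rng.normal(size=(n, 3))
    Z = rng.binomial(1, 0.5, size=n)          # randomized assignment (the instrument)
    complier = rng.binomial(1, 0.7, size=n)   # whether an assigned unit actually takes treatment
    T = Z * complier                          # treatment received
    Y = rng.binomial(1, scipy.special.expit(T * (1 + X[:, 0])))  # binary outcome

    est = LinearIntentToTreatDRIV(
        model_y_xw='forest',    # classifier, since binary_outcome=True
        model_t_xwz='linear',   # LogisticRegressionCV for the binary treatment
        binary_outcome=True,
    )
    est.fit(Y, T, Z=Z, X=X)
    print(est.effect(X[:5]))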
flexible_model_effect : estimator or 'auto' (default is 'auto') a flexible model for a preliminary version of the CATE, must accept sample_weight at fit time. @@ -2960,6 +3194,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=True, + binary_outcome=False, cov_clip=1e-3, cv=3, mc_iters=None, @@ -2980,6 +3215,7 @@ def __init__(self, *, z_propensity=z_propensity, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, + binary_outcome=binary_outcome, cov_clip=cov_clip, cv=cv, mc_iters=mc_iters, diff --git a/econml/panel/dml/_dml.py b/econml/panel/dml/_dml.py index 8235e6422..b85aa43aa 100644 --- a/econml/panel/dml/_dml.py +++ b/econml/panel/dml/_dml.py @@ -344,17 +344,35 @@ class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner): Parameters ---------- - model_y: estimator or 'auto', default 'auto' - The estimator for fitting the response to the features. Must implement - `fit` and `predict` methods. - If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. - - model_t: estimator or 'auto', default 'auto' - The estimator for fitting the treatment to the features. - If estimator, it must implement `fit` and `predict` methods; - If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment, - and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` - will be applied for continuous treatment. + model_y: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + model to estimate :math:`\\E[Y | X, W]`. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if binary_outcome=True. + + model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' + Determines how to fit the treatment to the features. + + - If an estimator, will use the model as is for fitting. + - If str, will use model associated with the keyword. + + - 'linear' - LogisticRegressionCV if discrete_treatment=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_treatment=True else RandomForestRegressor + - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ + and then use the best estimator for fitting. + - If 'auto', model will select over linear and forest models + + User-supplied estimators should support 'fit' and 'predict' methods, + and additionally 'predict_proba' if discrete_treatment=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. 
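For the panel estimator documented above, a minimal hedged sketch (the balanced toy panel, the groups construction, and the final call are illustrative assumptions; DynamicDML expects each unit to be observed for the same number of consecutive periods):

    import numpy as np
    from econml.panel.dml import DynamicDML

    rng = np.random.default_rng(5)
    n_units, n_periods = 500, 3
    n = n_units * n_periods
    groups = np.repeat(np.arange(n_units), n_periods)   # unit id repeated once per period
    X = rng.normal(size=(n, 2))
    T = rng.binomial(1, 0.5, size=n)
    Y = T + X[:, 0] + rng.normal(scale=0.1, size=n)

    est = DynamicDML(
        model_y='auto',      # continuous outcome here, so 'auto' selects among regression models
        model_t='auto',      # discrete treatment, so 'auto' selects among classifiers
        discrete_treatment=True,
    )
    est.fit(Y, T, X=X, groups=groups)
    print(est.const_marginal_ate(X))   # average per-period marginal effects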
@@ -368,6 +386,9 @@ class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner):
         Whether the first stage models are linear (in which case we will expand the features passed to
         `model_y` accordingly)
 
+    binary_outcome: bool, default False
+        Whether the outcome should be treated as binary
+
     discrete_treatment: bool, default ``False``
         Whether the treatment values should be treated as categorical, rather than continuous, quantities
 
diff --git a/econml/policy/_drlearner.py b/econml/policy/_drlearner.py
index 05a50989a..2ee38c158 100644
--- a/econml/policy/_drlearner.py
+++ b/econml/policy/_drlearner.py
@@ -239,18 +239,34 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc
 
     Parameters
     ----------
-    model_propensity : scikit-learn classifier or 'auto', default 'auto'
+    model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto'
         Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.
-        Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,
-        where T is a shape (n, ) array.
-        If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.
-    model_regression : scikit-learn regressor or 'auto', default 'auto'
+
+        - If an estimator, will use the model as is for fitting.
+        - If str, will use model associated with the keyword.
+
+            - 'linear' - LogisticRegressionCV
+            - 'forest' - RandomForestClassifier
+        - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \
+            and then use the best estimator for fitting.
+        - If 'auto', model will select over linear and forest models
+
+        User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'.
+
+    model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto'
         Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)
-        concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and
-        `predict` methods. If different models per treatment arm are desired, see the
-        :class:`.MultiModelWrapper` helper class.
-        If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
+        concatenated. The one-hot-encoding excludes the baseline treatment.
+
+        - If an estimator, will use the model as is for fitting.
+        - If str, will use model associated with the keyword.
+
+            - 'linear' - WeightedLassoCVWrapper
+            - 'forest' - RandomForestRegressor
+        - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \
+            and then use the best estimator for fitting.
+        - If 'auto', model will select over linear and forest models
+
+        User-supplied estimators should support 'fit' and 'predict' methods.
 
     featurizer : :term:`transformer`, optional
         Must support fit_transform and transform. Used to create composite features in the final CATE regression.
@@ -635,18 +651,34 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc
 
     Parameters
     ----------
-    model_propensity : scikit-learn classifier or 'auto', default 'auto'
+    model_propensity : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto'
         Estimator for Pr[T=t | X, W]. Trained by regressing treatments on (features, controls) concatenated.
-        Must implement `fit` and `predict_proba` methods. The `fit` method must be able to accept X and T,
-        where T is a shape (n, ) array.
-        If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be chosen.
-    model_regression : scikit-learn regressor or 'auto', default 'auto'
+
+        - If an estimator, will use the model as is for fitting.
+        - If str, will use model associated with the keyword.
+
+            - 'linear' - LogisticRegressionCV
+            - 'forest' - RandomForestClassifier
+        - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \
+            and then use the best estimator for fitting.
+        - If 'auto', model will select over linear and forest models
+
+        User-supplied estimators should support 'fit' and 'predict', and 'predict_proba'.
+
+    model_regression : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto'
         Estimator for E[Y | X, W, T]. Trained by regressing Y on (features, controls, one-hot-encoded treatments)
-        concatenated. The one-hot-encoding excludes the baseline treatment. Must implement `fit` and
-        `predict` methods. If different models per treatment arm are desired, see the
-        :class:`.MultiModelWrapper` helper class.
-        If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
+        concatenated. The one-hot-encoding excludes the baseline treatment.
+
+        - If an estimator, will use the model as is for fitting.
+        - If str, will use model associated with the keyword.
+
+            - 'linear' - WeightedLassoCVWrapper
+            - 'forest' - RandomForestRegressor
+        - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \
+            and then use the best estimator for fitting.
+        - If 'auto', model will select over linear and forest models
+
+        User-supplied estimators should support 'fit' and 'predict' methods.
 
     featurizer : :term:`transformer`, optional
         Must support fit_transform and transform. Used to create composite features in the final CATE regression.
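The test diff that follows fits each estimator on binary data and then checks the fitted outcome nuisance; condensed to a single estimator, that check looks roughly like this (synthetic data assumed, attribute names as in the test below):

    import numpy as np
    import scipy.special
    from econml.dml import LinearDML

    rng = np.random.default_rng(6)
    X = rng.normal(size=(1000, 3))
    T = rng.binomial(1, 0.5, size=1000)
    Y = rng.binomial(1, scipy.special.expit(T + X[:, 0]))

    est = LinearDML(binary_outcome=True, discrete_treatment=True)
    est.fit(Y, T, X=X)

    # with binary_outcome=True the automatically selected outcome model should be a classifier
    assert hasattr(est.models_y[0][0], 'predict_proba'), 'Auto outcome model is not a classifier!'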
diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index ccf42bd24..127c19d7a 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -11,9 +11,9 @@ from econml._ortho_learner import _OrthoLearner from econml.dml import LinearDML, SparseLinearDML, KernelDML, CausalForestDML, NonParamDML -from econml.dr import LinearDRLearner +from econml.dr import LinearDRLearner, ForestDRLearner from econml.iv.dml import OrthoIV, DMLIV, NonParamDMLIV -from econml.iv.dr import DRIV, LinearDRIV, SparseLinearDRIV, ForestDRIV +from econml.iv.dr import DRIV, LinearDRIV, SparseLinearDRIV, ForestDRIV, IntentToTreatDRIV, LinearIntentToTreatDRIV from econml.orf import DMLOrthoForest from econml.utilities import filter_none_kwargs @@ -119,20 +119,42 @@ def gen_array(n, is_binary, d): if Z is not None: est_list = [ - DRIV(binary_outcome=binary_outcome), - DMLIV(binary_outcome=binary_outcome), - OrthoIV(binary_outcome=binary_outcome), + DRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + DMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + SparseLinearDRIV(binary_outcome=binary_outcome, + discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + ForestDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument), + NonParamDMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + discrete_instrument=discrete_instrument) ] + if discrete_instrument: + est_list += [ + LinearIntentToTreatDRIV(binary_outcome=binary_outcome), + IntentToTreatDRIV(binary_outcome=binary_outcome), + ] + else: est_list = [ LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + SparseLinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment) ] if discrete_treatment: est_list += [ LinearDRLearner(binary_outcome=binary_outcome), + ForestDRLearner(binary_outcome=binary_outcome), ] for est in est_list: From 79a3b07173f9673506cb514ce3dc6e11695e37cc Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 15 Dec 2023 15:45:29 -0500 Subject: [PATCH 16/25] fix default Signed-off-by: Fabio Vera --- econml/iv/dr/_dr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index 54ff0c523..abc1d601d 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -2866,7 +2866,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=False, - binary_outcome=True, + binary_outcome=False, cov_clip=1e-3, cv=3, mc_iters=None, From 17a0b361e6e6716b71b79e073a423c379d86cfd3 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 15 Dec 2023 16:37:23 -0500 Subject: [PATCH 17/25] bugfixes Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 4 ++-- econml/tests/test_binary_outcome.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/econml/_ortho_learner.py 
b/econml/_ortho_learner.py index 3b0d34e98..f5f91b36e 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -924,7 +924,7 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group Z = self.z_transformer.transform(reshape(Z, (-1, 1))) if self.binary_outcome: - Y = self.outcome_transformer.transform(Y).reshape(Y.shape) + Y = self.outcome_transformer.transform(Y).reshape(-1, 1) if self.cv == 1: # special case, no cross validation folds = None @@ -1057,7 +1057,7 @@ def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): if self.z_transformer is not None: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) if self.binary_outcome: - Y = self.outcome_transformer.transform(Y).reshape(Y.shape) + Y = self.outcome_transformer.transform(Y).reshape(-1, 1) n_iters = len(self._models_nuisance) n_splits = len(self._models_nuisance[0]) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index 127c19d7a..2547a8e72 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -135,7 +135,7 @@ def gen_array(n, is_binary, d): OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), NonParamDMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, - discrete_instrument=discrete_instrument) + discrete_instrument=discrete_instrument, model_final=LinearRegression()) ] if discrete_instrument: From 6ba3b1fe0cf8170b0eae8f2d535971a9f2c3a711 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Tue, 2 Jan 2024 09:51:37 -0500 Subject: [PATCH 18/25] test_binary_outcome bugfix Signed-off-by: Fabio Vera --- econml/tests/test_binary_outcome.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index 2547a8e72..c86eeb3ab 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -138,7 +138,7 @@ def gen_array(n, is_binary, d): discrete_instrument=discrete_instrument, model_final=LinearRegression()) ] - if discrete_instrument: + if discrete_instrument and discrete_treatment: est_list += [ LinearIntentToTreatDRIV(binary_outcome=binary_outcome), IntentToTreatDRIV(binary_outcome=binary_outcome), From 5d75de488bc88e8991116f04944889cc206c1d9f Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Tue, 2 Jan 2024 16:30:17 -0500 Subject: [PATCH 19/25] adjust tests Signed-off-by: Fabio Vera --- econml/tests/test_binary_outcome.py | 31 +++++++++++++++-------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_binary_outcome.py index c86eeb3ab..8e935a9a8 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_binary_outcome.py @@ -134,8 +134,9 @@ def gen_array(n, is_binary, d): discrete_instrument=discrete_instrument), OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - NonParamDMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, - discrete_instrument=discrete_instrument, model_final=LinearRegression()) + # uncomment when issue #837 is resolved + # NonParamDMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + # discrete_instrument=discrete_instrument, model_final=LinearRegression()) ] if discrete_instrument and discrete_treatment: @@ -166,16 +167,16 @@ def gen_array(n, is_binary, d): est.marginal_effect(T, 
X=X) est.ate(X=X) - # make sure the auto outcome model is a classifier - if hasattr(est, 'model_y'): - outcome_model_attr = 'models_y' - elif hasattr(est, 'model_regression'): - outcome_model_attr = 'models_regression' - elif hasattr(est, 'model_y_xw'): - outcome_model_attr = 'models_y_xw' - assert ( - hasattr( - getattr(est, outcome_model_attr)[0][0], - 'predict_proba' - ) - ), 'Auto outcome model is not a classifier!' + # make sure the auto outcome model is a classifier + if hasattr(est, 'model_y'): + outcome_model_attr = 'models_y' + elif hasattr(est, 'model_regression'): + outcome_model_attr = 'models_regression' + elif hasattr(est, 'model_y_xw'): + outcome_model_attr = 'models_y_xw' + assert ( + hasattr( + getattr(est, outcome_model_attr)[0][0], + 'predict_proba' + ) + ), 'Auto outcome model is not a classifier!' From 9e7d70149bf1c0de7fdbe201f6da0afe796a0c35 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 5 Jan 2024 13:21:13 -0500 Subject: [PATCH 20/25] address comments; binary_outcome->discrete_outcome, improve warnings Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 56 ++++--------- econml/dml/_rlearner.py | 8 +- econml/dml/causal_forest.py | 14 ++-- econml/dml/dml.py | 72 ++++++++-------- econml/dr/_drlearner.py | 63 +++++++------- econml/dynamic/dml/__init__.py | 6 +- econml/iv/dml/_dml.py | 42 +++++----- econml/iv/dr/_dr.py | 84 +++++++++---------- econml/panel/dml/_dml.py | 14 ++-- econml/tests/test_bootstrap.py | 14 ++-- ...ry_outcome.py => test_discrete_outcome.py} | 52 ++++++------ econml/tests/test_missing_values.py | 2 +- econml/tests/test_ortho_learner.py | 15 ++-- econml/tests/test_treatment_featurization.py | 2 +- econml/tests/test_utilities.py | 13 ++- econml/utilities.py | 28 ++++++- 16 files changed, 253 insertions(+), 232 deletions(-) rename econml/tests/{test_binary_outcome.py => test_discrete_outcome.py} (72%) diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index f5f91b36e..76a51a374 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -43,7 +43,7 @@ class in this module implements the general logic in a very versatile way TreatmentExpansionMixin) from .inference import BootstrapInference from .utilities import (_deprecate_positional, check_input_arrays, - cross_product, filter_none_kwargs, + cross_product, filter_none_kwargs, single_strata_from_discrete_arrays, inverse_onehot, jacify_featurizer, ndim, reshape, shape, transpose) from .sklearn_extensions.model_selection import ModelSelector @@ -327,7 +327,7 @@ class _OrthoLearner(TreatmentExpansionMixin, LinearCateEstimator): Parameters ---------- - binary_outcome: bool + discrete_outcome: bool Whether the outcome should be treated as binary discrete_treatment: bool @@ -429,7 +429,7 @@ def _gen_ortho_learner_model_final(self): np.random.seed(123) X = np.random.normal(size=(100, 3)) y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.1, size=(100,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) @@ -487,7 +487,7 @@ def _gen_ortho_learner_model_final(self): import scipy.special T = np.random.binomial(1, scipy.special.expit(W[:, 0])) y = T + W[:, 0] + np.random.normal(0, 0.01, size=(100,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=True, discrete_instrument=False, + est = OrthoLearner(cv=2, 
discrete_outcome=False, discrete_treatment=True, discrete_instrument=False, treatment_featurizer=None, categories='auto', random_state=None) est.fit(y, T, W=W) @@ -519,7 +519,7 @@ def _gen_ortho_learner_model_final(self): """ def __init__(self, *, - binary_outcome, + discrete_outcome, discrete_treatment, treatment_featurizer, discrete_instrument, @@ -532,7 +532,7 @@ def __init__(self, *, use_ray=False, ray_remote_func_options=None): self.cv = cv - self.binary_outcome = binary_outcome + self.discrete_outcome = discrete_outcome self.discrete_treatment = discrete_treatment self.treatment_featurizer = treatment_featurizer self.discrete_instrument = discrete_instrument @@ -629,40 +629,14 @@ def _strata(self, Y, T, X=None, W=None, Z=None, sample_weight=None, freq_weight=None, sample_var=None, groups=None, cache_values=False, only_final=False, check_input=True): arrs = [] - if self.binary_outcome: + if self.discrete_outcome: arrs.append(Y) if self.discrete_treatment: arrs.append(T) if self.discrete_instrument: arrs.append(Z) - return self._single_strata_from_discrete_arrays(arrs) - - """ - Combine multiple discrete arrays into a single array for stratification purposes: - - e.g. if arrs are - [0 1 2 0 1 2 0 1 2 0 1 2], - [0 1 0 1 0 1 0 1 0 1 0 1], - [0 0 0 0 0 0 1 1 1 1 1 1] - then output will be - [0 8 4 6 2 10 1 9 5 7 3 11] - - Every distinct combination of these discrete arrays will have it's own label. - """ - - def _single_strata_from_discrete_arrays(self, arrs): - if not arrs: - return None - - curr_array = np.zeros(shape=arrs[0].ravel().shape, dtype='int') - - for arr in arrs: - enc = LabelEncoder() - temp = enc.fit_transform(arr.ravel()) - curr_array = temp + curr_array * len(enc.classes_) - - return curr_array + return single_strata_from_discrete_arrays(arrs) def _prefit(self, Y, T, *args, only_final=False, **kwargs): @@ -739,15 +713,17 @@ def fit(self, Y, T, *, X=None, W=None, Z=None, sample_weight=None, freq_weight=N if not only_final: - if self.binary_outcome: + if self.discrete_outcome: self.outcome_transformer = LabelEncoder() self.outcome_transformer.fit(Y) if Y.shape[1:] and Y.shape[1] > 1: raise ValueError( - f"Only one outcome variable is supported when binary_outcome=True. Got Y of shape {Y.shape}") + f"Only one outcome variable is supported when discrete_outcome=True. Got Y of shape {Y.shape}") if len(self.outcome_transformer.classes_) > 2: raise AttributeError( - "More than 2 outcome classes detected. This method currently only supports binary outcomes") + f"({self.outcome_transformer.classes_} outcome classes detected. \ + Currently, only 2 outcome classes are allowed when discrete_outcome=True. 
\ + Classes provided include {self.outcome_transformer.classes_[:5]}") else: self.outcome_transformer = None @@ -910,7 +886,7 @@ def refit_final(self, inference=None): def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): # use a binary array to get stratified split in case of discrete treatment - stratify = self.discrete_treatment or self.discrete_instrument or self.binary_outcome + stratify = self.discrete_treatment or self.discrete_instrument or self.discrete_outcome strata = self._strata(Y, T, X=X, W=W, Z=Z, sample_weight=sample_weight, groups=groups) if strata is None: strata = T # always safe to pass T as second arg to split even if we're not actually stratifying @@ -923,7 +899,7 @@ def _fit_nuisances(self, Y, T, X=None, W=None, Z=None, sample_weight=None, group if self.discrete_instrument: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) - if self.binary_outcome: + if self.discrete_outcome: Y = self.outcome_transformer.transform(Y).reshape(-1, 1) if self.cv == 1: # special case, no cross validation @@ -1056,7 +1032,7 @@ def score(self, Y, T, X=None, W=None, Z=None, sample_weight=None, groups=None): X, T = self._expand_treatments(X, T) if self.z_transformer is not None: Z = self.z_transformer.transform(reshape(Z, (-1, 1))) - if self.binary_outcome: + if self.discrete_outcome: Y = self.outcome_transformer.transform(Y).reshape(-1, 1) n_iters = len(self._models_nuisance) n_splits = len(self._models_nuisance[0]) diff --git a/econml/dml/_rlearner.py b/econml/dml/_rlearner.py index 2120c6e17..b4c346b26 100644 --- a/econml/dml/_rlearner.py +++ b/econml/dml/_rlearner.py @@ -137,7 +137,7 @@ class _RLearner(_OrthoLearner): Parameters ---------- - binary_outcome: bool + discrete_outcome: bool Whether the outcome should be treated as binary discrete_treatment: bool @@ -245,7 +245,7 @@ def _gen_rlearner_model_final(self): np.random.seed(123) X = np.random.normal(size=(1000, 3)) y = X[:, 0] + X[:, 1] + np.random.normal(0, 0.01, size=(1000,)) - est = RLearner(cv=2, binary_outcome=False, discrete_treatment=False, + est = RLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, categories='auto', random_state=None) est.fit(y, X[:, 0], X=np.ones((X.shape[0], 1)), W=X[:, 1:]) @@ -295,7 +295,7 @@ def _gen_rlearner_model_final(self): def __init__(self, *, - binary_outcome, + discrete_outcome, discrete_treatment, treatment_featurizer, categories, @@ -306,7 +306,7 @@ def __init__(self, allow_missing=False, use_ray=False, ray_remote_func_options=None): - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, discrete_instrument=False, # no instrument, so doesn't matter diff --git a/econml/dml/causal_forest.py b/econml/dml/causal_forest.py index 6b8ea1bd2..a3affed39 100644 --- a/econml/dml/causal_forest.py +++ b/econml/dml/causal_forest.py @@ -274,14 +274,14 @@ class CausalForestDML(_BaseDML): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Determines how to fit the treatment to the features. str in a sentence @@ -308,7 +308,7 @@ class CausalForestDML(_BaseDML): The final CATE will be trained on the outcome of featurizer.fit_transform(T). If featurizer=None, then CATE is trained on T. - binary_outcome: bool, default ``False`` + discrete_outcome: bool, default ``False`` Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -609,7 +609,7 @@ def __init__(self, *, model_t='auto', featurizer=None, treatment_featurizer=None, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -666,7 +666,7 @@ def __init__(self, *, self.subforest_size = subforest_size self.n_jobs = n_jobs self.verbose = verbose - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, @@ -691,7 +691,7 @@ def _gen_featurizer(self): return clone(self.featurizer, safe=False) def _gen_model_y(self): - return _make_first_stage_selector(self.model_y, self.binary_outcome, self.random_state) + return _make_first_stage_selector(self.model_y, self.discrete_outcome, self.random_state) def _gen_model_t(self): return _make_first_stage_selector(self.model_t, self.discrete_treatment, self.random_state) diff --git a/econml/dml/dml.py b/econml/dml/dml.py index c4fa78038..8dcab2347 100644 --- a/econml/dml/dml.py +++ b/econml/dml/dml.py @@ -52,8 +52,14 @@ def __init__(self, model, discrete_target): def predict(self, X, W): n_samples = X.shape[0] if X is not None else (W.shape[0] if W is not None else 1) if self._discrete_target: - return self._model.predict_proba(_combine(X, W, n_samples))[:, 1:] + if hasattr(self._model, 'predict_proba'): + return self._model.predict_proba(_combine(X, W, n_samples))[:, 1:] + else: + warn('First stage model has discrete target but model is not a classifier!', UserWarning) + return self._model.predict(_combine(X, W, n_samples)) else: + if hasattr(self._model, 'predict_proba'): + raise AttributeError("Cannot use a classifier as a first stage model when the target is continuous!") return self._model.predict(_combine(X, W, n_samples)) def score(self, X, W, Target, sample_weight=None): @@ -354,14 +360,14 @@ class takes as input the parameter `model_t`, which is an arbitrary scikit-learn - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto Determines how to fit the treatment to the features. @@ -399,7 +405,7 @@ class takes as input the parameter `model_t`, which is an arbitrary scikit-learn Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) - binary_outcome: bool, default ``False`` + discrete_outcome: bool, default ``False`` Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -508,7 +514,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages="deprecated", - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -528,7 +534,7 @@ def __init__(self, *, self.model_y = clone(model_y, safe=False) self.model_t = clone(model_t, safe=False) self.model_final = clone(model_final, safe=False) - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, categories=categories, @@ -547,7 +553,7 @@ def _gen_featurizer(self): return clone(self.featurizer, safe=False) def _gen_model_y(self): - return _make_first_stage_selector(self.model_y, self.binary_outcome, self.random_state) + return _make_first_stage_selector(self.model_y, self.discrete_outcome, self.random_state) def _gen_model_t(self): return _make_first_stage_selector(self.model_t, self.discrete_treatment, self.random_state) @@ -628,14 +634,14 @@ class LinearDML(StatsModelsCateEstimatorMixin, DML): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Determines how to fit the treatment to the features. 
@@ -669,7 +675,7 @@ class LinearDML(StatsModelsCateEstimatorMixin, DML): Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) - binary_outcome: bool, default ``False`` + discrete_outcome: bool, default ``False`` Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -764,7 +770,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages="deprecated", - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -783,7 +789,7 @@ def __init__(self, *, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, linear_first_stages=linear_first_stages, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -873,14 +879,14 @@ class SparseLinearDML(DebiasedLassoCateEstimatorMixin, DML): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Determines how to fit the treatment to the features. @@ -944,7 +950,7 @@ class SparseLinearDML(DebiasedLassoCateEstimatorMixin, DML): Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) - binary_outcome: bool, default ``False`` + discrete_outcome: bool, default ``False`` Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -1046,7 +1052,7 @@ def __init__(self, *, treatment_featurizer=None, fit_cate_intercept=True, linear_first_stages=True, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -1070,7 +1076,7 @@ def __init__(self, *, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, linear_first_stages=linear_first_stages, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -1176,14 +1182,14 @@ class KernelDML(DML): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Determines how to fit the treatment to the features. @@ -1206,7 +1212,7 @@ class KernelDML(DML): bw: float, default 1.0 The bandwidth of the Gaussian used to generate features - binary_outcome: bool, default ``False`` + discrete_outcome: bool, default ``False`` Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -1289,7 +1295,7 @@ class KernelDML(DML): """ def __init__(self, model_y='auto', model_t='auto', - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, categories='auto', @@ -1310,7 +1316,7 @@ def __init__(self, model_y='auto', model_t='auto', featurizer=None, treatment_featurizer=treatment_featurizer, fit_cate_intercept=fit_cate_intercept, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, categories=categories, cv=cv, @@ -1401,14 +1407,14 @@ class NonParamDML(_BaseDML): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Determines how to fit the treatment to the features. @@ -1431,7 +1437,7 @@ class NonParamDML(_BaseDML): The transformer used to featurize the raw features when fitting the final model. Must implement a `fit_transform` method. 
-    binary_outcome: bool, default ``False``
+    discrete_outcome: bool, default ``False``
         Whether the outcome should be treated as binary
 
     discrete_treatment: bool, default ``False``
@@ -1523,7 +1529,7 @@ class NonParamDML(_BaseDML):
     def __init__(self, *,
                  model_y, model_t, model_final,
                  featurizer=None,
-                 binary_outcome=False,
+                 discrete_outcome=False,
                  discrete_treatment=False,
                  treatment_featurizer=None,
                  categories='auto',
@@ -1540,7 +1546,7 @@ def __init__(self, *,
         self.model_t = clone(model_t, safe=False)
         self.featurizer = clone(featurizer, safe=False)
         self.model_final = clone(model_final, safe=False)
-        super().__init__(binary_outcome=binary_outcome,
+        super().__init__(discrete_outcome=discrete_outcome,
                          discrete_treatment=discrete_treatment,
                          treatment_featurizer=treatment_featurizer,
                          categories=categories,
@@ -1566,7 +1572,7 @@ def _gen_featurizer(self):
         return clone(self.featurizer, safe=False)
 
     def _gen_model_y(self):
-        return _make_first_stage_selector(self.model_y, is_discrete=self.binary_outcome,
+        return _make_first_stage_selector(self.model_y, is_discrete=self.discrete_outcome,
                                           random_state=self.random_state)
 
     def _gen_model_t(self):
diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py
index c03f8b7ac..b08883f3b 100644
--- a/econml/dr/_drlearner.py
+++ b/econml/dr/_drlearner.py
@@ -63,11 +63,11 @@ def __init__(self,
                  model_propensity: SingleModelSelector,
                  model_regression: SingleModelSelector,
                  min_propensity,
-                 binary_outcome):
+                 discrete_outcome):
         self._model_propensity = model_propensity
         self._model_regression = model_regression
         self._min_propensity = min_propensity
-        self._binary_outcome = binary_outcome
+        self._discrete_outcome = discrete_outcome
 
     def _combine(self, X, W):
         return np.hstack([arr for arr in [X, W] if arr is not None])
@@ -103,15 +103,18 @@ def predict(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None):
         n = T.shape[0]
         Y_pred = np.zeros((T.shape[0], T.shape[1] + 1))
         T_counter = np.zeros(T.shape)
-        if self._binary_outcome and hasattr(self._model_regression, 'predict_proba'):
+        if self._discrete_outcome and hasattr(self._model_regression, 'predict_proba'):
             Y_pred[:, 0] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n)
         else:
+            if self._discrete_outcome:
+                warn("A regressor was passed when discrete_outcome=True. "
+                     "Using a classifier is recommended.", UserWarning)
             Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)
         Y_pred[:, 0] += (Y.reshape(n) - Y_pred[:, 0]) * np.all(T == 0, axis=1) / propensities[:, 0]
         for t in np.arange(T.shape[1]):
             T_counter = np.zeros(T.shape)
             T_counter[:, t] = 1
-            if self._binary_outcome and hasattr(self._model_regression, 'predict_proba'):
+            if self._discrete_outcome and hasattr(self._model_regression, 'predict_proba'):
                 Y_pred[:, t + 1] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n)
             else:
                 Y_pred[:, t + 1] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n)
@@ -262,14 +265,14 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc
 
         - If an estimator, will use the model as is for fitting.
        - If str, will use model associated with the keyword.
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_final : estimator for the final cate model. Trained on regressing the doubly robust potential outcomes @@ -285,7 +288,7 @@ class takes as input the parameter ``model_regressor``, which is an arbitrary sc mono-task model and a separate clone of the model is trained for each outcome. Then predict(X) of the t-th clone will be the CATE of the t-th lexicographically ordered treatment compared to the baseline. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary multitask_model_final : bool, default False @@ -445,7 +448,7 @@ def __init__(self, *, model_propensity='auto', model_regression='auto', model_final=StatsModelsLinearRegression(), - binary_outcome=False, + discrete_outcome=False, multitask_model_final=False, featurizer=None, min_propensity=1e-6, @@ -467,7 +470,7 @@ def __init__(self, *, super().__init__(cv=cv, mc_iters=mc_iters, mc_agg=mc_agg, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_treatment=True, treatment_featurizer=None, # treatment featurization not supported with discrete treatment discrete_instrument=False, # no instrument, so doesn't matter @@ -534,9 +537,9 @@ def _get_inference_options(self): def _gen_ortho_learner_model_nuisance(self): model_propensity = _make_first_stage_selector(self.model_propensity, True, self.random_state) - model_regression = _make_first_stage_selector(self.model_regression, self.binary_outcome, self.random_state) + model_regression = _make_first_stage_selector(self.model_regression, self.discrete_outcome, self.random_state) - return _ModelNuisance(model_propensity, model_regression, self.min_propensity, self.binary_outcome) + return _ModelNuisance(model_propensity, model_regression, self.min_propensity, self.discrete_outcome) def _gen_featurizer(self): return clone(self.featurizer, safe=False) @@ -824,14 +827,14 @@ class LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. 
+ and additionally 'predict_proba' if discrete_outcome=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -841,7 +844,7 @@ class LinearDRLearner(StatsModelsCateEstimatorDiscreteMixin, DRLearner): fit_cate_intercept : bool, default True Whether the linear CATE model should have a constant term. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary min_propensity : float, default ``1e-6`` @@ -945,7 +948,7 @@ def __init__(self, *, model_regression='auto', featurizer=None, fit_cate_intercept=True, - binary_outcome=False, + discrete_outcome=False, min_propensity=1e-6, categories='auto', cv=2, @@ -960,7 +963,7 @@ def __init__(self, *, super().__init__(model_propensity=model_propensity, model_regression=model_regression, model_final=None, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, @@ -1109,14 +1112,14 @@ class SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. featurizer : :term:`transformer`, optional Must support fit_transform and transform. Used to create composite features in the final CATE regression. @@ -1126,7 +1129,7 @@ class SparseLinearDRLearner(DebiasedLassoCateEstimatorDiscreteMixin, DRLearner): fit_cate_intercept : bool, default True Whether the linear CATE model should have a constant term. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary alpha: str | float, optional., default 'auto'. @@ -1260,7 +1263,7 @@ def __init__(self, *, model_regression='auto', featurizer=None, fit_cate_intercept=True, - binary_outcome=False, + discrete_outcome=False, alpha='auto', n_alphas=100, alpha_cov='auto', @@ -1289,7 +1292,7 @@ def __init__(self, *, super().__init__(model_propensity=model_propensity, model_regression=model_regression, model_final=None, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, @@ -1411,16 +1414,16 @@ class ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary min_propensity : float, default ``1e-6`` @@ -1583,7 +1586,7 @@ class ForestDRLearner(ForestModelFinalCateEstimatorDiscreteMixin, DRLearner): def __init__(self, *, model_regression="auto", model_propensity="auto", - binary_outcome=False, + discrete_outcome=False, featurizer=None, min_propensity=1e-6, categories='auto', @@ -1623,7 +1626,7 @@ def __init__(self, *, super().__init__(model_regression=model_regression, model_propensity=model_propensity, model_final=None, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, featurizer=featurizer, multitask_model_final=False, min_propensity=min_propensity, diff --git a/econml/dynamic/dml/__init__.py b/econml/dynamic/dml/__init__.py index 6d9949296..4fef96fd1 100755 --- a/econml/dynamic/dml/__init__.py +++ b/econml/dynamic/dml/__init__.py @@ -50,14 +50,14 @@ def DynamicDML(*, - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Determines how to fit the treatment to the features. diff --git a/econml/iv/dml/_dml.py b/econml/iv/dml/_dml.py index cc22cfdbc..27e85fe7d 100644 --- a/econml/iv/dml/_dml.py +++ b/econml/iv/dml/_dml.py @@ -210,14 +210,14 @@ class OrthoIV(LinearModelFinalCateEstimatorMixin, _OrthoLearner): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T | X, W]`. @@ -276,7 +276,7 @@ class OrthoIV(LinearModelFinalCateEstimatorMixin, _OrthoLearner): fit_cate_intercept : bool, default False Whether the linear CATE model should have a constant term. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_treatment: bool, default False @@ -392,7 +392,7 @@ def __init__(self, *, projection=False, featurizer=None, fit_cate_intercept=True, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -410,7 +410,7 @@ def __init__(self, *, self.featurizer = clone(featurizer, safe=False) self.fit_cate_intercept = fit_cate_intercept - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -435,7 +435,7 @@ def _gen_ortho_learner_model_final(self): def _gen_ortho_learner_model_nuisance(self): model_y = _make_first_stage_selector(self.model_y_xw, - is_discrete=self.binary_outcome, + is_discrete=self.discrete_outcome, random_state=self.random_state) model_t = _make_first_stage_selector(self.model_t_xw, @@ -1040,14 +1040,14 @@ class DMLIV(_BaseDMLIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Model to estimate :math:`\\E[T | X, W]`. 
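An illustrative usage sketch (not part of the patch itself, data and values are made up) for the renamed flag in this module: with discrete_outcome=True the first-stage outcome model needs predict_proba, so a user-supplied model_y_xw should be a classifier; the 'auto' choices above also resolve to classifiers in that case.

import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from econml.iv.dml import OrthoIV

# toy data: binary instrument, binary treatment, binary outcome
n = 2000
W = np.random.uniform(-1, 1, size=(n, 2))
Z = np.random.binomial(1, 0.5, size=(n,))
T = np.random.binomial(1, 0.2 + 0.6 * Z, size=(n,))
Y = np.random.binomial(1, 0.4 + 0.2 * T + 0.1 * W[:, 0], size=(n,))

# a classifier for E[Y | X, W], as required when discrete_outcome=True
est = OrthoIV(model_y_xw=LogisticRegressionCV(),
              discrete_outcome=True, discrete_treatment=True, discrete_instrument=True)
est.fit(Y, T, Z=Z, W=W)
print(est.ate())  # average effect of T on the probability that Y == 1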
@@ -1093,7 +1093,7 @@ class DMLIV(_BaseDMLIV): discrete_instrument: bool, default False Whether the instrument values should be treated as categorical, rather than continuous, quantities - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_treatment: bool, default False @@ -1198,7 +1198,7 @@ def __init__(self, *, model_final=StatsModelsLinearRegression(fit_intercept=False), featurizer=None, fit_cate_intercept=True, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -1214,7 +1214,7 @@ def __init__(self, *, self.model_final = clone(model_final, safe=False) self.featurizer = clone(featurizer, safe=False) self.fit_cate_intercept = fit_cate_intercept - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, discrete_instrument=discrete_instrument, @@ -1229,7 +1229,7 @@ def _gen_featurizer(self): return clone(self.featurizer, safe=False) def _gen_model_y_xw(self): - return _make_first_stage_selector(self.model_y_xw, self.binary_outcome, self.random_state) + return _make_first_stage_selector(self.model_y_xw, self.discrete_outcome, self.random_state) def _gen_model_t_xw(self): return _make_first_stage_selector(self.model_t_xw, self.discrete_treatment, self.random_state) @@ -1448,14 +1448,14 @@ class NonParamDMLIV(_BaseDMLIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Model to estimate :math:`\\E[T | X, W]`. @@ -1494,7 +1494,7 @@ class NonParamDMLIV(_BaseDMLIV): The transformer used to featurize the raw features when fitting the final model. Must implement a `fit_transform` method. 
- binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_treatment: bool, default False @@ -1601,7 +1601,7 @@ def __init__(self, *, model_t_xw="auto", model_t_xwz="auto", model_final, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, @@ -1617,7 +1617,7 @@ def __init__(self, *, self.model_t_xwz = clone(model_t_xwz, safe=False) self.model_final = clone(model_final, safe=False) self.featurizer = clone(featurizer, safe=False) - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument, treatment_featurizer=treatment_featurizer, @@ -1632,7 +1632,7 @@ def _gen_featurizer(self): return clone(self.featurizer, safe=False) def _gen_model_y_xw(self): - return _make_first_stage_selector(self.model_y_xw, self.binary_outcome, self.random_state) + return _make_first_stage_selector(self.model_y_xw, self.discrete_outcome, self.random_state) def _gen_model_t_xw(self): return _make_first_stage_selector(self.model_t_xw, self.discrete_treatment, self.random_state) diff --git a/econml/iv/dr/_dr.py b/econml/iv/dr/_dr.py index abc1d601d..2dceef7c8 100644 --- a/econml/iv/dr/_dr.py +++ b/econml/iv/dr/_dr.py @@ -364,7 +364,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -381,7 +381,7 @@ def __init__(self, *, self.fit_cate_intercept = fit_cate_intercept self.cov_clip = cov_clip self.opt_reweighted = opt_reweighted - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -625,7 +625,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -651,7 +651,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -668,7 +668,7 @@ def _gen_prel_model_effect(self): return clone(self.prel_model_effect, safe=False) def _gen_ortho_learner_model_nuisance(self): - model_y_xw = _make_first_stage_selector(self.model_y_xw, self.binary_outcome, self.random_state) + model_y_xw = _make_first_stage_selector(self.model_y_xw, self.discrete_outcome, self.random_state) model_t_xw = _make_first_stage_selector(self.model_t_xw, self.discrete_treatment, self.random_state) if self.projection: @@ -717,14 +717,14 @@ class DRIV(_DRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. 
- - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' Model to estimate :math:`\\E[T | X, W]`. @@ -834,7 +834,7 @@ class DRIV(_DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_instrument: bool, default False @@ -954,7 +954,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -987,7 +987,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -1250,14 +1250,14 @@ class LinearDRIV(StatsModelsCateEstimatorMixin, DRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T | X, W]`. @@ -1364,7 +1364,7 @@ class LinearDRIV(StatsModelsCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. 
- binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_instrument: bool, default False @@ -1495,7 +1495,7 @@ def __init__(self, *, fit_cate_intercept=True, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -1524,7 +1524,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -1620,14 +1620,14 @@ class SparseLinearDRIV(DebiasedLassoCateEstimatorMixin, DRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T | X, W]`. @@ -1764,7 +1764,7 @@ class SparseLinearDRIV(DebiasedLassoCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_instrument: bool, default False @@ -1902,7 +1902,7 @@ def __init__(self, *, n_jobs=None, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -1937,7 +1937,7 @@ def __init__(self, *, fit_cate_intercept=fit_cate_intercept, cov_clip=cov_clip, opt_reweighted=opt_reweighted, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -2036,14 +2036,14 @@ class ForestDRIV(ForestModelFinalCateEstimatorMixin, DRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. 
- If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xw : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T | X, W]`. @@ -2252,7 +2252,7 @@ class ForestDRIV(ForestModelFinalCateEstimatorMixin, DRIV): it method will return a biased projection to the model_final space, biased to give more weight on parts of the feature space where the instrument is strong. - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_instrument: bool, default False @@ -2386,7 +2386,7 @@ def __init__(self, *, verbose=0, cov_clip=1e-3, opt_reweighted=False, - binary_outcome=False, + discrete_outcome=False, discrete_instrument=False, discrete_treatment=False, treatment_featurizer=None, @@ -2427,7 +2427,7 @@ def __init__(self, *, fit_cate_intercept=False, cov_clip=cov_clip, opt_reweighted=opt_reweighted, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, discrete_instrument=discrete_instrument, discrete_treatment=discrete_treatment, treatment_featurizer=treatment_featurizer, @@ -2617,7 +2617,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=False, - binary_outcome=False, + discrete_outcome=False, cov_clip=1e-3, opt_reweighted=False, categories='auto', @@ -2636,7 +2636,7 @@ def __init__(self, *, super().__init__(model_final=model_final, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, cov_clip=cov_clip, cv=cv, mc_iters=mc_iters, @@ -2655,7 +2655,7 @@ def _gen_prel_model_effect(self): def _gen_ortho_learner_model_nuisance(self): model_y_xw = _make_first_stage_selector(self.model_y_xw, - is_discrete=self.binary_outcome, + is_discrete=self.discrete_outcome, random_state=self.random_state) model_t_xwz = _make_first_stage_selector(self.model_t_xwz, is_discrete=True, random_state=self.random_state) @@ -2700,14 +2700,14 @@ class IntentToTreatDRIV(_IntentToTreatDRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' model to estimate :math:`\\E[T | X, W, Z]`. 
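A comparable sketch (again illustrative only, synthetic encouragement-design data) for the intent-to-treat estimator documented above: with discrete_outcome=True the default 'auto' outcome model resolves to classifiers per the docstring, so a binary Y can be passed as-is.

import numpy as np
from econml.iv.dr import IntentToTreatDRIV

n = 3000
X = np.random.uniform(-1, 1, size=(n, 1))
Z = np.random.binomial(1, 0.5, size=(n,))             # intent to treat (encouragement)
T = np.random.binomial(1, 0.2 + 0.6 * Z, size=(n,))   # actual take-up
Y = np.random.binomial(1, 0.4 + 0.2 * T + 0.1 * X[:, 0], size=(n,))

est = IntentToTreatDRIV(discrete_outcome=True)
est.fit(Y, T, Z=Z, X=X)
print(est.effect(X[:5]))  # heterogeneous effects on P(Y == 1)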
@@ -2866,7 +2866,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=False, - binary_outcome=False, + discrete_outcome=False, cov_clip=1e-3, cv=3, mc_iters=None, @@ -2893,7 +2893,7 @@ def __init__(self, *, z_propensity=z_propensity, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, cov_clip=cov_clip, opt_reweighted=opt_reweighted, categories=categories, @@ -3019,14 +3019,14 @@ class LinearIntentToTreatDRIV(StatsModelsCateEstimatorMixin, IntentToTreatDRIV): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t_xwz : estimator, {'linear', 'forest'}, list of str/estimator, or 'auto', default 'auto' @@ -3194,7 +3194,7 @@ def __init__(self, *, z_propensity="auto", featurizer=None, fit_cate_intercept=True, - binary_outcome=False, + discrete_outcome=False, cov_clip=1e-3, cv=3, mc_iters=None, @@ -3215,7 +3215,7 @@ def __init__(self, *, z_propensity=z_propensity, featurizer=featurizer, fit_cate_intercept=fit_cate_intercept, - binary_outcome=binary_outcome, + discrete_outcome=discrete_outcome, cov_clip=cov_clip, cv=cv, mc_iters=mc_iters, diff --git a/econml/panel/dml/_dml.py b/econml/panel/dml/_dml.py index b85aa43aa..094a230d4 100644 --- a/econml/panel/dml/_dml.py +++ b/econml/panel/dml/_dml.py @@ -350,14 +350,14 @@ class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner): - If an estimator, will use the model as is for fitting. - If str, will use model associated with the keyword. - - 'linear' - LogisticRegressionCV if binary_outcome=True else WeightedLassoCVWrapper - - 'forest' - RandomForestClassifier if binary_outcome=True else RandomForestRegressor + - 'linear' - LogisticRegressionCV if discrete_outcome=True else WeightedLassoCVWrapper + - 'forest' - RandomForestClassifier if discrete_outcome=True else RandomForestRegressor - If list, will perform model selection on the supplied list, which can be a mix of str and estimators, \ and then use the best estimator for fitting. - If 'auto', model will select over linear and forest models User-supplied estimators should support 'fit' and 'predict' methods, - and additionally 'predict_proba' if binary_outcome=True. + and additionally 'predict_proba' if discrete_outcome=True. model_t: estimator, {'linear', 'forest'}, list of str/estimator, or 'auto' Determines how to fit the treatment to the features. 
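A sketch of the same flag for the panel estimator in this file (illustrative only; the balanced unit-by-period layout below is an assumption for the example): groups links the repeated observations of a unit, and rows of the same unit are kept contiguous as DynamicDML expects.

import numpy as np
from scipy.special import expit
from econml.panel.dml import DynamicDML

n_units, n_periods = 500, 3
n = n_units * n_periods
groups = np.repeat(np.arange(n_units), n_periods)     # unit id for every (unit, period) row
W = np.random.uniform(-1, 1, size=(n, 2))
T = np.random.normal(size=(n,))                       # continuous treatment in each period
Y = np.random.binomial(1, expit(0.5 * T + W[:, 0]))   # binary outcome

est = DynamicDML(discrete_outcome=True, cv=2)         # 'auto' first stages pick a classifier for Y
est.fit(Y, T, W=W, groups=groups)
print(est.const_marginal_effect())                    # effects of the treatment in each period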
@@ -386,7 +386,7 @@ class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner): Whether the first stage models are linear (in which case we will expand the features passed to `model_y` accordingly) - binary_outcome: bool, default False + discrete_outcome: bool, default False Whether the outcome should be treated as binary discrete_treatment: bool, default ``False`` @@ -489,7 +489,7 @@ def __init__(self, *, featurizer=None, fit_cate_intercept=True, linear_first_stages=False, - binary_outcome=False, + discrete_outcome=False, discrete_treatment=False, categories='auto', cv=2, @@ -502,7 +502,7 @@ def __init__(self, *, self.featurizer = clone(featurizer, safe=False) self.model_y = clone(model_y, safe=False) self.model_t = clone(model_t, safe=False) - super().__init__(binary_outcome=binary_outcome, + super().__init__(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, treatment_featurizer=None, discrete_instrument=False, @@ -564,7 +564,7 @@ def _gen_featurizer(self): def _gen_model_y(self): return _make_first_stage_selector(self.model_y, - is_discrete=self.binary_outcome, + is_discrete=self.discrete_outcome, random_state=self.random_state) def _gen_model_t(self): diff --git a/econml/tests/test_bootstrap.py b/econml/tests/test_bootstrap.py index 3145ef07b..df15f9cdb 100644 --- a/econml/tests/test_bootstrap.py +++ b/econml/tests/test_bootstrap.py @@ -270,8 +270,8 @@ def test_internal_options(self): def test_stratify(self): """Test that we can properly stratify by treatment""" - T = np.array([1, 0, 1, 2, 0, 2]) - Y = np.array([1, 2, 3, 4, 5, 6]) + T = [1, 0, 1, 2, 0, 2] + Y = [1, 2, 3, 4, 5, 6] X = np.array([1, 1, 2, 2, 1, 2]).reshape(-1, 1) est = LinearDML(model_y=LinearRegression(), model_t=LogisticRegression(), discrete_treatment=True) inference = BootstrapInference(n_bootstrap_samples=5, n_jobs=-1, verbose=0) @@ -286,9 +286,9 @@ def test_stratify(self): def test_stratify_orthoiv(self): """Test that we can properly stratify by treatment/instrument pair""" - T = np.array([1, 0, 1, 1, 0, 0, 1, 0]) - Z = np.array([1, 0, 0, 1, 0, 1, 0, 1]) - Y = np.array([1, 2, 3, 4, 5, 6, 7, 8]) + T = [1, 0, 1, 1, 0, 0, 1, 0] + Z = [1, 0, 0, 1, 0, 1, 0, 1] + Y = [1, 2, 3, 4, 5, 6, 7, 8] X = np.array([1, 1, 2, 2, 1, 2, 1, 2]).reshape(-1, 1) est = LinearIntentToTreatDRIV(model_y_xw=LinearRegression(), model_t_xwz=LogisticRegression(), flexible_model_effect=LinearRegression(), cv=2) @@ -297,8 +297,8 @@ def test_stratify_orthoiv(self): est.const_marginal_effect_interval(X) def test_all_kinds(self): - T = np.array([1, 0, 1, 2, 0, 2] * 5) - Y = np.array([1, 2, 3, 4, 5, 6] * 5) + T = [1, 0, 1, 2, 0, 2] * 5 + Y = [1, 2, 3, 4, 5, 6] * 5 X = np.array([1, 1, 2, 2, 1, 2] * 5).reshape(-1, 1) est = LinearDML(cv=2) for kind in ['percentile', 'pivot', 'normal']: diff --git a/econml/tests/test_binary_outcome.py b/econml/tests/test_discrete_outcome.py similarity index 72% rename from econml/tests/test_binary_outcome.py rename to econml/tests/test_discrete_outcome.py index 8e935a9a8..b8e650478 100644 --- a/econml/tests/test_binary_outcome.py +++ b/econml/tests/test_discrete_outcome.py @@ -20,11 +20,11 @@ from copy import deepcopy -class TestBinaryOutcome(unittest.TestCase): +class TestDiscreteOutcome(unittest.TestCase): # accuracy test def test_accuracy(self): n = 1000 - binary_outcome = True + discrete_outcome = True discrete_treatment = True true_ate = 0.3 W = np.random.uniform(-1, 1, size=(n, 1)) @@ -32,9 +32,9 @@ def test_accuracy(self): Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], 
size=(n,)) ests = [ - LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - LinearDRLearner(binary_outcome=binary_outcome) + LinearDML(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), + CausalForestDML(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), + LinearDRLearner(discrete_outcome=discrete_outcome) ] for est in ests: @@ -60,7 +60,7 @@ def test_accuracy(self): # accuracy test, DML def test_accuracy_iv(self): n = 10000 - binary_outcome = True + discrete_outcome = True discrete_treatment = True true_ate = 0.3 W = np.random.uniform(-1, 1, size=(n, 1)) @@ -69,8 +69,8 @@ def test_accuracy_iv(self): Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) ests = [ - OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), + OrthoIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), + LinearDRIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), ] for est in ests: @@ -91,12 +91,12 @@ def test_string_outcome(self): D = np.random.binomial(1, .5 + .1 * W[:, 0], size=(n,)) Y = np.random.binomial(1, .5 + true_ate * D + .1 * W[:, 0], size=(n,)) Y_str = pd.Series(Y).replace(0, 'a').replace(1, 'b').values - est = LinearDML(binary_outcome=True, discrete_treatment=True) + est = LinearDML(discrete_outcome=True, discrete_treatment=True) est.fit(Y_str, D, X=W) def test_basic_functionality(self): n = 100 - binary_outcome = True + discrete_outcome = True d_x = 3 def gen_array(n, is_binary, d): @@ -110,7 +110,7 @@ def gen_array(n, is_binary, d): for discrete_treatment in [True, False]: for discrete_instrument in [True, False, None]: - Y = gen_array(n, binary_outcome, d=0) + Y = gen_array(n, discrete_outcome, d=0) T = gen_array(n, discrete_treatment, d=0) Z = None if discrete_instrument is not None: @@ -119,43 +119,43 @@ def gen_array(n, is_binary, d): if Z is not None: est_list = [ - DRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + DRIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - DMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + DMLIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + OrthoIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - LinearDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + LinearDRIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - SparseLinearDRIV(binary_outcome=binary_outcome, + SparseLinearDRIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - ForestDRIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + ForestDRIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), - OrthoIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + OrthoIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument), 
# uncomment when issue #837 is resolved - # NonParamDMLIV(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment, + # NonParamDMLIV(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment, # discrete_instrument=discrete_instrument, model_final=LinearRegression()) ] if discrete_instrument and discrete_treatment: est_list += [ - LinearIntentToTreatDRIV(binary_outcome=binary_outcome), - IntentToTreatDRIV(binary_outcome=binary_outcome), + LinearIntentToTreatDRIV(discrete_outcome=discrete_outcome), + IntentToTreatDRIV(discrete_outcome=discrete_outcome), ] else: est_list = [ - LinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - SparseLinearDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment), - CausalForestDML(binary_outcome=binary_outcome, discrete_treatment=discrete_treatment) + LinearDML(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), + SparseLinearDML(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment), + CausalForestDML(discrete_outcome=discrete_outcome, discrete_treatment=discrete_treatment) ] if discrete_treatment: est_list += [ - LinearDRLearner(binary_outcome=binary_outcome), - ForestDRLearner(binary_outcome=binary_outcome), + LinearDRLearner(discrete_outcome=discrete_outcome), + ForestDRLearner(discrete_outcome=discrete_outcome), ] for est in est_list: diff --git a/econml/tests/test_missing_values.py b/econml/tests/test_missing_values.py index e8542001f..761da5dbc 100644 --- a/econml/tests/test_missing_values.py +++ b/econml/tests/test_missing_values.py @@ -123,7 +123,7 @@ def test_missing(self): # model that can handle missing values nuisance_model = make_pipeline(SimpleImputer(strategy='mean'), LinearRegression()) - OrthoLearner(binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + OrthoLearner(discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=None, categories='auto', cv=3, random_state=1, allow_missing=True).fit(y, T, W=W_missing) diff --git a/econml/tests/test_ortho_learner.py b/econml/tests/test_ortho_learner.py index 84017172a..08cc306ef 100644 --- a/econml/tests/test_ortho_learner.py +++ b/econml/tests/test_ortho_learner.py @@ -2,7 +2,6 @@ # Licensed under the MIT License. 
from sklearn.datasets import make_regression - from econml._ortho_learner import _OrthoLearner, _crossfit from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures @@ -256,7 +255,7 @@ def _gen_ortho_learner_model_final(self): sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) est.fit(y, X[:, 0], W=X[:, 1:]) np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) @@ -273,7 +272,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, treatment_featurizer=None, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) # test non-array inputs est.fit(list(y), list(X[:, 0]), X=None, W=X[:, 1:]) @@ -289,7 +288,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=KFold(n_splits=3), binary_outcome=False, + est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) est.fit(y, X[:, 0], X=None, W=X[:, 1:]) @@ -306,7 +305,7 @@ def _gen_ortho_learner_model_final(self): sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] - est = OrthoLearner(cv=KFold(n_splits=3), binary_outcome=False, + est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) @@ -363,7 +362,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) @@ -414,7 +413,7 @@ def _gen_ortho_learner_model_final(self): X = np.random.normal(size=(10000, 3)) sigma = 0.1 y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=False, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, categories='auto', random_state=None) est.fit(y, X[:, 0], W=X[:, 1:]) @@ -477,7 +476,7 @@ def _gen_ortho_learner_model_final(self): T = np.random.binomial(1, scipy.special.expit(X[:, 0])) sigma = 0.01 y = T + X[:, 0] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, binary_outcome=False, discrete_treatment=True, + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=True, treatment_featurizer=None, 
                            discrete_instrument=False, categories='auto', random_state=None)
        est.fit(y, T, W=X)
 
diff --git a/econml/tests/test_treatment_featurization.py b/econml/tests/test_treatment_featurization.py
index 96f496103..1127e3284 100644
--- a/econml/tests/test_treatment_featurization.py
+++ b/econml/tests/test_treatment_featurization.py
@@ -525,7 +525,7 @@ def _gen_ortho_learner_model_final(self):
         'estimator': OrthoLearner,
         'params': {
             'cv': 2,
-            'binary_outcome': False,
+            'discrete_outcome': False,
             'discrete_treatment': False,
             'treatment_featurizer': None,
             'discrete_instrument': False,
diff --git a/econml/tests/test_utilities.py b/econml/tests/test_utilities.py
index 62bb9ba7c..b80fb2c8b 100644
--- a/econml/tests/test_utilities.py
+++ b/econml/tests/test_utilities.py
@@ -9,7 +9,8 @@
 import sparse as sp
 import pytest
 from econml.utilities import (einsum_sparse, todense, tocoo, transpose,
-                              inverse_onehot, cross_product, transpose_dictionary, deprecated, _deprecate_positional)
+                              inverse_onehot, cross_product, transpose_dictionary, deprecated, _deprecate_positional,
+                              single_strata_from_discrete_arrays)
 from sklearn.preprocessing import OneHotEncoder
 
 
@@ -177,3 +178,13 @@ def m(a, b, c=1, *args, **kwargs):
         m(a=1, b=2)
         m(1, b=2, c=3, X='other')
         assert not counter
+
+    def test_single_strata_from_discrete_array(self):
+        T = np.repeat([[0, 1, 2]], 4, axis=0).ravel()
+        Z = np.repeat([[0, 1]], 6, axis=0).ravel()
+        Y = np.repeat([0, 1], 6, axis=0)
+
+        assert set(single_strata_from_discrete_arrays([T, Z, Y])) == set(np.arange(12))
+        assert set(single_strata_from_discrete_arrays([T, Z])) == set(np.arange(6))
+        assert set(single_strata_from_discrete_arrays([T])) == set(np.arange(3))
+        assert single_strata_from_discrete_arrays([]) is None
diff --git a/econml/utilities.py b/econml/utilities.py
index f62ffbb4d..db3f4cb49 100644
--- a/econml/utilities.py
+++ b/econml/utilities.py
@@ -18,7 +18,7 @@
 from functools import reduce, wraps
 from sklearn.utils import check_array, check_X_y
 from sklearn.utils.validation import assert_all_finite
-from sklearn.preprocessing import PolynomialFeatures
+from sklearn.preprocessing import PolynomialFeatures, LabelEncoder
 import warnings
 from warnings import warn
 from collections.abc import Iterable
@@ -1482,3 +1482,29 @@ def jacify_featurizer(featurizer):
         a function for calculating the jacobian
     """
     return _TransformerWrapper(featurizer)
+
+
+def single_strata_from_discrete_arrays(arrs):
+    """
+    Combine multiple discrete arrays into a single array for stratification purposes:
+
+    e.g. if arrs are
+    [0 1 2 0 1 2 0 1 2 0 1 2],
+    [0 1 0 1 0 1 0 1 0 1 0 1],
+    [0 0 0 0 0 0 1 1 1 1 1 1]
+    then output will be
+    [0 8 4 6 2 10 1 9 5 7 3 11]
+
+    Every distinct combination of these discrete arrays will have its own label.
+ """ + if not arrs: + return None + + curr_array = np.zeros(shape=np.ravel(arrs[0]).shape, dtype='int') + + for arr in arrs: + enc = LabelEncoder() + temp = enc.fit_transform(np.ravel(arr)) + curr_array = temp + curr_array * len(enc.classes_) + + return curr_array From 0757d39ca06aad3948a320b04e8a365b09626e3e Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Fri, 5 Jan 2024 14:07:29 -0500 Subject: [PATCH 21/25] line endings Signed-off-by: Fabio Vera --- econml/tests/test_ortho_learner.py | 972 ++++++++++++++--------------- 1 file changed, 486 insertions(+), 486 deletions(-) diff --git a/econml/tests/test_ortho_learner.py b/econml/tests/test_ortho_learner.py index 08cc306ef..0c65358ea 100644 --- a/econml/tests/test_ortho_learner.py +++ b/econml/tests/test_ortho_learner.py @@ -1,486 +1,486 @@ -# Copyright (c) PyWhy contributors. All rights reserved. -# Licensed under the MIT License. - -from sklearn.datasets import make_regression -from econml._ortho_learner import _OrthoLearner, _crossfit -from sklearn.linear_model import LinearRegression -from sklearn.preprocessing import PolynomialFeatures -from sklearn.linear_model import LinearRegression, LassoCV, Lasso -from sklearn.model_selection import KFold -import numpy as np -import unittest -import joblib -import pytest - -try: - import ray - - ray_installed = True -except ImportError: - ray_installed = False - - -class TestOrthoLearner(unittest.TestCase): - - def _test_crossfit(self, use_ray): - class Wrapper: - - def __init__(self, model): - self._model = model - - def train(self, is_selecting, X, y, Q, W=None): - self._model.fit(X, y) - return self - - def predict(self, X, y, Q, W=None): - return self._model.predict(X), y - self._model.predict(X), X - - def score(self, X, y, Q, W=None): - return self._model.score(X, y) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = list(KFold(2).split(X, y)) - model = Lasso(alpha=0.01) - ray_remote_function_option = {"num_cpus": 1} - - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - np.testing.assert_allclose(nuisance[0][folds[0][1]], - model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) - np.testing.assert_allclose(nuisance[0][folds[0][0]], - model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) - np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], - y[folds[0][1]])) - np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], - y[folds[0][0]])) - coef_ = np.zeros(X.shape[1]) - coef_[0] = 1 - [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] - np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = list(KFold(2).split(X, y)) - model = Lasso(alpha=0.01) - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - np.testing.assert_allclose(nuisance[0][folds[0][1]], - model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) - np.testing.assert_allclose(nuisance[0][folds[0][0]], - model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) - np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], - y[folds[0][1]])) 
- np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], - y[folds[0][0]])) - coef_ = np.zeros(X.shape[1]) - coef_[0] = 1 - [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] - np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = list(KFold(2).split(X, y)) - model = Lasso(alpha=0.01) - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - np.testing.assert_allclose(nuisance[0][folds[0][1]], - model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) - np.testing.assert_allclose(nuisance[0][folds[0][0]], - model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) - np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], - y[folds[0][1]])) - np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], - y[folds[0][0]])) - coef_ = np.zeros(X.shape[1]) - coef_[0] = 1 - [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] - np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) - - class Wrapper: - - def __init__(self, model): - self._model = model - - def train(self, is_selecting, X, y, W=None): - self._model.fit(X, y) - return self - - def predict(self, X, y, W=None): - return self._model.predict(X), y - self._model.predict(X), X - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0])), - (np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] - model = Lasso(alpha=0.01) - with pytest.raises(AttributeError) as e_info: - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0])), - (np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] - model = Lasso(alpha=0.01) - with pytest.raises(AttributeError) as e_info: - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = [(np.arange(X.shape[0]), np.arange(X.shape[0]))] - model = Lasso(alpha=0.01) - with pytest.raises(AttributeError) as e_info: - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - - np.random.seed(123) - X = np.random.normal(size=(5000, 3)) - y = X[:, 0] + np.random.normal(size=(5000,)) - folds = [(np.arange(X.shape[0]), np.arange(X.shape[0]))] - model = Lasso(alpha=0.01) - with pytest.raises(AttributeError) as e_info: - nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, - ray_remote_function_option, - X, y, y, Z=None) - - @pytest.mark.ray - def test_crossfit_with_ray(self): - try: - ray.init() - self._test_crossfit(use_ray=True) - finally: - ray.shutdown() - - def test_crossfit_without_ray(self): - self._test_crossfit(use_ray=False) 
- - @pytest.mark.ray - def test_crossfit_comparison(self): - try: - ray.init() # Initialize Ray - - class Wrapper: - - def __init__(self, model): - self._model = model - - def train(self, is_selecting, X, y, Q, W=None): - self._model.fit(X, y) - return self - - def predict(self, X, y, Q, W=None): - return self._model.predict(X), y - self._model.predict(X), X - - def score(self, X, y, Q, W=None): - return self._model.score(X, y) - - # Generate synthetic data - X, y = make_regression(n_samples=10, n_features=5, noise=0.1, random_state=42) - folds = list(KFold(2).split(X, y)) - model = LinearRegression() - ray_remote_function_option = {"num_cpus": 1} - - # Run _crossfit with Ray enabled - nuisance_ray, model_list_ray, fitted_inds_ray, scores_ray = _crossfit(Wrapper(model), folds, True, - ray_remote_function_option, - X, y, y, Z=None) - # Run _crossfit without Ray - nuisance_regular, model_list_regular, fitted_inds_regular, scores_regular = _crossfit(Wrapper(model), - folds, - False, {}, - X, y, y, Z=None) - # Compare the results - assert np.allclose(nuisance_ray[0], nuisance_regular[0]) - assert np.allclose(nuisance_ray[1], nuisance_regular[1]) - assert np.allclose(fitted_inds_ray, fitted_inds_regular) - assert np.allclose(scores_ray, scores_regular) - - finally: - ray.shutdown() # Shutdown Ray - - def _test_ol(self, use_ray): - class ModelNuisance: - def __init__(self, model_t, model_y): - self._model_t = model_t - self._model_y = model_y - - def train(self, is_selecting, Y, T, W=None): - self._model_t.fit(W, T) - self._model_y.fit(W, Y) - return self - - def predict(self, Y, T, W=None): - return Y - self._model_y.predict(W), T - self._model_t.predict(W) - - class ModelFinal: - - def __init__(self): - return - - def fit(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) - return self - - def predict(self, X=None): - return self.model.coef_[0] - - def score(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1))) ** 2) - - class OrthoLearner(_OrthoLearner): - def _gen_ortho_learner_model_nuisance(self): - return ModelNuisance(LinearRegression(), LinearRegression()) - - def _gen_ortho_learner_model_final(self): - return ModelFinal() - - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - - est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, - discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) - est.fit(y, X[:, 0], W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) - np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=3) - np.testing.assert_almost_equal(est.score_, sigma**2, decimal=3) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) - # Nuisance model has no score method, so nuisance_scores_ should be none - assert est.nuisance_scores_ is None - - # Test non keyword based calls to fit - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_outcome=False, 
discrete_treatment=False, treatment_featurizer=None, - discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) - # test non-array inputs - est.fit(list(y), list(X[:, 0]), X=None, W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) - np.testing.assert_almost_equal(est.score(y, X[:, 0], None, X[:, 1:]), sigma ** 2, decimal=3) - np.testing.assert_almost_equal(est.score_, sigma ** 2, decimal=3) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) - - # Test custom splitter - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, - discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None, use_ray=use_ray) - est.fit(y, X[:, 0], X=None, W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) - np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=3) - np.testing.assert_almost_equal(est.score_, sigma**2, decimal=3) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) - - # Test incomplete set of test folds - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] - est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, - discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None, use_ray=use_ray) - - est.fit(y, X[:, 0], X=None, W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=2) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=2) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=1) - np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=2) - np.testing.assert_almost_equal(est.score_, sigma**2, decimal=2) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=2) - - @pytest.mark.ray - def test_ol_with_ray(self): - self._test_ol(True) - - def test_ol_without_ray(self): - self._test_ol(False) - - def test_ol_no_score_final(self): - class ModelNuisance: - def __init__(self, model_t, model_y): - self._model_t = model_t - self._model_y = model_y - - def train(self, is_selecting, Y, T, W=None): - self._model_t.fit(W, T) - self._model_y.fit(W, Y) - return self - - def predict(self, Y, T, W=None): - return Y - self._model_y.predict(W), T - self._model_t.predict(W) - - class ModelFinal: - - def __init__(self): - return - - def fit(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) - return self - - def predict(self, X=None): - return self.model.coef_[0] - - class OrthoLearner(_OrthoLearner): - def _gen_ortho_learner_model_nuisance(self): - return 
ModelNuisance(LinearRegression(), LinearRegression()) - - def _gen_ortho_learner_model_final(self): - return ModelFinal() - - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, - treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) - est.fit(y, X[:, 0], W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) - assert est.score_ is None - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) - - def test_ol_nuisance_scores(self): - class ModelNuisance: - def __init__(self, model_t, model_y): - self._model_t = model_t - self._model_y = model_y - - def train(self, is_selecting, Y, T, W=None): - self._model_t.fit(W, T) - self._model_y.fit(W, Y) - return self - - def predict(self, Y, T, W=None): - return Y - self._model_y.predict(W), T - self._model_t.predict(W) - - def score(self, Y, T, W=None): - return (self._model_t.score(W, Y), self._model_y.score(W, T)) - - class ModelFinal: - - def __init__(self): - return - - def fit(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) - return self - - def predict(self, X=None): - return self.model.coef_[0] - - class OrthoLearner(_OrthoLearner): - def _gen_ortho_learner_model_nuisance(self): - return ModelNuisance(LinearRegression(), LinearRegression()) - - def _gen_ortho_learner_model_final(self): - return ModelFinal() - - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - sigma = 0.1 - y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, - treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) - est.fit(y, X[:, 0], W=X[:, 1:]) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) - nuisance_scores_y = est.nuisance_scores_[0] - nuisance_scores_t = est.nuisance_scores_[1] - assert len(nuisance_scores_y) == len(nuisance_scores_t) == 1 # as many scores as iterations - assert len(nuisance_scores_y[0]) == len(nuisance_scores_t[0]) == 2 # as many scores as splits - # y scores should be positive, since W predicts Y somewhat - # t scores might not be, since W and T are uncorrelated - np.testing.assert_array_less(0, nuisance_scores_y[0]) - - def test_ol_discrete_treatment(self): - class ModelNuisance: - def __init__(self, model_t, model_y): - self._model_t = model_t - self._model_y = model_y - - def train(self, is_selecting, Y, T, W=None): - self._model_t.fit(W, np.matmul(T, np.arange(1, T.shape[1] + 1))) - self._model_y.fit(W, Y) - return self - - def predict(self, Y, T, W=None): - return Y - self._model_y.predict(W), T - self._model_t.predict_proba(W)[:, 1:] - - class ModelFinal: - - def __init__(self): - return - - def fit(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - self.model = 
LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) - return self - - def predict(self): - # theta needs to be of dimension (1, d_t) if T is (n, d_t) - return np.array([[self.model.coef_[0]]]) - - def score(self, Y, T, W=None, nuisances=None): - Y_res, T_res = nuisances - return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2) - - from sklearn.linear_model import LogisticRegression - - class OrthoLearner(_OrthoLearner): - def _gen_ortho_learner_model_nuisance(self): - return ModelNuisance(LogisticRegression(solver='lbfgs'), LinearRegression()) - - def _gen_ortho_learner_model_final(self): - return ModelFinal() - - np.random.seed(123) - X = np.random.normal(size=(10000, 3)) - import scipy.special - T = np.random.binomial(1, scipy.special.expit(X[:, 0])) - sigma = 0.01 - y = T + X[:, 0] + np.random.normal(0, sigma, size=(10000,)) - est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=True, - treatment_featurizer=None, discrete_instrument=False, - categories='auto', random_state=None) - est.fit(y, T, W=X) - np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) - np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) - np.testing.assert_almost_equal(est.score(y, T, W=X), sigma**2, decimal=3) - np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) +# Copyright (c) PyWhy contributors. All rights reserved. +# Licensed under the MIT License. + +from sklearn.datasets import make_regression +from econml._ortho_learner import _OrthoLearner, _crossfit +from sklearn.linear_model import LinearRegression +from sklearn.preprocessing import PolynomialFeatures +from sklearn.linear_model import LinearRegression, LassoCV, Lasso +from sklearn.model_selection import KFold +import numpy as np +import unittest +import joblib +import pytest + +try: + import ray + + ray_installed = True +except ImportError: + ray_installed = False + + +class TestOrthoLearner(unittest.TestCase): + + def _test_crossfit(self, use_ray): + class Wrapper: + + def __init__(self, model): + self._model = model + + def train(self, is_selecting, X, y, Q, W=None): + self._model.fit(X, y) + return self + + def predict(self, X, y, Q, W=None): + return self._model.predict(X), y - self._model.predict(X), X + + def score(self, X, y, Q, W=None): + return self._model.score(X, y) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = list(KFold(2).split(X, y)) + model = Lasso(alpha=0.01) + ray_remote_function_option = {"num_cpus": 1} + + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + np.testing.assert_allclose(nuisance[0][folds[0][1]], + model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) + np.testing.assert_allclose(nuisance[0][folds[0][0]], + model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) + np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], + y[folds[0][1]])) + np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], + y[folds[0][0]])) + coef_ = np.zeros(X.shape[1]) + coef_[0] = 1 + [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] + np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + 
np.random.normal(size=(5000,)) + folds = list(KFold(2).split(X, y)) + model = Lasso(alpha=0.01) + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + np.testing.assert_allclose(nuisance[0][folds[0][1]], + model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) + np.testing.assert_allclose(nuisance[0][folds[0][0]], + model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) + np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], + y[folds[0][1]])) + np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], + y[folds[0][0]])) + coef_ = np.zeros(X.shape[1]) + coef_[0] = 1 + [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] + np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = list(KFold(2).split(X, y)) + model = Lasso(alpha=0.01) + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + np.testing.assert_allclose(nuisance[0][folds[0][1]], + model.fit(X[folds[0][0]], y[folds[0][0]]).predict(X[folds[0][1]])) + np.testing.assert_allclose(nuisance[0][folds[0][0]], + model.fit(X[folds[0][1]], y[folds[0][1]]).predict(X[folds[0][0]])) + np.testing.assert_allclose(scores[0][0], model.fit(X[folds[0][0]], y[folds[0][0]]).score(X[folds[0][1]], + y[folds[0][1]])) + np.testing.assert_allclose(scores[0][1], model.fit(X[folds[0][1]], y[folds[0][1]]).score(X[folds[0][0]], + y[folds[0][0]])) + coef_ = np.zeros(X.shape[1]) + coef_[0] = 1 + [np.testing.assert_allclose(coef_, mdl._model.coef_, rtol=0, atol=0.08) for mdl in model_list] + np.testing.assert_array_equal(fitted_inds, np.arange(X.shape[0])) + + class Wrapper: + + def __init__(self, model): + self._model = model + + def train(self, is_selecting, X, y, W=None): + self._model.fit(X, y) + return self + + def predict(self, X, y, W=None): + return self._model.predict(X), y - self._model.predict(X), X + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0])), + (np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] + model = Lasso(alpha=0.01) + with pytest.raises(AttributeError) as e_info: + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0])), + (np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] + model = Lasso(alpha=0.01) + with pytest.raises(AttributeError) as e_info: + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = [(np.arange(X.shape[0]), np.arange(X.shape[0]))] + model = Lasso(alpha=0.01) + with pytest.raises(AttributeError) as e_info: + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, 
Z=None) + + np.random.seed(123) + X = np.random.normal(size=(5000, 3)) + y = X[:, 0] + np.random.normal(size=(5000,)) + folds = [(np.arange(X.shape[0]), np.arange(X.shape[0]))] + model = Lasso(alpha=0.01) + with pytest.raises(AttributeError) as e_info: + nuisance, model_list, fitted_inds, scores = _crossfit(Wrapper(model), folds, use_ray, + ray_remote_function_option, + X, y, y, Z=None) + + @pytest.mark.ray + def test_crossfit_with_ray(self): + try: + ray.init() + self._test_crossfit(use_ray=True) + finally: + ray.shutdown() + + def test_crossfit_without_ray(self): + self._test_crossfit(use_ray=False) + + @pytest.mark.ray + def test_crossfit_comparison(self): + try: + ray.init() # Initialize Ray + + class Wrapper: + + def __init__(self, model): + self._model = model + + def train(self, is_selecting, X, y, Q, W=None): + self._model.fit(X, y) + return self + + def predict(self, X, y, Q, W=None): + return self._model.predict(X), y - self._model.predict(X), X + + def score(self, X, y, Q, W=None): + return self._model.score(X, y) + + # Generate synthetic data + X, y = make_regression(n_samples=10, n_features=5, noise=0.1, random_state=42) + folds = list(KFold(2).split(X, y)) + model = LinearRegression() + ray_remote_function_option = {"num_cpus": 1} + + # Run _crossfit with Ray enabled + nuisance_ray, model_list_ray, fitted_inds_ray, scores_ray = _crossfit(Wrapper(model), folds, True, + ray_remote_function_option, + X, y, y, Z=None) + # Run _crossfit without Ray + nuisance_regular, model_list_regular, fitted_inds_regular, scores_regular = _crossfit(Wrapper(model), + folds, + False, {}, + X, y, y, Z=None) + # Compare the results + assert np.allclose(nuisance_ray[0], nuisance_regular[0]) + assert np.allclose(nuisance_ray[1], nuisance_regular[1]) + assert np.allclose(fitted_inds_ray, fitted_inds_regular) + assert np.allclose(scores_ray, scores_regular) + + finally: + ray.shutdown() # Shutdown Ray + + def _test_ol(self, use_ray): + class ModelNuisance: + def __init__(self, model_t, model_y): + self._model_t = model_t + self._model_y = model_y + + def train(self, is_selecting, Y, T, W=None): + self._model_t.fit(W, T) + self._model_y.fit(W, Y) + return self + + def predict(self, Y, T, W=None): + return Y - self._model_y.predict(W), T - self._model_t.predict(W) + + class ModelFinal: + + def __init__(self): + return + + def fit(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) + return self + + def predict(self, X=None): + return self.model.coef_[0] + + def score(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1))) ** 2) + + class OrthoLearner(_OrthoLearner): + def _gen_ortho_learner_model_nuisance(self): + return ModelNuisance(LinearRegression(), LinearRegression()) + + def _gen_ortho_learner_model_final(self): + return ModelFinal() + + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) + est.fit(y, X[:, 0], W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, 
decimal=2) + np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=3) + np.testing.assert_almost_equal(est.score_, sigma**2, decimal=3) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) + # Nuisance model has no score method, so nuisance_scores_ should be none + assert est.nuisance_scores_ is None + + # Test non keyword based calls to fit + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, treatment_featurizer=None, + discrete_instrument=False, categories='auto', random_state=None, use_ray=use_ray) + # test non-array inputs + est.fit(list(y), list(X[:, 0]), X=None, W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) + np.testing.assert_almost_equal(est.score(y, X[:, 0], None, X[:, 1:]), sigma ** 2, decimal=3) + np.testing.assert_almost_equal(est.score_, sigma ** 2, decimal=3) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) + + # Test custom splitter + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, + discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, + categories='auto', random_state=None, use_ray=use_ray) + est.fit(y, X[:, 0], X=None, W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) + np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=3) + np.testing.assert_almost_equal(est.score_, sigma**2, decimal=3) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) + + # Test incomplete set of test folds + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + folds = [(np.arange(X.shape[0] // 2), np.arange(X.shape[0] // 2, X.shape[0]))] + est = OrthoLearner(cv=KFold(n_splits=3), discrete_outcome=False, + discrete_treatment=False, treatment_featurizer=None, discrete_instrument=False, + categories='auto', random_state=None, use_ray=use_ray) + + est.fit(y, X[:, 0], X=None, W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=2) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=2) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=1) + np.testing.assert_almost_equal(est.score(y, X[:, 0], W=X[:, 1:]), sigma**2, decimal=2) + np.testing.assert_almost_equal(est.score_, sigma**2, decimal=2) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=2) + + @pytest.mark.ray + def test_ol_with_ray(self): + self._test_ol(True) + + def test_ol_without_ray(self): + self._test_ol(False) + + def test_ol_no_score_final(self): + class ModelNuisance: + def __init__(self, model_t, model_y): + self._model_t = model_t + self._model_y = model_y + + def train(self, 
is_selecting, Y, T, W=None): + self._model_t.fit(W, T) + self._model_y.fit(W, Y) + return self + + def predict(self, Y, T, W=None): + return Y - self._model_y.predict(W), T - self._model_t.predict(W) + + class ModelFinal: + + def __init__(self): + return + + def fit(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) + return self + + def predict(self, X=None): + return self.model.coef_[0] + + class OrthoLearner(_OrthoLearner): + def _gen_ortho_learner_model_nuisance(self): + return ModelNuisance(LinearRegression(), LinearRegression()) + + def _gen_ortho_learner_model_final(self): + return ModelFinal() + + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, + treatment_featurizer=None, discrete_instrument=False, + categories='auto', random_state=None) + est.fit(y, X[:, 0], W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) + assert est.score_ is None + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) + + def test_ol_nuisance_scores(self): + class ModelNuisance: + def __init__(self, model_t, model_y): + self._model_t = model_t + self._model_y = model_y + + def train(self, is_selecting, Y, T, W=None): + self._model_t.fit(W, T) + self._model_y.fit(W, Y) + return self + + def predict(self, Y, T, W=None): + return Y - self._model_y.predict(W), T - self._model_t.predict(W) + + def score(self, Y, T, W=None): + return (self._model_t.score(W, Y), self._model_y.score(W, T)) + + class ModelFinal: + + def __init__(self): + return + + def fit(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) + return self + + def predict(self, X=None): + return self.model.coef_[0] + + class OrthoLearner(_OrthoLearner): + def _gen_ortho_learner_model_nuisance(self): + return ModelNuisance(LinearRegression(), LinearRegression()) + + def _gen_ortho_learner_model_final(self): + return ModelFinal() + + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + sigma = 0.1 + y = X[:, 0] + X[:, 1] + np.random.normal(0, sigma, size=(10000,)) + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=False, + treatment_featurizer=None, discrete_instrument=False, + categories='auto', random_state=None) + est.fit(y, X[:, 0], W=X[:, 1:]) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_array_almost_equal(est.effect(T0=0, T1=10), np.ones(1) * 10, decimal=2) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) + nuisance_scores_y = est.nuisance_scores_[0] + nuisance_scores_t = est.nuisance_scores_[1] + assert len(nuisance_scores_y) == len(nuisance_scores_t) == 1 # as many scores as iterations + assert len(nuisance_scores_y[0]) == len(nuisance_scores_t[0]) == 2 # as many scores as splits + # y scores should be positive, since W predicts Y somewhat + # t scores might not be, since W and T are uncorrelated + np.testing.assert_array_less(0, 
nuisance_scores_y[0]) + + def test_ol_discrete_treatment(self): + class ModelNuisance: + def __init__(self, model_t, model_y): + self._model_t = model_t + self._model_y = model_y + + def train(self, is_selecting, Y, T, W=None): + self._model_t.fit(W, np.matmul(T, np.arange(1, T.shape[1] + 1))) + self._model_y.fit(W, Y) + return self + + def predict(self, Y, T, W=None): + return Y - self._model_y.predict(W), T - self._model_t.predict_proba(W)[:, 1:] + + class ModelFinal: + + def __init__(self): + return + + def fit(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + self.model = LinearRegression(fit_intercept=False).fit(T_res.reshape(-1, 1), Y_res) + return self + + def predict(self): + # theta needs to be of dimension (1, d_t) if T is (n, d_t) + return np.array([[self.model.coef_[0]]]) + + def score(self, Y, T, W=None, nuisances=None): + Y_res, T_res = nuisances + return np.mean((Y_res - self.model.predict(T_res.reshape(-1, 1)))**2) + + from sklearn.linear_model import LogisticRegression + + class OrthoLearner(_OrthoLearner): + def _gen_ortho_learner_model_nuisance(self): + return ModelNuisance(LogisticRegression(solver='lbfgs'), LinearRegression()) + + def _gen_ortho_learner_model_final(self): + return ModelFinal() + + np.random.seed(123) + X = np.random.normal(size=(10000, 3)) + import scipy.special + T = np.random.binomial(1, scipy.special.expit(X[:, 0])) + sigma = 0.01 + y = T + X[:, 0] + np.random.normal(0, sigma, size=(10000,)) + est = OrthoLearner(cv=2, discrete_outcome=False, discrete_treatment=True, + treatment_featurizer=None, discrete_instrument=False, + categories='auto', random_state=None) + est.fit(y, T, W=X) + np.testing.assert_almost_equal(est.const_marginal_effect(), 1, decimal=3) + np.testing.assert_array_almost_equal(est.effect(), np.ones(1), decimal=3) + np.testing.assert_almost_equal(est.score(y, T, W=X), sigma**2, decimal=3) + np.testing.assert_almost_equal(est.ortho_learner_model_final_.model.coef_[0], 1, decimal=3) From b848e734d67b292426aca2dcc7b5ba204543b4b8 Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Tue, 9 Jan 2024 14:58:58 -0500 Subject: [PATCH 22/25] fix tests where clf was used without specifying disc treat Signed-off-by: Fabio Vera --- econml/tests/test_drtester.py | 12 ++- econml/tests/test_federated_learning.py | 11 ++- econml/tests/test_missing_values.py | 3 +- econml/tests/test_refit.py | 4 +- notebooks/CATE validation.ipynb | 116 ++++++++---------------- 5 files changed, 60 insertions(+), 86 deletions(-) diff --git a/econml/tests/test_drtester.py b/econml/tests/test_drtester.py index 9e890c190..cc66cfe6a 100644 --- a/econml/tests/test_drtester.py +++ b/econml/tests/test_drtester.py @@ -65,7 +65,8 @@ def test_multi(self): cate = DML( model_y=reg_y, model_t=reg_t, - model_final=reg_y + model_final=reg_y, + discrete_treatment=True ).fit(Y=Ytrain, T=Dtrain, X=Xtrain) # test the DR outcome difference @@ -114,7 +115,8 @@ def test_binary(self): cate = DML( model_y=reg_y, model_t=reg_t, - model_final=reg_y + model_final=reg_y, + discrete_treatment=True ).fit(Y=Ytrain, T=Dtrain, X=Xtrain) # test the DR outcome difference @@ -158,7 +160,8 @@ def test_nuisance_val_fit(self): cate = DML( model_y=reg_y, model_t=reg_t, - model_final=reg_y + model_final=reg_y, + discrete_treatment=True ).fit(Y=Ytrain, T=Dtrain, X=Xtrain) # test the DR outcome difference @@ -198,7 +201,8 @@ def test_exceptions(self): cate = DML( model_y=reg_y, model_t=reg_t, - model_final=reg_y + model_final=reg_y, + discrete_treatment=True ).fit(Y=Ytrain, T=Dtrain, X=Xtrain) # test 
the DR outcome difference diff --git a/econml/tests/test_federated_learning.py b/econml/tests/test_federated_learning.py index 71c674e69..27f656945 100644 --- a/econml/tests/test_federated_learning.py +++ b/econml/tests/test_federated_learning.py @@ -23,6 +23,13 @@ def fit(self, X, y, sample_weight=None): def predict(self, X): return self.func(X) + +class FunctionClassifier(FunctionRegressor): + """A simple model that ignores the data it is fitted on, always just using the specified function to predict""" + + def __init__(self, func): + self.func = func + def predict_proba(self, X): return self.func(X) @@ -62,8 +69,8 @@ def test_lineardrlearner(self): a = np.random.normal(size=(n_x + n_w, n_t)) b = np.random.normal(size=(n_x + n_w + n_t - 1)) - t_model = FunctionRegressor(lambda XW: np.exp(XW @ a)) - y_model = FunctionRegressor(lambda XW: XW @ b) + t_model = FunctionClassifier(lambda XW: np.exp(XW @ a)) + y_model = FunctionClassifier(lambda XW: XW @ b) for cov_type in ['HC0', 'HC1', 'nonrobust']: with self.subTest(n_t=n_t, cov_type=cov_type): diff --git a/econml/tests/test_missing_values.py b/econml/tests/test_missing_values.py index 761da5dbc..e59c8811c 100644 --- a/econml/tests/test_missing_values.py +++ b/econml/tests/test_missing_values.py @@ -166,7 +166,8 @@ def test_missing2(self): x_w_missing_models = [ NonParamDML(model_y=regr, model_t=clsf, model_final=non_param_model_final, discrete_treatment=discrete_treatment, allow_missing=True), - DML(model_y=regr, model_t=clsf, model_final=param_model_final, allow_missing=True), + DML(model_y=regr, model_t=clsf, discrete_treatment=discrete_treatment, + model_final=param_model_final, allow_missing=True), DMLIV(model_y_xw=regr, model_t_xw=clsf, model_t_xwz=clsf, model_final=param_model_final, discrete_treatment=discrete_treatment, discrete_instrument=discrete_instrument, allow_missing=True), diff --git a/econml/tests/test_refit.py b/econml/tests/test_refit.py index 00cc81dff..5f473200e 100644 --- a/econml/tests/test_refit.py +++ b/econml/tests/test_refit.py @@ -265,8 +265,8 @@ def test_can_set_discrete_treatment(self): est.fit(y, T, X=X, W=W) est.effect(X) est.discrete_treatment = False - est.fit(y, T, X=X, W=W) - est.effect(X) + with pytest.raises(AttributeError): + est.fit(y, T, X=X, W=W) # should fail because passing a clf when discrete_treatment=False def test_refit_final_inference(self): """Test that we can perform inference during refit_final""" diff --git a/notebooks/CATE validation.ipynb b/notebooks/CATE validation.ipynb index 57b20444b..07c9ba86f 100644 --- a/notebooks/CATE validation.ipynb +++ b/notebooks/CATE validation.ipynb @@ -13,44 +13,7 @@ }, "scrolled": true }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_clustering.py:35: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _pt_shuffle_rec(i, indexes, index_mask, partition_tree, M, pos):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_clustering.py:54: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. 
The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def delta_minimization_order(all_masks, max_swap_size=100, num_passes=2):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_clustering.py:63: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _reverse_window(order, start, length):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_clustering.py:69: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _reverse_window_score_gain(masks, order, start, length):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_clustering.py:77: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _mask_delta_score(m1, m2):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/links.py:5: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def identity(x):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/links.py:10: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _identity_inverse(x):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/links.py:15: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def logit(x):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/links.py:20: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. 
The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _logit_inverse(x):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_masked_model.py:362: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _build_fixed_single_output(averaged_outs, last_outs, outputs, batch_positions, varying_rows, num_varying_rows, link, linearizing_weights):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/utils/_masked_model.py:384: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _build_fixed_multi_output(averaged_outs, last_outs, outputs, batch_positions, varying_rows, num_varying_rows, link, linearizing_weights):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/maskers/_tabular.py:185: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _single_delta_mask(dind, masked_inputs, last_mask, data, x, noop_code):\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/shap/maskers/_tabular.py:196: NumbaDeprecationWarning: The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - " def _delta_masking(masks, x, curr_delta_inds, varying_rows_out,\n", - "/opt/anaconda3/envs/cate_test/lib/python3.9/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n", - "The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n", - "The 'nopython' keyword argument was not supplied to the 'numba.jit' decorator. The implicit default value for this argument is currently False, but it will be changed to True in Numba 0.59.0. 
See https://numba.readthedocs.io/en/stable/reference/deprecation.html#deprecation-of-object-mode-fall-back-behaviour-when-using-jit for details.\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", @@ -166,14 +129,13 @@ "name": "stderr", "output_type": "stream", "text": [ - "`sparse` was renamed to `sparse_output` in version 1.2 and will be removed in 1.4. `sparse_output` is ignored unless you leave `sparse` to its default value.\n", "The final model has a nonzero intercept for at least one outcome; it will be subtracted, but consider fitting a model without an intercept if possible.\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 4, @@ -183,7 +145,7 @@ ], "source": [ "est_t = TLearner(models=model_regression)\n", - "est_dm = DML(model_y=model_regression, model_t=model_propensity, model_final=model_regression)\n", + "est_dm = DML(model_y=model_regression, model_t=model_propensity, model_final=model_regression, discrete_treatment=True)\n", "\n", "est_t.fit(Ytrain, Dtrain, X=Xtrain)\n", "est_dm.fit(Ytrain, Dtrain, X=Xtrain)" @@ -236,24 +198,24 @@ " \n", " 0\n", " 1\n", - " -0.137\n", - " 0.142\n", - " 0.335\n", - " -0.015\n", - " 0.021\n", - " 0.242\n", - " -5.506\n", + " 0.078\n", + " 0.220\n", + " 0.722\n", + " -0.011\n", + " 0.023\n", + " 0.322\n", + " -9.163\n", " \n", " \n", " 1\n", " 2\n", - " 1.209\n", - " 0.095\n", + " 1.003\n", + " 0.062\n", " 0.000\n", - " 0.373\n", + " 0.375\n", " 0.024\n", " 0.000\n", - " 0.090\n", + " -0.178\n", " \n", " \n", "\n", @@ -261,12 +223,12 @@ ], "text/plain": [ " treatment blp_est blp_se blp_pval qini_est qini_se qini_pval \\\n", - "0 1 -0.137 0.142 0.335 -0.015 0.021 0.242 \n", - "1 2 1.209 0.095 0.000 0.373 0.024 0.000 \n", + "0 1 0.078 0.220 0.722 -0.011 0.023 0.322 \n", + "1 2 1.003 0.062 0.000 0.375 0.024 0.000 \n", "\n", " cal_r_squared \n", - "0 -5.506 \n", - "1 0.090 " + "0 -9.163 \n", + "1 -0.178 " ] }, "execution_count": 5, @@ -294,7 +256,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 6, @@ -303,7 +265,7 @@ }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjcAAAHHCAYAAABDUnkqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAAA8pElEQVR4nO3dd3xUVf7/8fcQkkkoAQIkBAyEotKLQdgQaRo2AhtWV4RFpamAGpQvrFJXAijFAqJIEaTpIs1FVxcWFlkigrAoJixKE2kqhE4qEJI5vz/8ZXRIIROSTLi8no/HPPSeOefOZ06GyTv3njtjM8YYAQAAWEQZTxcAAABQlAg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AEqVCRMmyGazubSFhoZqwIABzu0lS5bIZrPp66+/LuHqXMXFxclmsykuLs6jdQBwRbiBW2w2W4FuJfVmP2fOHC1ZsqREHutG7d27VxMmTNDRo0dL/LFTU1MVGxur+++/XwEBAbLZbEU2b8nJyZo4caJatGihChUqyM/PT02bNtWoUaN04sSJInkMTyuNr7NOnTq5/Jvz8/NT8+bNNXPmTDkcjnzHHj9+XDVr1pTNZtPrr7+eZ7/9+/dr5MiRatmypSpWrKjg4GB1797d46Hyt7IDZm63HTt2XHd8dpi+9ubr65tr/4ULF6pRo0by9fXV7bffrlmzZuW575UrVyo8PFzly5dX5cqV1a5dO/3nP/8p9HNFwZX1dAG4ubz//vsu2++99542btyYo71Ro0YlUs+cOXNUrVo1l7/qS6u9e/dq4sSJ6tSpk0JDQ0v0sc+ePatJkyapdu3aatGiRZGFz8OHDysyMlLHjx/Xww8/rMGDB8vHx0f/+9//tHDhQn300Uc6ePDgDT/OgQMHVKaM5/4Wy+t11qFDB126dEk+Pj4eqeu2227T1KlTJf3yM/7ggw80fPhwnTlzRpMnT851zIULF9S1a1elpaWpQ4cOGjlypEJCQtS7d+8cfd99910tXLhQDz30kJ555hklJSXpnXfe0e9+9zutX79ekZGRxfr83PHcc8/p7rvvdmlr0KBBgcfPnTtXFSpUcG57eXnl6PPOO+/oqaee0kMPPaQRI0boiy++0HPPPaf09HSNGjXKpe+ECRM0adIk9ezZUwMGDNDVq1f17bff6ueff3bzmaFQDHADYmJiTEFeRmlpacXy+E2aNDEdO3Ysln0XtdWrVxtJZvPmzSX+2JcvXzYnT540xhjz1VdfGUlm8eLFN7TPq1evmhYtWphy5cqZL774Isf9SUlJZuzYsW7vNzY29rqvqcWLFxtJ5quvvnJ7/w6Hw6Snp7s1pjS+zjp27GiaNGni0nbp0iVTp04dU7FiRZOZmZljzOXLl02HDh2Mv7+/2b59u7l8+bLp3r27sdvt5vPPP8/R/+uvvzYpKSkubWfPnjXVq1c3ERERRfuECmnz5s1Gklm9enWhxme/3s6cOZNvv/T0dFO1alXTvXt3l/ZHH33UlC9f3pw/f97Ztn37dmOz2cyMGTMKVRNuHKelUOQ6deqkpk2bateuXerQoYPKlSunsWPHSpKuXLmi2NhYNWjQQHa7XSEhIRo5cqSuXLniso/Fixfr3nvvVWBgoOx2uxo3bqy5c+e69AkNDdV3332nzz//3HkouVOnTpJ+XZOxdetWPffcc6pevboqV66sIUOGKCMjQxcvXlS/fv1UpUoVValSRSNHjpQxxmX/DodDM2fOVJMmTeTr66ugoCANGTJEFy5cyFHHH/7wB23dulVt2rSRr6+v6tWrp/fee8/ZZ8mSJXr44YclSZ07dy7x03d2u101atQo0n3+/e9/1+7duzVu3Djdc889Oe739/d3OXrwxRdf6OGHH1bt2rWdP/vhw4fr0qVL132sa9fcZEtPT9eQIUNUtWpV+fv7q1+/fnn+fDZs2KDWrVvLz89P77zzjqQbf53lteZm9erVCgsLk5+fn6pVq6bHHnssx1/sAwYMUIUKFfTzzz/rgQceUIUKFVS9enU9//zzysrKuu6c5MbX11d33323UlJSdPr0aZf7jDHq37+/du/erY0bN+p3v/ud7Ha71qxZoy5duuiBBx7Q3r17XcaEhYW5HM2QpKpVq6p9+/bat29foWosTikpKcrMzCzUWGOMkpOTc7wPZNu8ebPOnTunZ555xqU9JiZGaWlpWrt2rbNt5syZqlGjhoYNGyZjjFJTUwtVEwqP01IoFufOnVPXrl315z//WY899piCgoLkcDjUo0cPbd26VYMHD1ajRo20Z88evfHGGzp48KA+/vhj5/i5c+eqSZMm6tGjh8qWLatPP/1UzzzzjBwOh2JiYiT98gby7LPPqkKFCho3bpwkKSgoyKWOZ599VjVq1NDEiRO1Y8cOzZ8/X5UrV9aXX36p2rVra8qUKVq3bp1ee+01NW3aVP369XOOHTJkiJYsWaKBAwfqueee05EjR/T2228rPj5e27Ztk7e3t7PvoUOH1LNnTz3xxBPq37+/Fi1apAEDBigsLExNmjRRhw4d9Nxzz+mtt97S2LFjnaft8jt9d+XKFaWkpBRovqtVq1agfkXpk08+kST17du3QP1Xr16t9PR0Pf3006patap27typWbNm6aefftLq1asLVcPQoUNVuXJlTZgwQQcOHNDcuXN17NgxZ+jIduDAAfXp00dDhgzRoEGDdOedd0oqutfZb2W/Zu6++25NnTpVp06d0ptvvqlt27YpPj5elStXdvbNyspSVFSU2rZtq9dff12fffaZpk+frvr16+vpp58u1JwcPXpUNpvN5XEkaeTIkdqwYYM2btzocvrGx8dHf//739WzZ0917dpVO3bsUHBwcL6PkZiYWKDX3NWrV5WUlFSgugMCAm7o1OPAgQOVmpoqLy8vtW/fXq+99ppat25d4PH16tVTamqqypcvrwceeEDTp093+TnHx8dLUo59hoWFqUyZMoqPj9djjz0mSdq0aZPatWunt956Sy+//LLOnTunGjVqaNy4cRo6dGihnyPc4NHjRrjp5XZaqmPHjkaSmTdvnkv7+++/b8qUKZPjFMa8efOMJLNt2zZnW26nDaKioky9evVc2vI6XZB92iIqKso4HA5ne3h4uLHZbOapp55ytmVmZprbbrvNZT9ffPGFkWSWLVvmst/169fnaK9Tp46RZLZs2eJsO336tLHb7eYvf/mLs83d01LZz6EgN3cU1WmpVq1amUqVKhW4f24/06lTpxqbzWaOHTvmbMvttFSdOnVM//79ndvZcxMWFmYyMjKc7a+++qqRZP7xj3+4jJVk1q9fX6Ca3HmdZZ8Syf6ZZmRkmMDAQNO0aVNz6dIlZ79//vOfRpIZP368s61///5Gkpk0aZLLPlu1amXCwsJyPNa1OnbsaBo2bGjOnDljzpw5Y/bv329eeOEFIynHqZOitGXLFmOz2cyLL7543b7Z81OQ25EjRwpVz7Zt28xDDz1kFi5caP7xj3+YqVOnmqpVqxpfX1/zzTffXH
[... notebook output hunks omitted: this part of the patch only replaces base64-encoded matplotlib PNG images and updates execution counts in a Jupyter notebook's output cells ...]
LA0wgoAALA0wgoAALA0wgoAWMTh08nuLgGwJMIKALjJ+eRUPTZ3m2O567++08PRm5WQnObGqgDrIawAgJs89cl2bTxw2qnth/2nNeKTODdVBFgTYQUA3ODgqQta/9spZV7TnmGM1v92SodOX3RLXYAVEVYAwA1+P5v39SmHzxBWgCyEFQBwg7BA3zyfD6/kV0KVANZHWAEAN6hZ2V+t61TO9ku4rM2m1nUqKyKIsAJkIawAgJu807+xomoFObW1rB2kd/o3dlNFgDURVgDATey+npo1qIljecVTrTRvSDPZfT3dWBVgPYQVALCI8KC8r2MBSiuXwsq0adN06dIlx/L69euVkpLiWE5KStLf/va3oqsOAACUei6FlRdeeEFJSUmO5e7du+t///ufYzk5OVnvvfde0VUHAABKPZfCijEmz2UAAICixjUrAADA0ggrAADA0jxcXeGDDz6Qv7+/JCk9PV0ffvihgoL++JyAq69nAQAAKAouhZVbbrlFs2bNciyHhIToo48+ytYHAACgqLgUVg4fPlxMZQAAAOTMpWtW7r33Xp0/f76YSgEAAMjOpbASGxur1NTU4qoFAAAgG+4GAgAAluZyWElKSlJiYmKej4KaMmWK7rrrLpUvX17BwcHq3bu39u7d62pJAADgJubyrcu33nprrs8ZY2Sz2ZSRkVGgba1bt07Dhw/XXXfdpfT0dL344ovq1KmTdu3aJT8/P1dLAwAANyGbceEz88uUKaPFixcrMDAwz35t2rQpVDGnTp1ScHCw1q1bp9atW+fbPzExUXa7XQkJCQoICCjUPgEAQMly9f3b5ZmVli1bKjg4uFDF5SchIUGScg1DKSkpTt/y7MopJwAAcGMq8gts09PTC7WeMUbPPvus7rnnHjVo0CDHPlOmTJHdbnc8QkNDr6dUAABwA3AprISFhals2bI5Prdr1y49++yzql69eqEKefLJJ/XLL7/ok08+ybXPCy+8oISEBMfj6NGjhdoXAAC4cbh0GujQoUNOyxcuXNDChQsVHR2tLVu2qHnz5ho7dqzLRYwYMUJffPGF1q9frxo1auTaz9vbW97e3i5vHwAA3LhcvmZFkr7//nt98MEHWrx4sSIiIrRr1y6tW7dOLVu2dGk7xhiNGDFCS5cuVWxsrCIiIgpTDgAAuIm5dBpo2rRpuu2229SvXz9VrlxZ33//vX755RfZbDZVrFjR5Z0PHz5c8+fP14IFC1S+fHnFx8crPj5ely5dcnlbAADg5uTSrcseHh56/vnnNXnyZKdrVzw9PfXzzz/r9ttvd23nNluO7XPmzNHgwYPzXZ9blwEAuPG4+v7t0szK5MmT9dlnnykiIkLPP/+8du7cWehCpT9OA+X0KEhQAQAApYNLYWXcuHHat2+fPvroI8XHx6t58+Zq1KiRjDE6d+5ccdUIAABKsUJ9zkqbNm00d+5cHTt2TMOGDdOdd96p1q1bq0WLFnrrrbeKukYAAFCKuXTNSl527typ6Ohoffzxxzp58mRRbDJfXLMCAMCNp1g/bv/SpUtas2aNunfvLumPD2m7+uPvPTw8dODAARdLBlBSklPTdfuEbyRJuyZ3lq9XoT69AABKlEu/qebNm6cvv/zSEVamT5+u+vXrq1y5cpKkvXv3qlq1anrmmWeKvlIAAFAquXTNyscff6xHH33UqW3BggWKiYlRTEyMpk2bpk8//bRICwQAAKWbS2Fl3759uvXWWx3LPj4+KlPmyiaaNWumXbt2FV11AACg1HPpNFBCQoI8PK6scurUKafnMzMzna5hAQAAuF4uzazUqFEjzw+C++WXX/L8IkIAAABXuRRWunbtqgkTJujy5cvZnrt06ZJefvlldevWrciKAwAAcOk00Lhx4/Tpp5+qbt26evLJJ3XrrbfKZrNpz549mj59utLT0zVu3LjiqhUAAJRCLoWVKlWqaMOGDRo2bJjGjh2rrM+Ts9ls6tixo2bMmKEqVaoUS6EAAKB0cvkToSIiIrRy5UqdPXtW+/fvlyTVrl1bgYGBRV4cAABAoT++MjAwUM2aNSvKWgAAALIp1BcZAgAAlBTCCgAAsDTCCgAAsDTCCgAAsDTCCgAAsDTCCgAAsDTCCgAAsDTCCgAAsDTCCgAAsDTCClBKHT6d7O4SAKBACCtAKXE+OVWPzd3mWO76r+/0cPRmJSSnubEqAMgfYQUoJZ76ZLs2Hjjt1PbD/tMa8UmcmyoCgIIhrAClwMFTF7T+t1PKvKY9wxit/+2UDp2+6Ja6AKAgCCtAKfD72byvTzl8hrACwLoIK0ApEBbom+fz4ZX8SqgSAHAdYQUoBWpW9lfrOpWz/Ycva7OpdZ3KiggirACwLsIKUEq807+xomoFObW1rB2kd/o3dlNFAFAwhBWglLD7emrWoCaO5RVPtdK8Ic1k9/V0Y1UAkD/CClBKhQflfR0LAFgFYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFgaYQUAAFiah7sLAFByfL08dHhqN3eXAQAuYWYFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYmlvDyvr169WjRw9Vq1ZNNptNy5Ytc2c5AADAgtwaVi5evKhGjRpp+vTp7iwDAABYmIc7d96lSxd16dLFnSUAAACL45oVAABgaW6dWXFVSkqKUlJSHMuJiYlurAYAAJSEG2pmZcqUKbLb7Y5HaGiou0sCAADF7IYKKy+88IISEhIcj6NHj7q7JAAAUMxuqNNA3t7e8vb2dncZAACgBLk1rFy4cEH79+93LB86dEjbt29XYGCgbrnlFjdWBgAArMKtYWXr1q1q166dY/nZZ5+VJA0aNEgffvihm6oCAABW4taw0rZtWxlj3FkCAACwuBvqAlsAAFD6EFYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVYAAIClEVZykJyarvCxXyl87FdKTk13dzkAAJRqhBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphBUAAGBphJV8HD6d7O4SAAAo1Qgr1zifnKrH5m5zLHf913d6OHqzEpLT3FgVAAClF2HlGk99sl0bD5x2avth/2mN+CTOTRUBAFC6
EVaucvDUBa3/7ZQyr2nPMEbrfzulQ6cvuqUuAABKM8LKVX4/m/f1KYfPEFYAAChphJWrhAX65vl8eCW/EqoEAABkIaxcpWZlf7WuUznboJS12dS6TmVFBBFWAAAoaYSVa7zTv7GiagU5tbWsHaR3+jd2U0UAAJRuhJVr2H09NWtQE8fyiqdaad6QZrL7erqxKgAASi/CSj7Cg/K+jgUAABQvwgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0wgoAALA0t4eVGTNmKCIiQj4+PmrSpIm+++47d5cEAAAsxK1hZdGiRRo5cqRefPFFxcXFqVWrVurSpYuOHDnizrIAAICFuDWsvPXWWxoyZIiGDh2qevXq6e2331ZoaKhmzpzpzrIAAICFuC2spKamatu2berUqZNTe6dOnbRhw4Yc10lJSVFiYqLTAwAA3NzcFlZOnz6tjIwMValSxam9SpUqio+Pz3GdKVOmyG63Ox6hoaElUSoAAHAjt19ga7PZnJaNMdnasrzwwgtKSEhwPI4ePVoSJQIAADfycNeOg4KCVLZs2WyzKCdPnsw225LF29tb3t7eJVEeAACwCLfNrHh5ealJkyZatWqVU/uqVavUokULN1UFAACsxm0zK5L07LPPauDAgWratKmioqL0/vvv68iRI3riiSfcWRYAALAQt4aVP//5zzpz5owmT56s48ePq0GDBlqxYoXCwsLcWRYAALAQmzHGuLuIwkpMTJTdbldCQoICAgLcXQ4AACgAV9+/3X43EAAAQF4IKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNIIKwAAwNI83F3A9TDGSJISExPdXAkAACiorPftrPfx/NzQYSUpKUmSFBoa6uZKAACAq5KSkmS32/PtZzMFjTUWlJmZqWPHjql8+fKy2WwurZuYmKjQ0FAdPXpUAQEBxVThjYPxuIKxuIKxcMZ4XMFYXMFYOCvIeBhjlJSUpGrVqqlMmfyvSLmhZ1bKlCmjGjVqXNc2AgICOLiuwnhcwVhcwVg4YzyuYCyuYCyc5TceBZlRycIFtgAAwNIIKwAAwNJKbVjx9vbWxIkT5e3t7e5SLIHxuIKxuIKxcMZ4XMFYXMFYOCuO8bihL7AFAAA3v1I7swIAAG4MhBUAAGBphBUAAGBphBUAAGBppSqsvPrqq2rRooV8fX1VoUKFAq1jjNGkSZNUrVo1lStXTm3bttWvv/5avIWWgHPnzmngwIGy2+2y2+0aOHCgzp8/n+c6gwcPls1mc3o0b968ZAouYjNmzFBERIR8fHzUpEkTfffdd3n2X7dunZo0aSIfHx/VrFlT//73v0uo0uLnyljExsZmOwZsNpv27NlTghUXj/Xr16tHjx6qVq2abDabli1blu86N+tx4epY3MzHxZQpU3TXXXepfPnyCg4OVu/evbV3795817tZj43CjEdRHB+lKqykpqbqwQcf1LBhwwq8zrRp0/TWW29p+vTp2rJli0JCQtSxY0fH9xLdqP7yl79o+/btWrlypVauXKnt27dr4MCB+a5333336fjx447HihUrSqDaorVo0SKNHDlSL774ouLi4tSqVSt16dJFR44cybH/oUOH1LVrV7Vq1UpxcXEaN26cnnrqKS1evLiEKy96ro5Flr179zodB3Xq1CmhiovPxYsX1ahRI02fPr1A/W/m48LVschyMx4X69at0/Dhw7Vp0yatWrVK6enp6tSpky5evJjrOjfzsVGY8chyXceHKYXmzJlj7HZ7vv0yMzNNSEiImTp1qqPt8uXLxm63m3//+9/FWGHx2rVrl5FkNm3a5GjbuHGjkWT27NmT63qDBg0yvXr1KoEKi1ezZs3ME0884dR22223mbFjx+bYf8yYMea2225zanv88cdN8+bNi63GkuLqWMTExBhJ5ty5cyVQnftIMkuXLs2zz818XFytIGNRWo4LY4w5efKkkWTWrVuXa5/ScmwYU7DxKIrjo1TNrLjq0KFDio+PV6dOnRxt3t7eatOmjTZs2ODGyq7Pxo0bZbfbdffddzvamjdvLrvdnu/rio2NVXBwsG699VY99thjOnnyZHGXW6RSU1O1bds2p5+pJHXq1CnX175x48Zs/Tt37qytW7cqLS2t2GotboUZiyyNGzdW1apV1b59e8XExBRnmZZ1sx4X16M0HBcJCQmSpMDAwFz7lKZjoyDjkeV6jg/CSh7i4+MlSVWqVHFqr1KliuO5G1F8fLyCg4OztQcHB+f5urp06aKPP/5Ya9eu1ZtvvqktW7bo3nvvVUpKSnGWW6ROnz6tjIwMl36m8fHxOfZPT0/X6dOni63W4laYsahataref/99LV68WEuWLFHdunXVvn17rV+/viRKtpSb9bgojNJyXBhj9Oyzz+qee+5RgwYNcu1XWo6Ngo5HURwfN/S3LkvSpEmT9PLLL+fZZ8uWLWratGmh92Gz2ZyWjTHZ2qygoGMhZX9NUv6v689//rPj3w0aNFDTpk0VFhamr776Svfff38hq3YPV3+mOfXPqf1G5MpY1K1bV3Xr1nUsR0VF6ejRo/r73/+u1q1bF2udVnQzHxeuKC3HxZNPPqlffvlF33//fb59S8OxUdDxKIrj44YPK08++aT69euXZ5/w8PBCbTskJETSHym5atWqjvaTJ09mS81WUNCx+OWXX3TixIlsz506dcql11W1alWFhYXpt99+c7lWdwkKClLZsmWzzRzk9TMNCQnJsb+Hh4cqVapUbLUWt8KMRU6aN2+u+fPnF3V5lnezHhdF5WY7LkaMGKEvvvhC69evV40aNfLsWxqODVfGIyeuHh83fFgJCgpSUFBQsWw7IiJCISEhWrVqlRo3bizpj/P869at0+uvv14s+7weBR2LqKgoJSQkaPPmzWrWrJkk6ccff1RCQoJatGhR4P2dOXNGR48edQpyVufl5aUmTZpo1apV6tOnj6N91apV6tWrV47rREVFafny5U5t3377rZo2bSpPT89irbc4FWYschIXF3dDHQNF5WY9LorKzXJcGGM0YsQILV26VLGxsYqIiMh3nZv52CjMeOTE5eOj0Jfm3oB+//13ExcXZ15++WXj7+9v4uLiTFxcnElKSnL0qVu3rlmyZIljeerUqcZut5slS5aYHTt2mP79+5uqVauaxMREd7yEInPfffeZyMhIs3HjRrNx40bTsGFD0717d6c+V49FUlKSGTVqlNmwYYM5dOiQiYmJMVFRUaZ69eo33FgsXLjQeHp6mujoaLNr1y4zcuRI4+fnZw4fPmyMMWbs2LFm4MCBjv4HDx40vr6+5plnnjG7du0y0dHRxtPT03z++efueglFxtWx+Mc//mGWLl1q9u3bZ3bu3GnGjh1rJJnFixe76yUUmaSkJMfvBEnmrbfeMnF
xceb33383xpSu48LVsbiZj4thw4YZu91uYmNjzfHjxx2P5ORkR5/SdGwUZjyK4vgoVWFl0KBBRlK2R0xMjKOPJDNnzhzHcmZmppk4caIJCQkx3t7epnXr1mbHjh0lX3wRO3PmjBkwYIApX768KV++vBkwYEC228quHovk5GTTqVMnU7lyZePp6WluueUWM2jQIHPkyJGSL74IvPvuuyYsLMx4eXmZO++80+m2u0GDBpk2bdo49Y+NjTWNGzc2Xl5eJjw83MycObOEKy4+rozF66+/bmrVqmV8fHxMxYoVzT333GO++uorN1Rd9LJur7z2MWjQIGNM6TouXB2Lm/m4yGkcrn2fKE3HRmHGoyiOD9v/7RwAAMCSuHUZAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFAABYGmEFuIHEx8fr6aefVu3ateXj46MqVaronnvu0b///W8lJye7u7xchYeHy2azaeHChdmeq1+/vmw2mz788MOSLywH8fHxGjFihGrWrClvb2+FhoaqR48eWrNmTba+r732msqWLaupU6c62rJea26Ptm3b5tnv6m0B+MMN/0WGQGlx8OBBtWzZUhUqVNBrr72mhg0bKj09Xfv27dPs2bNVrVo19ezZM8d109LS3P4FaqGhoZozZ47TN4Nv2rRJ8fHx8vPzc2NlVxw+fNgxxtOmTVNkZKTS0tL0zTffaPjw4dqzZ49T/zlz5mjMmDGaPXu2xo4dK0nasmWLMjIyJEkbNmzQn/70J+3du1cBAQGS/vgCySyTJ0/WY4895rTN8uXLF+dLBG5MRfJlAQCKXefOnU2NGjXMhQsXcnw+MzPT8W9JZubMmaZnz57G19fXTJgwwRhjzIwZM0zNmjWNp6enufXWW828efMc6xw6dMhIMnFxcY62c+fOOX1/VtZ3xnz55ZcmMjLSeHt7m2bNmplffvklz9rDwsLM2LFjjbe3t9P3ST322GNmxIgRxm63O323yPnz581jjz1mKleubMqXL2/atWtntm/f7nh+//79pmfPniY4ONj4+fmZpk2bmlWrVmXb56uvvmoeeeQR4+/vb0JDQ817772XZ51dunQx1atXz3GMr/3urNjYWFO9enWTmppqqlWr5vSdSlmyxuvadbPq+8c//pFnPQD+wGkg4AZw5swZffvttxo+fHiusxA2m81peeLEierVq5d27NihRx99VEuXLtXTTz+tUaNGaefOnXr88cf1yCOPKCYmxuV6Ro8erb///e/asmWLgoOD1bNnT6WlpeW5TpUqVdS5c2fNnTtXkpScnKxFixbp0UcfdepnjFG3bt0UHx+vFStWaNu2bbrzzjvVvn17nT17VpJ04cIFde3aVatXr1ZcXJw6d+6sHj166MiRI07bevPNN9W0aVPFxcXpb3/7m4YNG5ZtdiTL2bNntXLlylzHuEKFCk7L0dHR6t+/vzw9PdW/f39FR0fn+foBXAd3pyUA+du0aZORZJYsWeLUXqlSJePn52f8/PzMmDFjHO2SzMiRI536tmjRwjz22GNObQ8++KDp2rWrMca1mZWFCxc6+pw5c8aUK1fOLFq0KNf6s2YRli1bZmrVqmUyMzPN3LlzTePGjY0xxmlmZc2aNSYgIMBcvnzZaRu1atXKc2bk9ttvN++8847TPh966CHHcmZmpgkODs71229//PHHHMc4JwkJCcbX19cx2xMXF2d8fX1NQkKCU7/8Zla8vLwcP7+sx9XfAg/gD8ysADeQa2dPNm/erO3bt6t+/fpKSUlxeq5p06ZOy7t371bLli2d2lq2bKndu3e7XEdUVJTj34GBgapbt26BttOtWzdduHBB69ev1+zZs7PNqkjStm3bdOHCBVWqVEn+/v6Ox6FDh3TgwAFJ0sWLFzVmzBjdfvvtqlChgvz9/bVnz55sMyuRkZGOf9tsNoWEhOjkyZM51mb+7wvorx3jnCxYsEA1a9ZUo0aNJEl33HGHatasmeMFxHkZPXq0tm/f7vS4++67XdoGUBpwgS1wA6hdu7ZsNlu2Uxg1a9aUJJUrVy7bOjmdyrj2jdgY42grU6aMoy1Lfqd28tp2Tjw8PDRw4EBNnDhRP/74o5YuXZqtT2ZmpqpWrarY2Nhsz2Wdihk9erS++eYb/f3vf1ft2rVVrlw5PfDAA0pNTXXqf+1FxTabTZmZmTnWVqdOHdlsNu3evVu9e/fO83XMnj1bv/76qzw8rvwKzczMVHR0tP7617/mue7VgoKCVLt27QL3B0orZlaAG0ClSpXUsWNHTZ8+XRcvXizUNurVq6fvv//eqW3Dhg2qV6+eJKly5cqSpOPHjzue3759e47b2rRpk+Pf586d0759+3TbbbcVqI5HH31U69atU69evVSxYsVsz995552Kj4+Xh4eHateu7fQICgqSJH333XcaPHiw+vTpo4YNGyokJESHDx8u0P5zExgYqM6dO+vdd9/NcYzPnz8vSdqxY4e2bt2q2NhYpxmR9evXa8uWLdq5c+d11QEgO2ZWgBvEjBkz1LJlSzVt2lSTJk1SZGSkypQpoy1btmjPnj1q0qRJnuuPHj1affv2dVysunz5ci1ZskSrV6+W9MfsTPPmzTV16lSFh4fr9OnTGj9+fI7bmjx5sipVqqQqVaroxRdfVFBQUL6zEVnq1aun06dPy9fXN8fnO3TooKioKPXu3Vuvv/666tatq2PHjmnFihXq3bu3mjZtqtq1a2vJkiXq0aOHbDabXnrppVxnTFwxY8YMtWjRQs2aNdPkyZMVGRmp9PR0rVq1SjNnztTu3bsVHR2tZs2aqXXr1tnWj4qKUnR0tP7xj38UaH9JSUmKj493avP19XXc5gzg/7j5mhkALjh27Jh58sknTUREhPH09DT+/v6mWbNm5o033jAXL1509JNkli5dmm39vG5dNsaYXbt2mebNm5ty5cqZO+64w3z77bc5XmC7fPlyU79+fePl5WXuuusup9uKc5LfbbrX3rqcmJhoRowYYapVq2Y8PT1NaGioGTBggOO250OHDpl27dqZcuXKmdDQUDN9+nTTpk0b8/TTT+e5z0aNGpmJEyfmWeuxY8fM8OHDHRfAVq9e3fTs2dPExMSYlJQUU6lSJTNt2rQc133zzTdNUFCQSUlJMcbkf4GtpGyPxx9/PM/6gNLIZsxVJ6gBIA+xsbFq166dzp07l+1WXgAoLlyzAgAALI2wAgAALI3TQAAAwNKYWQEAAJZGWAEAAJZGWAEAAJZGWAEAAJZGWAEAAJZGWAEAAJZGWAEAAJZGWAEAAJZGWAEAAJb2/wHT+lRbnHgdhQAAAABJRU5ErkJggg==", "text/plain": [ "
" ] @@ -401,24 +363,24 @@ " \n", " 0\n", " 1\n", - " -0.185\n", + " -0.184\n", " 0.111\n", - " 0.096\n", + " 0.098\n", " -0.044\n", " 0.022\n", - " 0.023\n", - " -2.747\n", + " 0.022\n", + " -2.623\n", " \n", " \n", " 1\n", " 2\n", - " 0.716\n", + " 0.717\n", " 0.060\n", " 0.000\n", " 0.371\n", " 0.025\n", " 0.000\n", - " 0.626\n", + " 0.627\n", " \n", " \n", "\n", @@ -426,12 +388,12 @@ ], "text/plain": [ " treatment blp_est blp_se blp_pval qini_est qini_se qini_pval \\\n", - "0 1 -0.185 0.111 0.096 -0.044 0.022 0.023 \n", - "1 2 0.716 0.060 0.000 0.371 0.025 0.000 \n", + "0 1 -0.184 0.111 0.098 -0.044 0.022 0.022 \n", + "1 2 0.717 0.060 0.000 0.371 0.025 0.000 \n", "\n", " cal_r_squared \n", - "0 -2.747 \n", - "1 0.626 " + "0 -2.623 \n", + "1 0.627 " ] }, "execution_count": 8, @@ -459,7 +421,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 9, @@ -468,7 +430,7 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAHHCAYAAABDUnkqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8pXeV/AAAACXBIWXMAAA9hAAAPYQGoP6dpAABAo0lEQVR4nO3deVxUZf//8feoMOACigpuCC65kSJqGnq73WGk3ljdd2m2aGZmZWVZuVWa9Su1xWxxyXKtNLXM6qu3ZiaZppkGZbkrLqW4y6qgzPX7owdzO7LIKDBwfD0fj3nkuc51znzOxWnmzdmwGWOMAAAALKKMpwsAAAAoTIQbAABgKYQbAABgKYQbAABgKYQbAABgKYQbAABgKYQbAABgKYQbAABgKYQbAABgKYQbACXKiy++KJvN5tIWGhqq+++/3zk9Z84c2Ww2bd68uZircxUbGyubzabY2FiP1gHAFeEGbrHZbAV6FdeH/dSpUzVnzpxiea+rtW3bNr344ovav39/sb93amqqxo4dq1tuuUUBAQGy2WyFNm7JyckaN26cwsPDVbFiRfn6+ur666/XiBEjdPjw4UJ5D08riftZly5dXP6f8/X1VYsWLTR58mQ5HI58lz148KBq1aolm82mN954I89+O3bs0PDhw9WyZUtVqlRJNWvWVM+ePT0eKi/2888/67HHHlNYWJgqVKigunXrqnfv3tq1a1eBlr90HC9+eXl55bnc3r175ePjk2vIvtJ1ovCU83QBKF0++ugjl+l58+Zp1apVOdqbNm1aLPVMnTpV1apVc/mtvqTatm2bxo0bpy5duig0NLRY3/vEiRN66aWXVLduXYWHhxda+Ny3b5+ioqJ08OBB3XnnnXrooYfk7e2t3377TTNnztQXX3xR4C+Z/OzcuVNlynjud7G89rNOnTrp7Nmz8vb29khdderU0fjx4yX9/TOeP3++nnrqKR0/flyvvPJKrsucPn1a3bt3V1pamjp16qThw4crODhYffr0ydH3ww8/1MyZM/Wf//xHjz76qJKSkvT+++/rxhtv1IoVKxQVFVWk21cQEydO1Pr163XnnXeqRYsWSkxM1HvvvadWrVpp48aNuv766/Nd/rnnntODDz7o0paWlqaHH35YN998c57LPfXUUypXrpwyMjIKbZ0oRAa4CkOGDDEF2Y3S0tKK5P3DwsJM586di2TdhW3x4sVGklmzZk2xv/e5c+fMkSNHjDHG/Pzzz0aSmT179lWt8/z58yY8PNyUL1/e/PDDDznmJyUlmdGjR7u93rFjx152n5o9e7aRZH7++We31+9wOEx6erpby5TE/axz584mLCzMpe3s2bMmJCTEVKpUyVy4cCHHMufOnTOdOnUyfn5+ZsOGDebcuXOmZ8+exm63m++//z5H/82bN5uUlBSXthMnTpjq1aubDh06FO4GXaH169ebjIwMl7Zdu3YZu91u7rnnnita50cffWQkmU8++STX+StWrDDe3t7m+eefL/B+eLl1onBxWgqFrkuXLrr++uu1ZcsWderUSeXLl9fo0aMlSRkZGRo7dqwaNmwou92u4OBgDR8+PMdvP7Nnz9Y///lPBQYGym63q1mzZpo2bZpLn9DQUP3xxx/6/vvvnYd8u3TpIul/12SsW7dOTzzxhKpXr67KlStr8ODByszM1JkzZ9SvXz9VqVJFVapU0fDhw2WMcVm/w+HQ5MmTFRYWJh8fHwUFBWnw4ME6ffp0jjr+9a9/ad26dWrbtq18fHxUv359zZs3z9lnzpw5uvPOOyVJXbt2LfbTd3a7XTVq1CjUdX7++ef69ddf9dxzz+kf//hHjvl+fn4uRw9++OEH3Xnnnapbt67zZ//UU0/p7Nmzl32vS6+5yZaenq7BgweratWq8vPzU79+/fL8+axcuVJt2rSRr6+v3n//fUlXv5/ldc3N4sWL1bp1a/n6+qpatWq699579ddff7n0uf/++1WxYkX99ddfuu2221SxYkVVr15dzzzzjLKysi47Jrnx8fHRDTfcoJSUFB07dsxlnjFG/fv316+//qpVq1bpxhtvlN1u15IlS9StWzfddttt2rZtm8syrVu3VsWKFV3aqlatqo4dO2r79u1XVGNha9++fY4jZ9ddd53CwsKuuMb58+erQoUKuvXWW3PMO3/+vIYOHaqhQ4eqQYMGhbJOFD5OS6FInDx5Ut27d9ddd92le++9V0FBQXI4HOrVq5fWrVunhx56SE2bNtXWrVv11ltvadeuXVq6dKlz+WnTpiksLEy9evVSuXLl9PXXX+vRRx+Vw+HQkCFDJEmTJ0/W448/rooVK+q5556TJAUFBbnU8fjjj6tGjRoaN26cNm7cqBkzZqhy5cr68ccfVbduXb366qtavny5Xn/9dV1//fXq16+fc9nBgwdrzpw5GjBggJ544gklJCTovffeU1xcnNavX+9y7nzPnj264447NHDgQPXv31+zZs3S/fffr9atWyssLEydOnXSE088oXfeeUejR492nrbL7/RdRkaGUlJSCjTe1apVK1C/wvTVV19Jku67774C9V+8eLHS09P1yCOPqGrVqtq0aZPeffdd/fnnn1q8ePEV1fDYY4+pcuXKevHFF7Vz505Nmz
ZNBw4ccIaObDt37lTfvn01ePBgDRo0SI0bN5ZUePvZxbL3mRtuuEHjx4/X0aNH9fbbb2v9+vWKi4tT5cqVnX2zsrIUHR2tdu3a6Y033tC3336rN998Uw0aNNAjjzxyRWOyf/9+2Ww2l/eRpOHDh2vlypVatWqVbrjhBme7t7e3Pv/8c91xxx3q3r27Nm7cqJo1a+b7HomJiQXa586fP6+kpKQC1R0QEFBopx6NMTp69KjCwsLcXvb48eNatWqV+vTpowoVKuSYP3nyZJ0+fVrPP/+8lixZUijrRBHw8JEjlHK5nZbq3LmzkWSmT5/u0v7RRx+ZMmXK5DiFMX36dCPJrF+/3tmW22mD6OhoU79+fZe2vE4XZJ+2iI6ONg6Hw9keGRlpbDabefjhh51tFy5cMHXq1HFZzw8//JDrIeQVK1bkaA8JCTGSzNq1a51tx44dM3a73Tz99NPONndPS2VvQ0Fe7iis01IRERHG39+/wP1z+5mOHz/e2Gw2c+DAAWdbbqelQkJCTP/+/Z3T2WPTunVrk5mZ6Wx/7bXXjCTz5ZdfuiwryaxYsaJANbmzn61Zs8blZ5qZmWkCAwPN9ddfb86ePevs93//939GkhkzZoyzrX///kaSeemll1zWGRERYVq3bp3jvS7VuXNn06RJE3P8+HFz/Phxs2PHDvPss88aSaZnz56XXf5KrV271thsNvPCCy9ctm/2+BTklZCQUGg1Zp8CmjlzptvLvvvuu0aSWb58eY55R44cMZUqVTLvv/++Mabgp0fzWyeKBkduUCTsdrsGDBjg0rZ48WI1bdpUTZo00YkTJ5zt//znPyVJa9asUfv27SVJvr6+zvlJSUk6f/68OnfurJUrVyopKUn+/v4FqmPgwIEuv8G3a9dOGzZs0MCBA51tZcuWVZs2bbRlyxaXWv39/dWtWzeXWrMP069Zs0Z33323s71Zs2bq2LGjc7p69epq3Lix9u3bV6A6cxMdHa1Vq1Zd8fJFLTk5WZUqVSpw/4t/pmlpaTp79qzat28vY4zi4uJUt25dt2t46KGHXI6gPfLIIxo9erSWL1+uXr16Odvr1aun6OjofGu6mv0s2+bNm3Xs2DG9+OKL8vHxcbb37NlTTZo00bJlyzRu3DiXZR5++GGX6Y4dO+a4QD8vO3bsUPXq1V3aevXqpZkzZ7pVd0EdO3ZMd999t+rVq6fhw4dftn94eHiB9+HCOm26Y8cODRkyRJGRkerfv7/by8+fP1/Vq1dXt27dcswbMWKE6tevn+Ni4atZJ4oG4QZFonbt2jnOg+/evVvbt2/P8WGc7eJrBNavX6+xY8dqw4YNSk9Pd+nnzpfOpV+Y2csFBwfnaL/4Wo3du3crKSlJgYGBl601t/eRpCpVquS4/sMdNWvWvOzpAU/y8/NzK7wdPHhQY8aM0VdffZVjXAp66uJS1113nct0xYoVVbNmzRy329erVy/X5QtrP8t24MABSXKe9rpYkyZNtG7dOpc2Hx+fHP8/uLPfhIaG6oMPPpDD4dDevXv1yiuv6Pjx4y7BqrCkpaXpX//6l1JSUrRu3boc1+LkpkqVKoVyR1VWVpaOHz/u0hYQEJDjMyYxMVE9e/aUv7+/PvvsM5UtW9at99m3b582bNigxx57TOXKuX49bty4UR999JFWr17t1umz/NaJosNIo0hc/BtxNofDoebNm2vSpEm5LpMdOPbu3aubbrpJTZo00aRJkxQcHCxvb28tX75cb7311mWf4XGxvD7ccms3F11Q7HA4FBgYqE8++STX5S/9QsrrfcwlFym74+zZswX+0i/si4ULokmTJoqLi9OhQ4dyhMVLZWVlqVu3bjp16pRGjBihJk2aqEKFCvrrr790//33u/UzvRK57Y+FuZ9dKXe/fC9VoUIFl/DQoUMHtWrVSqNHj9Y777xzteU5ZWZm6t///rd+++03rVy58rK3V1+83KlTpwrUt3r16nmOx6FDh3IE1DVr1jgv7Jb+DqPdu3fXmTNn9MMPP6hWrVoFet+LzZ8/X5J0zz335Jg3fPhwdezYUfXq1XOG5+yjukeOHNHBgwdz/SUnv3Wi6BBuUGwaNGigX3/9VTfddFOOJ9Be7Ouvv1ZGRoa++uorlw+LNWvW5Oib33quttZvv/1WHTp0yPWL8Uq4W+vChQtznNrLy9WEqCsVExOjBQsW6OOPP9aoUaPy7bt161bt2rVLc+fOdblo+2pPu+3evVtdu3Z1TqempurIkSPq0aPHZZctiv0sJCRE0t8XMGefbs22c+dO5/yi0qJFC9177716//339cwzz1zRqb5LORwO9evXT6tXr9aiRYvUuXPnAi/7448/uvx88pOQkJDn859q1KiRY18JDw93/vvcuXOKiYnRrl279O2336pZs2YFrvFi8+fPV4MGDXTjjTfmmHfw4EEdOHAg16OAvXr1kr+/v86cOePWOlF0CDcoNr1799by5cv1wQcf6KGHHnKZd/bsWTkcDlWoUMH529vFX9hJSUmaPXt2jnVWqFAh1w+Uwqh16tSpevnll/Xqq6+6zLtw4YJSU1Nz3I1yOdl3SRS03pJ+zc0dd9yh8ePH65VXXlGXLl0UGRnpMj8lJUUTJkzQK6+8kuvP1Bijt99++6pqmDFjhgYMGOC87mbatGm6cOGCunfvftlli2I/a9OmjQIDAzV9+nQ98MADstvtkqT//ve/2r59u8aMGVOQzboqw4cP17x58zRp0iRNnjz5qtf3+OOPa+HChXr//ff173//261lC+uaGx8fnzxPb2VlZalPnz7asGGDvvzyyxz74cWOHDmipKQkNWjQIMeTguPi4rR9+3a98MILuS47Y8aMHKcuv/vuO7377rt644031KRJkxzLXG6dKDqEGxSb++67T4sWLdLDDz+sNWvWqEOHDsrKytKOHTu0aNEi53NIbr75Znl7eysmJkaDBw9WamqqPvjgAwUGBurIkSMu62zdurWmTZum//f//p8aNmyowMDAHL8xX4nOnTtr8ODBGj9+vOLj43XzzTfLy8tLu3fv1uLFi/X222/rjjvucGudLVu2VNmyZTVx4kQlJSXJbrc7n7GSm8K+5ua9997TmTNnnH8S4euvv9aff/4p6e8vsOzrS7JvZZ49e3a+T3728vLSkiVLFBUVpU6dOql3797q0KGDvLy89Mcff2j+/PmqUqWKXnnlFTVp0kQNGjTQM888o7/++kt+fn76/PPPr+qaJOnv0x433XSTevfurZ07d2rq1Kn6xz/+4XIxcV6KYj/z8vLSxIkTNWDAAHXu3Fl9+/Z13goeGhqqp5566qq2tyCaNWumHj166MMPP9QLL7ygqlWrXvG6Jk+erKlTpyoyMlLly5fXxx9/7DL/9ttvz/fW5sK65iY/Tz/9tL766ivFxMTo1KlTOWq89957nf8eNWqU5s6dm+tRouxT0HmdPsrtycLZgbdz585q06ZNjvmXWyeKkOdu1IIV5HUr+KVPTs2WmZlpJk6caMLCwozdbjdVqlQxrVu3NuPGjTNJSUnOfl999ZVp0aKF8fHxMaGho
WbixIlm1qxZOW4ZTUxMND179jSVKlUykpy36+Z1i2b2bcbHjx93ae/fv7+pUKFCjnpnzJhhWrdubXx9fU2lSpVM8+bNzfDhw83hw4edfUJCQnK99bZz5845bh/+4IMPTP369U3ZsmWL/WnF2bdE5/a6eEyzb1vN7dbp3Jw+fdqMGTPGNG/e3JQvX974+PiY66+/3owaNcr5VGRjjNm2bZuJiooyFStWNNWqVTODBg0yv/76a47b0t25Ffz77783Dz30kKlSpYqpWLGiueeee8zJkydzLJvXrdFXu59deit4toULF5qIiAhjt9tNQECAueeee8yff/7p0ievfa4gT2g2Jv//z2JjY40kM3bs2MuuJz/Zt6sXZL/xlOxHT+T1ulj29lxad1ZWlqldu7Zp1aqVW++d363gV7pOFA6bMR44WQ+gxOrdu7f279+vTZs2eboUALginJYC4GSMUWxsbI5D+wBQmnDkBgAAWAp/OBMAAFgK4QYAAFgK4QYAAFgK4QYAAFjKNXe3lMPh0OHDh1WpUqUie3Q/AAAoXMYYpaSkqFatWpf946XXXLg5fPjwZf/IHwAAKJkOHTqkOnXq5Nvnmgs3lSpVkvT34Pj5+Xm4GgAAUBDJyckKDg52fo/n55oLN9mnovz8/Ag3AACUMgW5pIQLigEAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgpJeuYFhY5cptCRy5SeecHT5QAAcM0i3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEsh3AAAAEvxaLhZu3atYmJiVKtWLdlsNi1duvSyy2RkZOi5555TSEiI7Ha7QkNDNWvWrKIvFgAAlArlPPnmaWlpCg8P1wMPPKB///vfBVqmd+/eOnr0qGbOnKmGDRvqyJEjcjgcRVwpAAAoLTwabrp3767u3bsXuP+KFSv0/fffa9++fQoICJAkhYaGFlF1AACgNCpV19x89dVXatOmjV577TXVrl1bjRo10jPPPKOzZ896ujQAAFBCePTIjbv27dundevWycfHR1988YVOnDihRx99VCdPntTs2bNzXSYjI0MZGRnO6eTk5OIqFwAAeECpOnLjcDhks9n0ySefqG3bturRo4cmTZqkuXPn5nn0Zvz48fL393e+goODi7lqAABQnEpVuKlZs6Zq164tf39/Z1vTpk1ljNGff/6Z6zKjRo1SUlKS83Xo0KHiKhcAAHhAqQo3HTp00OHDh5Wamups27Vrl8qUKaM6derkuozdbpefn5/LCwAAWJdHw01qaqri4+MVHx8vSUpISFB8fLwOHjwo6e+jLv369XP2v/vuu1W1alUNGDBA27Zt09q1a/Xss8/qgQcekK+vryc2AQAAlDAeDTebN29WRESEIiIiJEnDhg1TRESExowZI0k6cuSIM+hIUsWKFbVq1SqdOXNGbdq00T333KOYmBi98847HqkfAACUPB69W6pLly4yxuQ5f86cOTnamjRpolWrVhVhVQAAoDQrVdfcAAAAXA7hBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWIpHw83atWsVExOjWrVqyWazaenSpQVedv369SpXrpxatmxZZPUBAIDSx6PhJi0tTeHh4ZoyZYpby505c0b9+vXTTTfdVESVAQCA0qqcJ9+8e/fu6t69u9vLPfzww7r77rtVtmxZt472AAAA6yt119zMnj1b+/bt09ixYwvUPyMjQ8nJyS4vAABgXaUq3OzevVsjR47Uxx9/rHLlCnbQafz48fL393e+goODi7hKAADgSaUm3GRlZenuu+/WuHHj1KhRowIvN2rUKCUlJTlfhw4dKsIqAQCAp3n0mht3pKSkaPPmzYqLi9Njjz0mSXI4HDLGqFy5cvrmm2/0z3/+M8dydrtddru9uMsFAAAeUmrCjZ+fn7Zu3erSNnXqVH333Xf67LPPVK9ePQ9VBgAAShKPhpvU1FTt2bPHOZ2QkKD4+HgFBASobt26GjVqlP766y/NmzdPZcqU0fXXX++yfGBgoHx8fHK0AwCAa5dHw83mzZvVtWtX5/SwYcMkSf3799ecOXN05MgRHTx40FPlAQCAUshmjDGeLqI4JScny9/fX0lJSfLz8yu09aZnXlCzMSslSdteilZ571Jzxg8AgBLPne/vUnO3FAAAQEEQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQborA/hPpni4BAIBrFuGmEJxJz9SguVuc0z3e+UH9Zm5SUvp5D1YFAMC1iXBTCJ5YEK8Ne0+4tK3fc0KPL4jzUEUAAFy7CDdXad/xVK3dfVyOS9qzjNHa3ceVcCLNI3UBAHCtItxcpQOn8r++Zv9Jwg0AAMWJcHOVQgLK5zs/tGqFYqoEAABIhJurVr96RXW6rnqOgSxrs6nTddVVrxrhBgCA4kS4KQTv9o1QZINqLm0dGlbTu30jPFQRAADXLsJNIfAv76UP+rd2Ti9/oqPmDWwr//JeHqwKAIBrE+GmCIRWy/86HAAAUHQINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFI8Gm7Wrl2rmJgY1apVSzabTUuXLs23/5IlS9StWzdVr15dfn5+ioyM1MqVK4unWAAAUCp4NNykpaUpPDxcU6ZMKVD/tWvXqlu3blq+fLm2bNmirl27KiYmRnFxcUVcKQAAKC3KefLNu3fvru7duxe4/+TJk12mX331VX355Zf6+uuvFRERUcjVAQCA0sij4eZqORwOpaSkKCAgIM8+GRkZysjIcE4nJycXR2kAAMBDSvUFxW+88YZSU1PVu3fvPPuMHz9e
/v7+zldwcHAxVggAAIpbqQ038+fP17hx47Ro0SIFBgbm2W/UqFFKSkpyvg4dOlSMVQIAgOJWKk9Lffrpp3rwwQe1ePFiRUVF5dvXbrfLbrcXU2UAAMDTSt2RmwULFmjAgAFasGCBevbs6elyAABACePRIzepqanas2ePczohIUHx8fEKCAhQ3bp1NWrUKP3111+aN2+epL9PRfXv319vv/222rVrp8TEREmSr6+v/P39PbINAACgZPHokZvNmzcrIiLCeRv3sGHDFBERoTFjxkiSjhw5ooMHDzr7z5gxQxcuXNCQIUNUs2ZN52vo0KEeqR8AAJQ8Hj1y06VLFxlj8pw/Z84cl+nY2NiiLQgAAJR6pe6aGwAAgPwQbgAAgKUQbgAAgKW4FW4WLVqkzMxM5/Sff/4ph8PhnE5PT9drr71WeNUBAAC4ya1w07dvX505c8Y53axZM+3fv985nZKSolGjRhVWbQBKgPTMCwoduUyhI5cpPfOCp8sBgMtyK9xcemdTfnc6AQAAeALX3AAAAEsh3AAAAEtx+yF+K1eudP6pA4fDodWrV+v333+XJJfrcQAAADzB7XDTv39/l+nBgwe7TNtstqurCAAA4Cq4FW4uvu0bAACgJHLrmpsHHnhAKSkpRVULAADAVXMr3MydO1dnz54tqloAAACu2lU95wYAACBbSXnop9sXFKekpMjHxyffPn5+fldcEAAAwNVwO9w0atQoz3nGGNlsNmVlZV1VUQAAAFfK7XDz2WefKSAgoChqAQAAuGpuh5sOHTooMDCwKGoBAAC4aoX+5xc4JQUAADzJrXATEhKismXL5jpv165dGj58uOrUqVMohQEAAFwJt8JNQkKCqlat6pxOT0/X7Nmz1bFjRzVr1kxr167VsGHDCr1IAACAgnL7mhtJ2rhxoz788EMtXrxYdevW1fbt27VmzRp17NixsOsDAABwi1tHbt58802FhYXpjjvuUJUqVbR27Vpt3bpVNpvN5YgOAACAp7h15GbEiBEaMWKEXnrppTyvvQEAAPAkt47cvPzyy1q8eLHq1aunESNG6Pfffy+qugAAAK6IW+Fm1KhR2rVrlz766CMlJiaqXbt2Cg8PlzFGp0+fLqoaAQAACuyKnnPTuXNnzZ07V0eOHNGjjz6qVq1aqVOnTmrfvr0mTZpU2DUCAAAU2FU9xM/Pz0+DBw/Wpk2b9Ouvv6pdu3aaMGFCYdUGAADgNrcuKD579qxWr16tf/3rX5L+Pk2VkZHxv5WVK6e9e/cWboUAAABucCvczJ07V8uWLXOGm/fee09hYWHy9fWVJO3cuVO1atXSU089VfiVAgAAFIBbp6U++eQTPfTQQy5t8+fP15o1a7RmzRq99tprWrRoUaEWCAAA4A63ws2ePXvUvHlz57SPj4/KlPnfKtq2batt27YVXnUAAABucuu01JkzZ1yusTl+/LjLfIfD4TIfAACguLl15KZOnTr5Prjvt99+46+CAwAAj3Ir3PTo0UNjxozRuXPncsw7e/asxo0bp549exZacQAAAO5y67TU6NGjtWjRIjVu3FiPPfaYGjVqJOnvu6Tee+89XbhwQaNHjy6SQgEAAArCrXATFBSkH3/8UY888ohGjhwpY4wkyWazqVu3bpo6daqCgoKKpFAAAICCcCvcSFK9evW0YsUKnTp1Snv27JEkNWzYUAEBAYVeHAAAgLvcDjfZAgIC1LZt28KsBQAA4Kpd1d+WAgAAKGkINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwBgcemZFxQ6cplCRy5TeuYFT5cDFDnCDQAAsBTCDQAAsBTCDQAAsBTCDQAAsBSPhpu1a9cqJiZGtWrVks1m09KlSy+7TGxsrFq1aiW73a6GDRtqzpw5RV4nAAAoPTwabtLS0hQeHq4pU6YUqH9CQoJ69uyprl27Kj4+Xk8++aQefPBBrVy5sogrBQAApcUV/+HMwtC9e3d17969wP2nT5+uevXq6c0335QkNW3aVOvWrdNbb72l6OjooioTAACUIqXqmpsNGzYoKirKpS06OlobNmzIc5mMjAwlJye7vAAAgHWVqnCTmJiooKAgl7agoCAlJyfr7NmzuS4zfvx4+fv7O1/BwcHFUSoAAPCQUhVursSoUaOUlJTkfB06dMjTJQEAgCLk0Wtu3FWjRg0dPXrUpe3o0aPy8/OTr69vrsvY7XbZ7fbiKA8AAJQAperITWRkpFavXu3StmrVKkVGRnqoIgAAUNJ4NNykpqYqPj5e8fHxkv6+1Ts+Pl4HDx6U9PcppX79+jn7P/zww9q3b5+GDx+uHTt2aOrUqVq0aJGeeuopT5QPAABKII+Gm82bNysiIkIRERGSpGHDhikiIkJjxoyRJB05csQZdCSpXr16WrZsmVatWqXw8HC9+eab+vDDD7kNHAAAOHn0mpsuXbrIGJPn/NyePtylSxfFxcUVYVUAAKA0K1XX3AAAAFwO4QYAAFgK4QYAAFgK4QYAAFgK4QYAAFgK4QYAAFgK4QYAAFgK4QYAAFhKqfrDmSVZee9y2j+hp6fLAADgmseRGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAUOj2n0j32HsTbgAAwFU7k56pQXO3OKd7vPOD+s3cpKT088VeC+EGAABctScWxGvD3hMubev3nNDjC+KKvRbCDQAAuCr7jqdq7e7jclzSnmWM1u4+roQTacVaD+EGAABclQOn8r++Zv9Jwg0AAChFQgLK5zs/tGqFYqrkb4QbAABwVepXr6hO11XPESrK2mzqdF111atGuAEAAKXMu30jFNmgmktbh4bV9G7fiGKvhXADoMA8+dwKACWbf3kvfdC/tXN6+RMdNW9gW/mX9yr2Wgg3APJUkp5bAaB0Ca2W/3U4RYlwAyBPJem5FQBQUIQbALkqac+tAICCItwAyFVJe24FABQU4QZArkracysAoKAINwByVdKeWwEABUW4AZCnkvTcCgAoKMINgDyVpOdWAEBBEW4AFJgnn1sBAAVFuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZCuAEAAJZSIsLNlClTFBoaKh8fH7Vr106bNm3Kt//kyZPVuHFj+fr6Kjg4WE899ZTOnTtXTNUCAICSzOPhZuHChRo2bJjGjh2rX375ReHh4YqOjtaxY8dy7T9//nyNHDlSY8eO1fbt2zVz5kwtXLhQo0ePLubKAQBASeTxcDNp0iQNGjRIAwYMULNmzTR9+nSVL19es2bNyrX/jz/+qA4dOujuu+9WaGiobr75ZvXt2/eyR3sAAMC1waPhJjMzU1u2bFFUVJSzrUyZMoqKitKGDRtyXaZ9+/basmWLM8zs27dPy5cvV48ePXLtn5GRoeTkZJcXAFyr9p9I93QJQJHzaLg5ceKEsrKyFBQU5NIeFBSkxMTEXJe5++679dJLL+kf//iHvLy81KB
BA3Xp0iXP01Ljx4+Xv7+/8xUcHFzo2wEAJdWZ9EwNmrvFOd3jnR/Ub+YmJaWf92BVQNHy+Gkpd8XGxurVV1/V1KlT9csvv2jJkiVatmyZXn755Vz7jxo1SklJSc7XoUOHirliAPCcJxbEa8PeEy5t6/ec0OML4jxUEVD0ynnyzatVq6ayZcvq6NGjLu1Hjx5VjRo1cl3mhRde0H333acHH3xQktS8eXOlpaXpoYce0nPPPacyZVzzmt1ul91uL5oNAIASbN/xVK3dfTxHe5YxWrv7uBJOpKletQoeqAwoWh49cuPt7a3WrVtr9erVzjaHw6HVq1crMjIy12XS09NzBJiyZctKkowxRVcsAJQyB07lf33N/pNpxVQJULw8euRGkoYNG6b+/furTZs2atu2rSZPnqy0tDQNGDBAktSvXz/Vrl1b48ePlyTFxMRo0qRJioiIULt27bRnzx698MILiomJcYYcAIAUElA+3/mhVTlqA2vyeLjp06ePjh8/rjFjxigxMVEtW7bUihUrnBcZHzx40OVIzfPPPy+bzabnn39ef/31l6pXr66YmBi98sorntoEACiR6levqE7XVde63cfluKi9rM2mDg2rcUoKlmUz19i5nOTkZPn7+yspKUl+fn6eLgco8dIzL6jZmJWSpG0vRau8t8d/J4IbktLP69FPftH6iy4q7nRddb3bN0L+5b08WBmsqCg/L9z5/i51d0sBAArOv7yXPujf2jm9/ImOmjewLcEGlka4AYBrSGi1/K/DAayAcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACylnKcLAAAA1lDeu5z2T+jp6TI4cgMAAKyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACylRISbKVOmKDQ0VD4+PmrXrp02bdqUb/8zZ85oyJAhqlmzpux2uxo1aqTly5cXU7UAAKAkK+fpAhYuXKhhw4Zp+vTpateunSZPnqzo6Gjt3LlTgYGBOfpnZmaqW7duCgwM1GeffabatWvrwIEDqly5cvEXDwAAShyPh5tJkyZp0KBBGjBggCRp+vTpWrZsmWbNmqWRI0fm6D9r1iydOnVKP/74o7y8vCRJoaGhxVkyAAAowTx6WiozM1NbtmxRVFSUs61MmTKKiorShg0bcl3mq6++UmRkpIYMGaKgoCBdf/31evXVV5WVlVVcZQMAgBLMo0duTpw4oaysLAUFBbm0BwUFaceOHbkus2/fPn333Xe65557tHz5cu3Zs0ePPvqozp8/r7Fjx+bon5GRoYyMDOd0cnJy4W4EAAAoUUrEBcXucDgcCgwM1IwZM9S6dWv16dNHzz33nKZPn55r//Hjx8vf39/5Cg4OLuaKAQBAcfJouKlWrZrKli2ro0ePurQfPXpUNWrUyHWZmjVrqlGjRipbtqyzrWnTpkpMTFRmZmaO/qNGjVJSUpLzdejQocLdCAAAUKJ4NNx4e3urdevWWr16tbPN4XBo9erVioyMzHWZDh06aM+ePXI4HM62Xbt2qWbNmvL29s7R3263y8/Pz+UFAACsy+OnpYYNG6YPPvhAc+fO1fbt2/XII48oLS3NefdUv379NGrUKGf/Rx55RKdOndLQoUO1a9cuLVu2TK+++qqGDBniqU0ALK28dzntn9BT+yf0VHlvj99gCQCX5fFPqj59+uj48eMaM2aMEhMT1bJlS61YscJ5kfHBgwdVpsz/MlhwcLBWrlypp556Si1atFDt2rU1dOhQjRgxwlObAAAAShCbMcZ4uojilJycLH9/fyUlJXGKCsA1IT3zgpqNWSlJ2vZSNEfgUCq58/3t8dNSAAAAhYlwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIUnOQGAxWX/CQ3gWsGRGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCmEGwAAYCnlPF1AcTPGSJKSk5M9XAkAACio7O/t7O/x/Fxz4SYlJUWSFBwc7OFKAACAu1JSUuTv759vH5spSASyEIfDocOHD6tSpUqy2WxKTk5WcHCwDh06JD8/P0+X53GMhyvGwxXj4YrxcMV4uGI8XF3teBhjlJKSolq1aqlMmfyvqrnmjtyUKVNGderUydHu5+fHzncRxsMV4+GK8XDFeLhiPFwxHq6uZjwud8QmGxcUAwAASyHcAAAAS7nmw43dbtfYsWNlt9s9XUqJwHi4YjxcMR6uGA9XjIcrxsNVcY7HNXdBMQAAsLZr/sgNAACwFsINAACwFMINAACwFMINAACwFEuGmylTpig0NFQ+Pj5q166dNm3alG//xYsXq0mTJvLx8VHz5s21fPlyl/nGGI0ZM0Y1a9aUr6+voqKitHv37qLchELlznh88MEH6tixo6pUqaIqVaooKioqR//7779fNpvN5XXLLbcU9WYUGnfGY86cOTm21cfHx6XPtbR/dOnSJcd42Gw29ezZ09mntO4fa9euVUxMjGrVqiWbzaalS5dedpnY2Fi1atVKdrtdDRs21Jw5c3L0cffzqKRwdzyWLFmibt26qXr16vLz81NkZKRWrlzp0ufFF1/MsW80adKkCLei8Lg7HrGxsbn+v5KYmOjS71rZP3L7XLDZbAoLC3P2Kcz9w3LhZuHChRo2bJjGjh2rX375ReHh4YqOjtaxY8dy7f/jjz+qb9++GjhwoOLi4nTbbbfptttu0++//+7s89prr+mdd97R9OnT9dNPP6lChQqKjo7WuXPnimuzrpi74xEbG6u+fftqzZo12rBhg4KDg3XzzTfrr7/+cul3yy236MiRI87XggULimNzrpq74yH9/TTNi7f1wIEDLvOvpf1jyZIlLmPx+++/q2zZsrrzzjtd+pXG/SMtLU3h4eGaMmVKgfonJCSoZ8+e6tq1q+Lj4/Xkk0/qwQcfdPlCv5L9raRwdzzWrl2rbt26afny5dqyZYu6du2qmJgYxcXFufQLCwtz2TfWrVtXFOUXOnfHI9vOnTtdtjcwMNA571raP95++22XcTh06JACAgJyfHYU2v5hLKZt27ZmyJAhzumsrCxTq1YtM378+Fz79+7d2/Ts2dOlrV27dmbw4MHGGGMcDoepUaOGef31153zz5w5Y+x2u1mwYEERbEHhcnc8LnXhwgVTqVIlM3fuXGdb//79za233lrYpRYLd8dj9uzZxt/fP8/1Xev7x1
tvvWUqVapkUlNTnW2lef/IJsl88cUX+fYZPny4CQsLc2nr06ePiY6Odk5f7fiWFAUZj9w0a9bMjBs3zjk9duxYEx4eXniFeUhBxmPNmjVGkjl9+nSefa7l/eOLL74wNpvN7N+/39lWmPuHpY7cZGZmasuWLYqKinK2lSlTRlFRUdqwYUOuy2zYsMGlvyRFR0c7+yckJCgxMdGlj7+/v9q1a5fnOkuKKxmPS6Wnp+v8+fMKCAhwaY+NjVVgYKAaN26sRx55RCdPnizU2ovClY5HamqqQkJCFBwcrFtvvVV//PGHc961vn/MnDlTd911lypUqODSXhr3D3dd7rOjMMa3NHM4HEpJScnx2bF7927VqlVL9evX1z333KODBw96qMLi0bJlS9WsWVPdunXT+vXrne3X+v4xc+ZMRUVFKSQkxKW9sPYPS4WbEydOKCsrS0FBQS7tQUFBOc5zZktMTMy3f/Z/3VlnSXEl43GpESNGqFatWi7/A95yyy2aN2+eVq9erYkTJ+r7779X9+7dlZWVVaj1F7YrGY/GjRtr1qxZ+vLLL/Xxxx/L4XCoffv2+vPPPyVd2/vHpk2b9Pvvv+vBBx90aS+t+4e78vrsSE5O1tmzZwvl/7/S7I033lBqaqp69+7tbGvXrp3mzJmjFStWaNq0aUpISFDHjh2VkpLiwUqLRs2aNTV9+nR9/vnn+vzzzxUcHKwuXbrol19+kVQ4n8+l1eHDh/Xf//43x2dHYe4f19xfBUfBTZgwQZ9++qliY2NdLqK96667nP9u3ry5WrRooQYNGig2NlY33XSTJ0otMpGRkYqMjHROt2/fXk2bNtX777+vl19+2YOVed7MmTPVvHlztW3b1qX9Wto/kLv58+dr3Lhx+vLLL12uMenevbvz3y1atFC7du0UEhKiRYsWaeDAgZ4otcg0btxYjRs3dk63b99ee/fu1VtvvaWPPvrIg5V53ty5c1W5cmXddtttLu2FuX9Y6shNtWrVVLZsWR09etSl/ejRo6pRo0auy9SoUSPf/tn/dWedJcWVjEe2N954QxMmTNA333yjFi1a5Nu3fv36qlatmvbs2XPVNRelqxmPbF5eXoqIiHBu67W6f6SlpenTTz8t0AdOadk/3JXXZ4efn598fX0LZX8rjT799FM9+OCDWrRoUY7TdpeqXLmyGjVqZLl9Iy9t27Z1buu1un8YYzRr1izdd9998vb2zrfv1ewflgo33t7eat26tVavXu1sczgcWr16tctv3xeLjIx06S9Jq1atcvavV6+eatSo4dInOTlZP/30U57rLCmuZDykv+/+efnll7VixQq1adPmsu/z559/6uTJk6pZs2ah1F1UrnQ8LpaVlaWtW7c6t/Va3D+kvx+fkJGRoXvvvfey71Na9g93Xe6zozD2t9JmwYIFGjBggBYsWODyeIC8pKamau/evZbbN/ISHx/v3NZrcf+QpO+//1579uwp0C9GV7V/FMplySXIp59+aux2u5kzZ47Ztm2beeihh0zlypVNYmKiMcaY++67z4wcOdLZf/369aZcuXLmjTfeMNu3bzdjx441Xl5eZuvWrc4+EyZMMJUrVzZffvml+e2338ytt95q6tWrZ86ePVvs2+cud8djwoQJxtvb23z22WfmyJEjzldKSooxxpiUlBTzzDPPmA0bNpiEhATz7bffmlatWpnrrrvOnDt3ziPb6A53x2PcuHFm5cqVZu/evWbLli3mrrvuMj4+PuaPP/5w9rmW9o9s//jHP0yfPn1ytJfm/SMlJcXExcWZuLg4I8lMmjTJxMXFmQMHDhhjjBk5cqS57777nP337dtnypcvb5599lmzfft2M2XKFFO2bFmzYsUKZ5/LjW9J5u54fPLJJ6ZcuXJmypQpLp8dZ86ccfZ5+umnTWxsrElISDDr1683UVFRplq1aubYsWPFvn3ucnc83nrrLbN06VKze/dus3XrVjN06FBTpkwZ8+233zr7XEv7R7Z7773XtGvXLtd1Fub+YblwY4wx7777rqlbt67x9vY2bdu2NRs3bnTO69y5s+nfv79L/0WLFplGjRoZb29vExYWZpYtW+Yy3+FwmBdeeMEEBQUZu91ubrrpJrNz587i2JRC4c54hISEGEk5XmPHjjXGGJOenm5uvvlmU716dePl5WVCQkLMoEGDSsX/jNncGY8nn3zS2TcoKMj06NHD/PLLLy7ru5b2D2OM2bFjh5FkvvnmmxzrKs37R/atu5e+sre/f//+pnPnzjmWadmypfH29jb169c3s2fPzrHe/Ma3JHN3PDp37pxvf2P+vlW+Zs2axtvb29SuXdv06dPH7Nmzp3g37Aq5Ox4TJ040DRo0MD4+PiYgIMB06dLFfPfddznWe63sH8b8/ZgMX19fM2PGjFzXWZj7h80YY9w/3gMAAFAyWeqaGwAAAMINAACwFMINAACwFMINAACwFMINAACwFMINAACwFMINAACwFMINAACwFMINYHGJiYkaOnSoGjZsKB8fHwUFBalDhw6aNm2a0tPTPV1enkJDQ2Wz2fTpp5/mmBcWFiabzaY5c+YUf2G5SExM1OOPP6769evLbrcrODhYMTExOf72lCSNHz9eZcuW1euvv+5sy97WvF7333+/JOU5P7cxAq5l5TxdAICis2/fPnXo0EGVK1fWq6++qubNm8tut2vr1q2aMWOGateurV69euW67Pnz5+Xl5VXMFbsKDg7W7NmzdddddznbNm7cqMTERFWoUMGDlf3P/v37nWP8+uuvq3nz5jp//rxWrlypIUOGaMeOHS79Z82apeHDh2vWrFl69tlnJUk///yzsrKyJEk//vij/vOf/2jnzp3y8/OTJPn6+jqXnz17tm655RaXdVauXLkItxAoha7ojzYAKBWio6NNnTp1TGpqaq7zHQ6H89+SzNSpU01MTIwpX7688++JTZ061dSvX994eXmZRo0amXnz5jmXSUhIMJJMXFycs+306dNGklmzZo0x5n9/g+b//u//TPPmzY3dbjft2rVz+eO0uQkJCTEjR440drvdHDx40Nk+aNAg8/jjjxt/f3+Xv+V0+vRpM3DgQFOtWjVTqVIl07VrVxMfH++cv2fPHtOrVy8TGBhoKlSoYNq0aWNWrVqV4z1feeUVM2DAAFOxYkUTHBxs3n///Xzr7N69u6ldu3auY3z69GmX6djYWFO7dm2TmZlpatWqZdavX59jmezxunRZY/7+GX3xxRf51gPAGE5LARZ18uRJffPNNxoyZEieRzlsNpvL9Isvvqjbb79dW7du1QMPPKAvvvhCQ4cO1dNPP63ff/9dgwcP1oABA7RmzRq363n22Wf15ptv6ueff1b16tUVExOj8+fP57tMUFCQoqOjNXfuXElSenq6Fi5cqAceeCBH3zvvvFPHjh3Tf//7X23ZskWtWrXSTTfdpFOnTkmSUlNT1aNHD61evVpxcXG65ZZbFBMTo4MHD7qs580331SbNm0UFxenRx99VI888oh27tyZa32nTp3SihUr8hzjS4+ozJw5U3379pWXl5f69u2rm
TNn5rv9AK6Qp9MVgKKxceNGI8ksWbLEpb1q1aqmQoUKpkKFCmb48OHOdknmySefdOnbvn17M2jQIJe2O++80/To0cMY496Rm08//dTZ5+TJk8bX19csXLgwz/pDQkLMW2+9ZZYuXWoaNGhgHA6HmTt3romIiDDGGJcjNz/88IPx8/Mz586dc1lHgwYN8j3yEhYWZt59912X97z33nud0w6HwwQGBppp06bluvxPP/2U6xjnJikpyfj6+jqPJsXFxZmKFSualJQUl36XO3Lj4+Pj/Pllvw4cOHDZ9weuJRy5Aa4xmzZtUnx8vMLCwpSRkeEyr02bNi7T27dvV4cOHVzaOnTooO3bt7v9vpGRkc5/BwQEqHHjxgVaT8+ePZWamqq1a9dq1qxZuR61+fXXX5WamqqqVauqYsWKzldCQoL27t0r6e8jN88884yaNm2qypUrq2LFitq+fXuOIzctWrRw/ttms6lGjRo6duxYrrUZYwq07ZK0YMECNWjQQOHh4ZKkli1bKiQkRAsXLizwOiTprbfeUnx8vMurVq1abq0DsDouKAYsqmHDhrLZbDlOqdSvX1+S60Wq2dy9SLdMmb9/P7r4S/5yp5rcVa5cOd13330aO3asfvrpJ33xxRc5+qSmpqpmzZqKjY3NMS/71NAzzzyjVatW6Y033lDDhg3l6+urO+64Q5mZmS79L72I2mazyeFw5FrbddddJ5vNluOi4dzMnDlTf/zxh8qV+9/HrsPh0KxZszRw4MDLLp+tRo0aatiwYYH7A9cijtwAFlW1alV169ZN7733ntLS0q5oHU2bNtX69etd2tavX69mzZpJkqpXry5JOnLkiHN+fHx8ruvauHGj89+nT5/Wrl271LRp0wLV8cADD+j777/XrbfeqipVquSY36pVKyUmJqpcuXJq2LChy6tatWrOuu+//37dfvvtat68uWrUqKH9+/cX6P3zEhAQoOjoaE2ZMiXXMT5z5owkaevWrdq8ebNiY2NdjrjExsZqw4YNBQpHAAqOIzeAhU2dOlUdOnRQmzZt9OKLL6pFixYqU6aMfv75Z+3YsUOtW7fOd/lnn31WvXv3VkREhKKiovT1119ryZIl+vbbbyX9ffTnxhtv1IQJE1SvXj0dO3ZMzz//fK7reumll1S1alUFBQXpueeeU7Vq1XTbbbcVaDuaNm2qEydOqHz58rnOj4qKUmRkpG677Ta99tpratSokQ4fPqxly5bp9ttvV5s2bXTddddpyZIliomJkc1m0wsvvJDnERl3TJkyRR06dFDbtm310ksvqUWLFrpw4YJWrVqladOmafv27Zo5c6batm2rTp065Vj+hhtu0MyZM12ee5OfM2fOKDEx0aWtUqVKJebWeKBE8PRFPwCK1uHDh81jjz1m6tWrZ7y8vEzFihVN27Ztzeuvv27S0tKc/ZTHbcb53QpujDHbtm0zkZGRxtfX17Rs2dJ88803uV5Q/PXXX5uwsDDj7e1t2rZta3799dd8686+oDgvl94KnpycbB5//HFTq1Yt4+XlZYKDg80999zjvI08ISHBdO3a1fj6+prg4GDz3nvvmc6dO5uhQ4fm+57h4eHO2+LzcvjwYTNkyBATEhJivL29Te3atU2vXr3MmjVrTEZGhqlatap57bXXcl124sSJJjAw0GRmZhpjLn9BcW6v8ePH51sfcK2xGePGFXEA4KbY2Fh17dpVp0+f5mFzAIoF19wAAABLIdwAAABL4bQUAACwFI7cAAAASyHcAAAASyHcAAAASyHcAAAASyHcAAAASyHcAAAASyHcAAAASyHcAAAASyHcAAAAS/n/L+zTgW40TfcAAAAASUVORK5CYII=", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAjcAAAHFCAYAAAAOmtghAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy88F64QAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA+HklEQVR4nO3dd3wVVf7/8fdNJ0ACSSAQCAm9KUUQgSxNhBggiIoIrtJdFRcFFJbil+auICqiUiyUgAKCNBvSNBARlLKJq4IizaAQINQQIBByfn/4y9Vrer3J8Ho+HvfxYM49M/czh2F458zMjc0YYwQAAGARLs4uAAAAoDARbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbgAAgKUQbpBrNpstV6+tW7cWSz3Lli3TrFmziuWzCury5cuaPHlysY3NXy1ZskR9+/ZV/fr15eLiotDQ0ELZblpamt59913dddddCggIkLu7uypXrqwePXro448/VlpaWp63GRoaqoEDB9qXjx49KpvNpqioKHvb5MmTZbPZlJiYWAh7kbPsjjWbzabJkycXSx1/FhUV5fDvzs3NTVWrVlXfvn31888/57j+N998o8mTJ2v16tVZ9rlx44Zmzpypu+++W9WrV5e3t7caNmyosWPH6vz584W4NwUzf/589erVS6GhoSpTpozq1KmjJ554QidOnMj1Nq5fv66ZM2fq1ltvVZkyZVShQgW1bdtWO3bssPc5cOCAnn32WbVo0UIVKlSQn5+fwsLCtGrVqgzb27Jli7p06aKgoCB5enqqcuXKuvPOO7V+/fpC2Wdkz83ZBaD02Llzp8Py888/r+joaH3xxRcO7Y0aNSqWepYtW6bvv/9eI0aMKJbPK4jLly9rypQpkqSOHTsW++e/++67SkhIUKtWrZSWlqbr168XeJtXr15Vr169tGnTJvXt21fz5s1TlSpVdPr0aW3YsEEPPPCAVqxYoXvuuadAn1O1alXt3LlTtWvXLnDN+ZXdsbZz505Vr169+Iv6/xYtWqQGDRro6tWr+uqrr/Sf//xH0dHR+vHHH1WxYsVM19m5c6fCw8N148YNXblyRW+//baGDh2aod+VK1c0efJk9evXT0OHDlVAQID++9//6t///rc+/vhj7dmzR2XKlCnqXczRpEmT1KlTJ73wwguqVq2afvrpJz3//PP68MMPFRsbq8DAwGzXv3Hjhu69915t375dY8aMUdu2bZWcnKy9e/cqOTnZ3m/Tpk369NNP9cgjj+j2229XamqqVqxYoQceeEBTpkzRxIkT7X3PnDmjxo0ba+jQoapSpYrOnj2rN998U927d9e7776rhx9+uMjGA5IMkE8DBgwwZcuWzbFfcnJykXx+9+7dTUhISJFsu7CdPn3aSDKTJk1yyuffuHHD/ufCGrcnnnjCSDKLFy/O9P0DBw6Yb7/9Ns/bDQkJMQMGDMi2z6RJk4wkc/r06Txv3xhjLl++nKf+JfFYW7RokZFkdu/e7dA+ZcoUI8ksXLgw0/V27dplfHx8TFhYmDl16pQZOnSosdlsZtGiRRn6pqammsTExAztH3zwgZFk3n333ULZl4I6efJkhrbdu3cbSeb555/Pcf1XX33VuLi4mJ07d2bb7/Tp0yYtLS1De/fu3Y23t7e5evVqtutfu3bNVKtWzbRr1y7HmlAwXJZCoerYsaNuueUWxcTEqG3btvL29tbgwYMlSRcvXtSzzz6rmjVrysPDQ9WqVdOIESMcfjKSpDlz5qh9+/aqXLmyypYtq1tvvVUzZsxwmG3o2LGjPv30U/3yyy8OU/PSH5cxXnrpJb344ov2qeqOHTvqwIEDun79usaOHaugoCD5+vrq3nvv1alTpzLsy4oVK9SmTRuVLVtW5cqVU3h4uGJjYx36DBw4UOXKldPBgwfVrVs3lStXTsHBwXrmmWeUkpJir6dSpUqSpClTpthr/fOll6Lm4lK4/9QTEhI0f/58hYeHq3///pn2qVu3rpo0aSLp91meZ555Rs2aNZOvr6/8/PzUpk0bffjhhzl+VmaXpdIdO3ZM9913n3x8fOTr66uHH35Yp0+fdugTGhqqHj16aM2aNWrevLm8vLzss2gFPdakzC9Lff/997rnnntUsWJFeXl5qVmzZlq8eLFDn61bt8pms2n58uWaMGGCgoKC5OPjo7vuuks//fRTjuOSlZYtW0qSTp48meG9vXv3qmvXrurcubO2bNmiSpUq6Z133tHEiRM1ZMgQvffeew79XV1d5e/vn2E7rVq1kvT7+JcElStXztDWokULubq65qrG1157Te3bt1fr1q2z7RcQEODwd5+uVatWunz5ss6ePZvt+u7u7qpQoYLc3LhoUtQYYRS6EydO6OGHH9aYMWP0wgsvyMXFRZcvX1aHDh3066+/avz48WrSpIl++OEHTZw4Ud999522bNliP2kcOnRIDz30kD0Effvtt/rPf/6jH3/8UQsXLpQkzZ07V//4xz906NAhrV27NtM65syZoyZNmmjOnDk6f/68nnnmGUVGRuqOO+6Qu7u7Fi5cqF9++UXPPvushg4dqo8++si+7gsvvKDnnntOgwYN0nPPPadr167ppZdeUrt27bRr1y6HS2/Xr19Xz549NWTIED3zzDOKiYnR888/L19fX02cOFFVq1bVhg0bdPfdd2vIkCH26f/0wJOV1NTUXI23q6trpifcohQdHa3r16+rV69eueqfkpKis2fP6tlnn1W1atV07do1bdmyRffdd58WLVqUZUDKyb333qs+ffro8ccf1w8//KD/+7//0759+/TNN9/I3d3d3u+///2v9u/fr+eee041a9ZU2bJlJRXesfZnP/30k9q2bavKlSvr9ddfl7+/v9577z0NHDhQJ0+e1JgxYxz6jx8/XmFhYZo/f74uXryof/3rX4qMjNT+/fvl6uqa5zE5cuSIJKlevXoO7XFxcerataseeughvfHGGw6Bd/LkyapevbqGDh0qV1dX9evXL9vPSL8U3bhx4xzruXHjhowxOfZzcXEp1BC+bds23bhxI8cajx07pqNHjyoyMlLjx4/XggULdObMGdWvX19jxozRgAEDcvys6OhoVapUKdOQlZaWprS0NJ06dUpvvfWWDhw4oBdffDHf+4VccvbUEUqvzC5LdejQwUgyn3/+uUP7tGnTjIuLS4Yp9FWrVhlJZv369Zl+xo0bN8z169fNkiVLjKurqzl79qz9vawuFRw5csRIMk2bNnW4HDNr1iwjyfTs2dOh/4gRI4wkc+HCBWOMMfHx8cbNzc0MHz7coV9SUpKpUqWK6dOnj8MYSDIrV6506NutWzdTv359+3J+LktJytUrs8sJ2SmMSyzTp083ksyGDRvytX5qaqq5fv26GTJkiGnevLnDe3+9LJX+9/nn/Uy/LDVy5EiHdZcuXWokmffee89he66uruann37Ktqb8HGvGmAx/r3379jWenp4mPj7eoV9ERITx9vY258+fN8YYEx0dbSSZbt26OfRbuXKlkZTjJZL0y1Jff/
21uX79uklKSjIbNmwwVapUMe3btzfXr1/Pdv38+vXXX01gYKBp2bKlw7+vrKSfE3J65XQpMi8uXrxoGjZsaIKDg01SUlK2fXfu3GkkGR8fH9OoUSOzcuVKs3HjRtO7d28jybz99tvZrv/OO+8YSea1117L9P3w8HD7Pvr4+Jg1a9bke7+Qe8zcoNBVrFhRd955p0PbJ598oltuuUXNmjVzmJEIDw+3P2EVEREhSYqNjdWkSZP01VdfZZjmPXDggO64445c1dGtWzeHnwQbNmwoSerevbtDv/T2+Ph43XLLLdq4caNSU1PVv39/h1q9vLzUoUMHRUdHO6xvs9kUGRnp0NakSZMMN1rn1e7du3PVr2bNmgX6nOLywQcfaNasWfr2228dLkV6eXnle5t///vfHZb79OmjAQMGKDo62uG9Jk2aZJjJkArvWPuzL774Qp07d1ZwcLBD+8CBA/XZZ59p586duvvuu+3tPXv2dOiXfinvl19+yfEyiaQMfRo2bKgPP/ywSC59nD17Vt26dZMxRitWrMjVTMtbb72lpKSkHPsFBARk+376DEg6m82W6czW1atXdd999+mXX37RF198oXLlyuW43fT11q9fr5CQEElSly5d1LJlS02dOlWPPvpoput+9tlnevLJJ9W7d28NHz480z5vvPGGzp8/rxMnTui9997Tgw8+qMWLF+c4O4aCIdyg0FWtWjVD28mTJ3Xw4EGHSwV/lv5Ib3x8vNq1a6f69evrtddeU2hoqLy8vLRr1y49+eSTunLlSq7r8PPzc1j28PDItv3q1av2WiXp9ttvz3S7fz2he3t7Z/gP2tPT0769/GrWrFmu+uXn0kVB1ahRQ9Ifl0BysmbNGvXp00cPPPCARo8erSpVqsjNzU3z5s2zX/7JjypVqjgsu7m5yd/fX2fOnHFoz+yYLMxj7c/OnDmT6ecFBQXZ3/+zv97T4unpKUm5/vwlS5aoYcOGSkpK0ooVK/TWW2+pX79++uyzz/JTfpbOnTunLl266LffftMXX3yhWrVq5Wq9OnXq5PqyVHamTp1qv1dKkkJCQnT06FGHPikpKfannj755JNchdP08W/QoIE92Ei/h6fw8HBNmzZNp06dynDJaePGjbrvvvvUpUsXLV26NMtLw3Xr1rX/uWfPnoqIiNCTTz6pBx98sNDvhcMfCDcodJn9Iw8ICFCZMmWy/I8s/ae2devWKTk5WWvWrHE40cTFxRVJrdnVsmrVKocailtWQfCvFi1aVKw3J0tSp06d5O7urnXr1unxxx/Psf97772nmjVrasWKFQ7HR/pN1/mVkJCgatWq2ZdTU1N15syZDIEhs2OyqI41f3//TL9f5fjx45JynqHIq4YNG9pvIu7UqZNu3Lih+fPna9WqVerdu3ehfMa5c+d011136ciRI/r888/ts0u50blzZ23bti3HfgMGDMj0pvF0//jHP9SjRw/7cnoITJeSkqJevXopOjpaH374oTp37pyr+mrXri1vb+9M30sPZX8NIRs3blSvXr3UoUMHrV692v4DUm60atVKGzZs0OnTp3N8RB35R7hBsejRo4deeOEF+fv7Z3sZJf0/oT+fuIwxeueddzL09fT0zPdP19kJDw+Xm5ubDh06pPvvv79QtpnXn8alkn1ZqkqVKho6dKjmzZunJUuWZHpD8KFDh5ScnKwmTZrIZrPJw8PDIWQkJCTk6mmp7CxdulQtWrSwL69cuVKpqam5+i6hojrWOnfurLVr1+r48eP22Rrp9xkWb2/vXF1qKogZM2Zo9erVmjhxou67774Czw6kB5vDhw9r8+bNat68eZ7WL6zLUkFBQQ7j+WfpMzZffPGF1qxZo/Dw8FzX5+bmpnvuuUerVq3S0aNH7V9waYzRhg0bVLt2bYfaNm3apF69eulvf/ub1q1blyFkZccYo23btqlChQqZPoWGwkO4QbEYMWKEVq9erfbt22vkyJFq0qSJ0tLSFB8fr02bNumZZ57RHXfcoS5dusjDw0P9+vXTmDFjdPXqVc2bN0/nzp3LsM1bb71Va9as0bx589SiRQu5uLjYf4ItiNDQUE2dOlUTJkzQ4cOHdffdd6tixYo6efKkdu3apbJlyzpMj+dG+fLlFRISYv+J0s/PTwEBAdl+U3Bh7Eu6ffv2ad++fZJ+DxWXL1+2f6tqo0aNHJ7+stls6tChQ47fpjxz5kwdPnxYAwcO1MaNG3XvvfcqMDBQiYmJ2rx5sxYtWqT3339fTZo0sT+KPWzYMPXu3VvHjh3T888/r6pVq+bq23SzsmbNGrm5ualLly72p6WaNm2qPn365LhuUR1rkyZN0ieffKJOnTpp4sSJ8vPz09KlS/Xpp59qxowZ8vX1zff+5kbFihU1btw4jRkzRsuWLSvQl8VduXLF/hUIs2bNUmpqqr7++mv7+5UqVcrxyxXr16+f78/Prd69e+uzzz7ThAkT5O/v71Cjj4+Pw/Fdp04dSdLBgwftbc8//7w+++wz3X333Zo8ebJ8fHw0f/58ffvtt1q5cqW93/bt29WrVy9VqVJF48ePzzDL16hRI/n4+EiS7rnnHjVt2lTNmjWTv7+/jh8/rqioKG3btk1z5szhcfCi5sSbmVHKZfW0VOPGjTPtf+nSJfPcc8+Z+vXrGw8PD+Pr62tuvfVWM3LkSJOQkGDv9/HHH5umTZsaLy8vU61aNTN69Gjz2WefGUkmOjra3u/s2bOmd+/epkKFCsZms5n0wzn96ZqXXnrJ4fPTn0754IMPHNqz+jK0devWmU6dOhkfHx/j6elpQkJCTO/evc2WLVuyHQNj/nia58+2bNlimjdvbjw9PQv96ZCcpNeT2evPT/okJSUZSaZv37652m5qaqpZvHixufPOO42fn59xc3MzlSpVMhEREWbZsmUOT9NMnz7dhIaGGk9PT9OwYUPzzjvvZDpOeXlaau/evSYyMtKUK1fOlC9f3vTr1y/DF7qFhISY7t27Z1p/QY81YzI+LWWMMd99952JjIw0vr6+xsPDwzRt2jTDU21ZHY+Z7W9msjpujTHmypUrpkaNGqZu3bomNTU12+1kJ72WrF7FeQxnJ7saO3To4NA3JCQk0yffvvvuO9O9e3dTvnx54+XlZVq3bm0+/vhjhz7Z/Tv66zHz4osvmttvv91UrFjRuLq6Gn9/fxMeHm4++eSTIhgB/JXNmFzc6QXgprB+/Xr16NFD3377rW699VZnlwMA+cKt2gDsoqOj1bdvX4INgFKNmRsAAGApzNwAAABLIdwAAABLIdwAAABLIdwAAABLuem+RSgtLU3Hjx9X+fLls/xdIAAAoGQxxigpKUlBQUE5fvP2TRdujh8/nuG39QIAgNLh2LFjql69erZ9brpwU758eUm/D07612QDAICS7eLFiwoODrb/P56dmy7cpF+K8vHxIdwAAFDK5OaWEm4oBgAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQAAl
kK4KSSXr6UqdOynCh37qS5fS3V2OQAA3LQINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFKcGm5iYmIUGRmpoKAg2Ww2rVu3Lsd1UlJSNGHCBIWEhMjT01O1a9fWwoULi75YAABQKrg588OTk5PVtGlTDRo0SPfff3+u1unTp49OnjypBQsWqE6dOjp16pRSU1OLuFIAAFBaODXcREREKCIiItf9N2zYoG3btunw4cPy8/OTJIWGhhZRdQAAoDQqVffcfPTRR2rZsqVmzJihatWqqV69enr22Wd15cqVLNdJSUnRxYsXHV4AAMC6nDpzk1eHDx/W9u3b5eXlpbVr1yoxMVHDhg3T2bNns7zvZtq0aZoyZUoxVwoAAJylVM3cpKWlyWazaenSpWrVqpW6deummTNnKioqKsvZm3HjxunChQv217Fjx4q5agAAUJxK1cxN1apVVa1aNfn6+trbGjZsKGOMfv31V9WtWzfDOp6envL09CzOMgEAgBOVqpmbsLAwHT9+XJcuXbK3HThwQC4uLqpevboTKwMAACWFU8PNpUuXFBcXp7i4OEnSkSNHFBcXp/j4eEm/X1Lq37+/vf9DDz0kf39/DRo0SPv27VNMTIxGjx6twYMHq0yZMs7YBQAAUMI4Ndzs2bNHzZs3V/PmzSVJo0aNUvPmzTVx4kRJ0okTJ+xBR5LKlSunzZs36/z582rZsqX+/ve/KzIyUq+//rpT6gcAACWPU++56dixo4wxWb4fFRWVoa1BgwbavHlzEVYFAABKs1J1zw0AAEBOCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSnBpuYmJiFBkZqaCgINlsNq1bty7X63711Vdyc3NTs2bNiqw+AABQ+jg13CQnJ6tp06aaPXt2nta7cOGC+vfvr86dOxdRZQAAoLRyc+aHR0REKCIiIs/rPfbYY3rooYfk6uqap9keAABgfaXunptFixbp0KFDmjRpUq76p6Sk6OLFiw4vAABgXaUq3Pz8888aO3asli5dKje33E06TZs2Tb6+vvZXcHBwEVcJAACcqdSEmxs3buihhx7SlClTVK9evVyvN27cOF24cMH+OnbsWBFWCQAAnM2p99zkRVJSkvbs2aPY2Fj985//lCSlpaXJGCM3Nzdt2rRJd955Z4b1PD095enpWdzlAgAAJyk14cbHx0ffffedQ9vcuXP1xRdfaNWqVapZs6aTKgMAACWJU8PNpUuXdPDgQfvykSNHFBcXJz8/P9WoUUPjxo3Tb7/9piVLlsjFxUW33HKLw/qVK1eWl5dXhnYAAHDzcmq42bNnjzp16mRfHjVqlCRpwIABioqK0okTJxQfH++s8gAAQClkM8YYZxdRnC5evChfX19duHBBPj4+hbbdy9dS1WjiRknSvqnh8vYoNVf8AAAo8fLy/3epeVoKAAAgNwg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3AADAUgg3ReBo4mVnlwAAwE2LcFMIzl++pkcX77Uvd3v9S/VfsEsXLl93YlUAANycCDeF4Knlcdp5KNGh7auDiRq+PNZJFQEAcPMi3BTQ4dOXFPPzaaX9pf2GMYr5+bSOJCY7pS4AAG5WhJsC+uVs9vfXHD1DuAEAoDgRbgooxM872/dD/csWUyUAAEAi3BRYrUrl1L5upQwD6WqzqX3dSqoZQLgBAKA4EW4KwRv9mqtN7QCHtrA6AXqjX3MnVQQAwM2LcFMIfL3d9c6AFvbl9U+105IhreTr7e7EqgAAuDkRbopAaED29+EAAICiQ7gBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACWQrgBAACW4tRwExMTo8jISAUFBclms2ndunXZ9l+zZo26dOmiSpUqycfHR23atNHGjRuLp1gAAFAqODXcJCcnq2nTppo9e3au+sfExKhLly5av3699u7dq06dOikyMlKxsbFFXCkAACgt3Jz54REREYqIiMh1/1mzZjksv/DCC/rwww/18ccfq3nz5oVcHQAAKI1K9T03aWlpSkpKkp+fn7NLAQAAJYRTZ24K6pVXXlFycrL69OmTZZ+UlBSlpKTYly9evFgcpQEAACcptTM3y5cv1+TJk7VixQpVrlw5y37Tpk2Tr6+v/RUcHFyMVQIAgOJWKsPNihUrNGTIEK1cuVJ33XVXtn3HjRunCxcu2F/Hjh0rpioBAIAzlLrLUsuXL9fgwYO1fPlyde/ePcf+np6e8vT0LIbKAABASeDUcHPp0iUdPHjQvnzkyBHFxcXJz89PNWrU0Lhx4/Tbb79pyZIlkn4PNv3799drr72m1q1bKyEhQZJUpkwZ+fr6OmUfAABAyeLUy1J79uxR8+bN7Y9xjxo1Ss2bN9fEiRMlSSdOnFB8fLy9/1tvvaXU1FQ9+eSTqlq1qv319NNPO6V+AABQ8jh15qZjx44yxmT5flRUlMPy1q1bi7YgAABQ6pXKG4oBAACykqdwM2PGDF25csW+HBMT4/AdMklJSRo2bFjhVQcAAJBHeQo348aNU1JSkn25R48e+u233+zLly9f1ltvvVV41QEAAORRnsLNX++Pye5+GQAAAGfgnhsAAGAphBsAAGApeX4UfP78+SpXrpwkKTU1VVFRUQoICJAkh/txAAAAnCFP4aZGjRp655137MtVqlTRu+++m6EPAOu4fC1VjSZulCTtmxoub49S91tbANxk8nSWOnr0aBGVAQAAUDjydM/NnXfeqfPnzxdRKQAAAAWXp3CzdetWXbt2rahqAQAAKDCelgIAAJaS5zsDk5KS5OXllW0fHx+ffBcEAABQEHkON/Xq1cvyPWOMbDabbty4UaCiAAAA8ivP4WbVqlXy8/MriloAAEApVlK+
OiLPnxoWFqbKlSsXRS0AAAAFVug3FKemphb2JgEAAHItT+EmJCRErq6umb63b98+jRo1StWqVSuUwgAAAPIjT+HmyJEj8vf3ty9funRJ8+fPV5s2bdSkSRPt2rVLY8eOLfQiAQAAcitfd/ps375d8+fP1+rVq1WzZk3t27dP27ZtU1hYWGHXBwAAkCd5mrmZMWOGGjRooL59+6pSpUravn27/ve//8lms6lixYpFVSMAAECu5WnmZvz48frXv/6lqVOnZnnvDQAAgDPlaeZm6tSp+uCDD1SzZk3961//0vfff19UdQEAAORLnsLN+PHjdeDAAb377rtKSEhQ69at1bRpUxljdO7cuaKqEQAAINfy9T03HTp00OLFi3X8+HE98cQTuu2229S+fXu1bdtWM2fOLOwaAQAAcq1AX+Ln4+Ojxx9/XLt27dK3336rO+64Q9OnTy+s2gAAAPIsTzcUX7lyRZ9//rl69OghSRo3bpxSUlL+2Jibmw4dOlS4FQIAAORBnsLNkiVL9Mknn9jDzezZs9W4cWOVKVNGkvTTTz8pKChII0eOLPxKAQAAciFPl6WWLl2qwYMHO7QtW7ZM0dHRio6O1owZM7Ry5cpCLRAAACAv8hRuDhw4oHr16tmXvby85OLyxyZatWqlffv2FV51AAAAeZSny1IXLlyQm9sfq5w+fdrh/bS0NId7cAAAAIpbnmZuqlevnu0X9/3vf/9T9erVC1wUAABAfuUp3HTr1k0TJ07U1atXM7x35coVTZkyRd27dy+04gAAAPIqz79bauXKlapfv77++c9/ql69erLZbPrxxx81e/Zspaamavz48UVVKwAAQI7yFG4CAwO1Y8cOPfHEExo7dqyMMZIkm82mLl26aO7cuQoMDCySQgEAAHIjT+FGkmrWrKkNGzbo7NmzOnjwoCSpTp068vPzK/TiAAAA8irP4Sadn5+fWrVqVZi1AAAAFFiBfrcUAABASUO4AQAAlkK4AQAAlkK4AQAAlkK4AQAAlkK4AQCLu3wtVaFjP1Xo2E91+Vqqs8sBihzhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWArhBgAAWIpTw01MTIwiIyMVFBQkm82mdevW5bjOtm3b1KJFC3l5ealWrVp68803i75QAABQajg13CQnJ6tp06aaPXt2rvofOXJE3bp1U7t27RQbG6vx48frqaee0urVq4u4UgAAUFq4OfPDIyIiFBERkev+b775pmrUqKFZs2ZJkho2bKg9e/bo5Zdf1v33319EVQIAgNKkVN1zs3PnTnXt2tWhLTw8XHv27NH169czXSclJUUXL150eAEAAOsqVeEmISFBgYGBDm2BgYFKTU1VYmJiputMmzZNvr6+9ldwcHBxlAoAAJykVIUbSbLZbA7LxphM29ONGzdOFy5csL+OHTtW5DUCAADnceo9N3lVpUoVJSQkOLSdOnVKbm5u8vf3z3QdT09PeXp6Fkd5AACgBChVMzdt2rTR5s2bHdo2bdqkli1byt3d3UlVAQCAksSp4ebSpUuKi4tTXFycpN8f9Y6Li1N8fLyk3y8p9e/f397/8ccf1y+//KJRo0Zp//79WrhwoRYsWKBnn33WGeUDAIASyKmXpfbs2aNOnTrZl0eNGiVJGjBggKKionTixAl70JGkmjVrav369Ro5cqTmzJmjoKAgvf766zwGDgAA7Jwabjp27Gi/ITgzUVFRGdo6dOig//73v0VYFQAAKM1K1T03AAAAOSHcAAAASyHcAAAASyHcAAAASylVX+JXknl7uOno9O7OLgMAgJseMzcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAMBSCDcAAKDQHU287LTPJtwAAIACO3/5mh5dvNe+3O31L9V/wS5duHy92Gsh3AAAgAJ7anmcdh5KdGj76mCihi+PLfZaCDcAAKBADp++pJifTyvtL+03jFHMz6d1JDG5WOsh3AAAgAL55Wz299ccPUO4AQAApUiIn3e274f6ly2mSn5HuAEAAAVSq1I5ta9bKUOocLXZ1L5uJdUMINwAAIBS5o1+zdWmdoBDW1idAL3Rr3mx10K4AQAABebr7a53BrSwL69/qp2WDGklX2/3Yq+FcAMAAApdaED29+EUJcINAACwFMINgFxz5tepA0BuEW4AZKkkfZ06AOQW4QZAlkrS16kDQG4RbgBkqqR9nToA5BbhBkCmStrXqQNAbhFuAGSqpH2dOgDkFuEGQKZK2tepA0BuEW4AZKkkfZ06AOQW4QZAlkrS16kDQG4RbgDkmjO/Th0AcotwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALIVwAwAALMXp4Wbu3LmqWbOmvLy81KJFC3355ZfZ9l+6dKmaNm0qb29vVa1aVYMGDdKZM2eKqVoAAFDSOTXcrFixQiNGjNCECRMUGxurdu3aKSIiQvHx8Zn23759u/r3768hQ4bohx9+0AcffKDdu3dr6NChxVw5AJRORxMvO7sEoMg5NdzMnDlTQ4YM0dChQ9WwYUPNmjVLwcHBmjdvXqb9v/76a4WGhuqpp55SzZo19be//U2PPfaY9uzZU8yVA0DpcP7yNT26eK99udvrX6r/gl26cPm6E6sCipbTws21a9e0d+9ede3a1aG9a9eu2rFjR6brtG3bVr/++qvWr18vY4xOnjypVatWqXv37ll+TkpKii5evOjwAoCbxVPL47TzUKJD21cHEzV8eayTKgKKntPCTWJiom7cuKHAwECH9sDAQCUkJGS6Ttu2bbV06VI9+OCD8vDwUJUqVVShQgW98cYbWX7OtGnT5Ovra38FBwcX6n4AQEl1+PQlxfx8Wml/ab9hjGJ+Pq0jiclOqQsoak6/odhmszksG2MytKXbt2+fnnrqKU2cOFF79+7Vhg0bdOTIET3++ONZbn/cuHG6cOGC/XXs2LFCrR8ASqpfzmZ/f83RM4QbWJObsz44ICBArq6uGWZpTp06lWE2J920adMUFham0aNHS5KaNGmismXLql27dvr3v/+tqlWrZljH09NTnp6ehb8DAFDChfh5Z/t+qH/ZYqoEKF5Om7nx8PBQixYttHnzZof2zZs3q23btpmuc/nyZbm4OJbs6uoq6fcZHwDAH2pVKqf2dStlONG72mxqX7eSagYQbmBNTr0sNWrUKM2fP18LFy7U/v37NXLkSMXHx9svM40bN079+/e394+MjNSaNWs0b948HT58WF999ZWeeuoptWrVSkFBQc7aDQAosd7o11xtagc4tIXVCdAb/Zo7qSKg6DntspQkPfjggzpz5oymTp2qEydO6JZbbtH69esVEhIiSTpx4oTDd94MHDhQSUlJmj17tp555hlVqFBBd955p1588UVn7QIAlGi+3u56Z0ALNZq4UZK0/ql2ahTk4+SqgKLl1HAjScOGDdOwYcMyfS8qKipD2/D
hwzV8+PAirgoArCk0IPv7cAArcPrTUgAAAIWJcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACyFcAMAACzFzdkFAAAAa/D2cNPR6d2dXQYzNwAAwFoINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFIINwAAwFKcHm7mzp2rmjVrysvLSy1atNCXX36Zbf+UlBRNmDBBISEh8vT0VO3atbVw4cJiqhYAAJR0bs788BUrVmjEiBGaO3euwsLC9NZbbykiIkL79u1TjRo1Ml2nT58+OnnypBYsWKA6dero1KlTSk1NLebKAQBASeXUcDNz5kwNGTJEQ4cOlSTNmjVLGzdu1Lx58zRt2rQM/Tds2KBt27bp8OHD8vPzkySFhoYWZ8kAAKCEc9plqWvXrmnv3r3q2rWrQ3vXrl21Y8eOTNf56KOP1LJlS82YMUPVqlVTvXr19Oyzz+rKlStZfk5KSoouXrzo8AIAANbltJmbxMRE3bhxQ4GBgQ7tgYGBSkhIyHSdw4cPa/v27fLy8tLatWuVmJioYcOG6ezZs1nedzNt2jRNmTKl0OsHAAAlk9NvKLbZbA7LxpgMbenS0tJks9m0dOlStWrVSt26ddPMmTMVFRWV5ezNuHHjdOHCBfvr2LFjhb4PAACg5HDazE1AQIBcXV0zzNKcOnUqw2xOuqpVq6patWry9fW1tzVs2FDGGP3666+qW7duhnU8PT3l6elZuMUDAIASy2kzNx4eHmrRooU2b97s0L5582a1bds203XCwsJ0/PhxXbp0yd524MABubi4qHr16kVaLwAAKB2cellq1KhRmj9/vhYuXKj9+/dr5MiRio+P1+OPPy7p90tK/fv3t/d/6KGH5O/vr0GDBmnfvn2KiYnR6NGjNXjwYJUpU8ZZuwEAAEoQpz4K/uCDD+rMmTOaOnWqTpw4oVtuuUXr169XSEiIJOnEiROKj4+39y9Xrpw2b96s4cOHq2XLlvL391efPn3073//21m7AAAAShinhhtJGjZsmIYNG5bpe1FRURnaGjRokOFSFgAAQDqnPy0FAABQmAg3AADAUpx+WQpAyebt4aaj07s7uwwAyDVmbgAAgKUQbgAAgKVwWQoALI5Li7jZMHMDAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAshXADAAAsxc3ZBRQ3Y4wk6eLFi06uBAAA5Fb6/9vp/49n56YLN0lJSZKk4OBgJ1cCAADyKikpSb6+vtn2sZncRCALSUtL0/Hjx1W+fHnZbDZJv6fB4OBgHTt2TD4+Pk6u0LkYC0eMhyPGwxHj4YjxcMR4/KEwxsIYo6SkJAUFBcnFJfu7am66mRsXFxdVr1490/d8fHxu+gMwHWPhiPFwxHg4YjwcMR6OGI8/FHQscpqxSccNxQAAwFIINwAAwFIIN5I8PT01adIkeXp6OrsUp2MsHDEejhgPR4yHI8bDEePxh+Iei5vuhmIAAGBtzNwAAABLIdwAAABLIdwAAABLIdwAAABLsWS4mTt3rmrWrCkvLy+1aNFCX375Zbb9t23bphYtWsjLy0u1atXSm2++maHP6tWr1ahRI3l6eqpRo0Zau3ZtUZVf6PIyHmvWrFGXLl1UqVIl+fj4qE2bNtq4caNDn6ioKNlstgyvq1evFvWuFIq8jMfWrVsz3dcff/zRoV9pPT7yMhYDBw7MdCwaN25s71Oaj42YmBhFRkYqKChINptN69aty3EdK5878joeVj935HU8rH7uyOt4FPf5w3LhZsWKFRoxYoQmTJig2NhYtWvXThEREYqPj8+0/5EjR9StWze1a9dOsbGxGj9+vJ566imtXr3a3mfnzp168MEH9cgjj+jbb7/VI488oj59+uibb74prt3Kt7yOR0xMjLp06aL169dr79696tSpkyIjIxUbG+vQz8fHRydOnHB4eXl5FccuFUhexyPdTz/95LCvdevWtb9XWo+PvI7Fa6+95jAGx44dk5+fnx544AGHfqX12EhOTlbTpk01e/bsXPW3+rkjr+Nh9XNHXscjnRXPHVLex6PYzx/GYlq1amUef/xxh7YGDRqYsWPHZtp/zJgxpkGDBg5tjz32mGndurV9uU+fPubuu+926BMeHm769u1bSFUXnbyOR2YaNWpkpkyZYl9etGiR8fX1LawSi1VexyM6OtpIMufOnctym6X1+CjosbF27Vpjs9nM0aNH7W2l+dj4M0lm7dq12fax+rnjz3IzHpmx0rnjz3IzHlY+d/xVfo6Poj5/WGrm5tq1a9q7d6+6du3q0N61a1ft2LEj03V27tyZoX94eLj27Nmj69evZ9snq22WFPkZj79KS0tTUlKS/Pz8HNovXbqkkJAQVa9eXT169Mjw01lJVJDxaN68uapWrarOnTsrOjra4b3SeHwUxrGxYMEC3XXXXQoJCXFoL43HRn5Y+dxRGKx07igIq507CktRnz8sFW4SExN148YNBQYGOrQHBgYqISEh03USEhIy7Z+amqrExMRs+2S1zZIiP+PxV6+88oqSk5PVp08fe1uDBg0UFRWljz76SMuXL5eXl5fCwsL0888/F2r9hS0/41G1alW9/fbbWr16tdasWaP69eurc+fOiomJsfcpjcdHQY+NEydO6LPPPtPQoUMd2kvrsZEfVj53FAYrnTvyw6rnjsJQHOcPS/5WcJvN5rBsjMnQllP/v7bndZslSX5rX758uSZPnqwPP/xQlStXtre3bt1arVu3ti+HhYXptttu0xtvvKHXX3+98AovInkZj/r166t+/fr25TZt2ujYsWN6+eWX1b59+3xtsyTJb91RUVGqUKGCevXq5dBe2o+NvLL6uSO/rHruyAurnzsKojjOH5aauQkICJCrq2uG1Hvq1KkM6ThdlSpVMu3v5uYmf3//bPtktc2SIj/jkW7FihUaMmSIVq5cqbvuuivbvi4uLrr99ttL/E9fBRmPP2vdurXDvpbG46MgY2GM0cKFC/XII4/Iw8Mj276l5djIDyufOwrCiueOwmKFc0dBFdf5w1LhxsPDQy1atNDmzZsd2jdv3qy2bdtmuk6bNm0y9N+0aZNatmwpd3f3bPtktc2SIj/jIf3+U9fAgQO1bNkyde/ePcfPMcYoLi5OVatWLXDNRSm/4/FXsbGxDvtaGo+PgozFtm3bdPDgQQ0ZMiTHzyktx0Z+WPnckV9WPXcUFiucOwqq2M
   [... notebook output cells omitted: two base64-encoded matplotlib PNG figures (and the surrounding output JSON) were simply regenerated in this commit and carry no reviewable content ...]
" ] @@ -521,7 +483,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "dev_env", "language": "python", "name": "python3" }, @@ -535,9 +497,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.17" + "version": "3.10.9" } }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} From 12dae44c5c4ec161263ac32b5115dc20c19320cd Mon Sep 17 00:00:00 2001 From: Fabio Vera Date: Wed, 10 Jan 2024 15:45:27 -0500 Subject: [PATCH 23/25] rename function, fix warning Signed-off-by: Fabio Vera --- econml/_ortho_learner.py | 6 +++--- econml/tests/test_utilities.py | 10 +++++----- econml/utilities.py | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py index 76a51a374..9a53f1f70 100644 --- a/econml/_ortho_learner.py +++ b/econml/_ortho_learner.py @@ -43,7 +43,7 @@ class in this module implements the general logic in a very versatile way TreatmentExpansionMixin) from .inference import BootstrapInference from .utilities import (_deprecate_positional, check_input_arrays, - cross_product, filter_none_kwargs, single_strata_from_discrete_arrays, + cross_product, filter_none_kwargs, strata_from_discrete_arrays, inverse_onehot, jacify_featurizer, ndim, reshape, shape, transpose) from .sklearn_extensions.model_selection import ModelSelector @@ -636,7 +636,7 @@ def _strata(self, Y, T, X=None, W=None, Z=None, if self.discrete_instrument: arrs.append(Z) - return single_strata_from_discrete_arrays(arrs) + return strata_from_discrete_arrays(arrs) def _prefit(self, Y, T, *args, only_final=False, **kwargs): @@ -721,7 +721,7 @@ def fit(self, Y, T, *, X=None, W=None, Z=None, sample_weight=None, freq_weight=N f"Only one outcome variable is supported when discrete_outcome=True. Got Y of shape {Y.shape}") if len(self.outcome_transformer.classes_) > 2: raise AttributeError( - f"({self.outcome_transformer.classes_} outcome classes detected. \ + f"({len(self.outcome_transformer.classes_)} outcome classes detected. \ Currently, only 2 outcome classes are allowed when discrete_outcome=True. 
                     Classes provided include {self.outcome_transformer.classes_[:5]}")
             else:
                 self.outcome_transformer = None

diff --git a/econml/tests/test_utilities.py b/econml/tests/test_utilities.py
index b80fb2c8b..518da03a5 100644
--- a/econml/tests/test_utilities.py
+++ b/econml/tests/test_utilities.py
@@ -10,7 +10,7 @@ import pytest
 from econml.utilities import (einsum_sparse, todense, tocoo, transpose, inverse_onehot, cross_product,
                               transpose_dictionary, deprecated, _deprecate_positional,
-                              single_strata_from_discrete_arrays)
+                              strata_from_discrete_arrays)
 from sklearn.preprocessing import OneHotEncoder
@@ -184,7 +184,7 @@ def test_single_strata_from_discrete_array(self):
         Z = np.repeat([[0, 1]], 6, axis=0).ravel()
         Y = np.repeat([0, 1], 6, axis=0)
 
-        assert set(single_strata_from_discrete_arrays([T, Z, Y])) == set(np.arange(12))
-        assert set(single_strata_from_discrete_arrays([T, Z])) == set(np.arange(6))
-        assert set(single_strata_from_discrete_arrays([T])) == set(np.arange(3))
-        assert single_strata_from_discrete_arrays([]) is None
+        assert set(strata_from_discrete_arrays([T, Z, Y])) == set(np.arange(12))
+        assert set(strata_from_discrete_arrays([T, Z])) == set(np.arange(6))
+        assert set(strata_from_discrete_arrays([T])) == set(np.arange(3))
+        assert strata_from_discrete_arrays([]) is None
diff --git a/econml/utilities.py b/econml/utilities.py
index db3f4cb49..fc577954a 100644
--- a/econml/utilities.py
+++ b/econml/utilities.py
@@ -1484,7 +1484,7 @@ def jacify_featurizer(featurizer):
     return _TransformerWrapper(featurizer)
 
 
-def single_strata_from_discrete_arrays(arrs):
+def strata_from_discrete_arrays(arrs):
     """
     Combine multiple discrete arrays into a single array for stratification purposes:
 
From 5014d4c82e8d9d5c0561fba4518c45f3ea18a4e7 Mon Sep 17 00:00:00 2001
From: Fabio Vera
Date: Thu, 11 Jan 2024 13:50:27 -0500
Subject: [PATCH 24/25] add test for discrete model constraints, fix warning whitespace

Signed-off-by: Fabio Vera
---
 econml/_ortho_learner.py                     |  6 +--
 econml/dr/_drlearner.py                      | 11 +++--
 econml/sklearn_extensions/model_selection.py |  9 +++-
 econml/tests/test_discrete_outcome.py        | 43 ++++++++++++++++++++
 econml/utilities.py                          |  2 +-
 5 files changed, 61 insertions(+), 10 deletions(-)

diff --git a/econml/_ortho_learner.py b/econml/_ortho_learner.py
index 9a53f1f70..9423617c2 100644
--- a/econml/_ortho_learner.py
+++ b/econml/_ortho_learner.py
@@ -721,9 +721,9 @@ def fit(self, Y, T, *, X=None, W=None, Z=None, sample_weight=None, freq_weight=N
                     f"Only one outcome variable is supported when discrete_outcome=True. Got Y of shape {Y.shape}")
             if len(self.outcome_transformer.classes_) > 2:
                 raise AttributeError(
-                    f"({len(self.outcome_transformer.classes_)} outcome classes detected. \
-                    Currently, only 2 outcome classes are allowed when discrete_outcome=True. \
-                    Classes provided include {self.outcome_transformer.classes_[:5]}")
+                    f"({len(self.outcome_transformer.classes_)} outcome classes detected. "
+                    "Currently, only 2 outcome classes are allowed when discrete_outcome=True. "
" + f"Classes provided include {self.outcome_transformer.classes_[:5]}") else: self.outcome_transformer = None diff --git a/econml/dr/_drlearner.py b/econml/dr/_drlearner.py index b08883f3b..ed10378f8 100644 --- a/econml/dr/_drlearner.py +++ b/econml/dr/_drlearner.py @@ -103,12 +103,15 @@ def predict(self, Y, T, X=None, W=None, *, sample_weight=None, groups=None): n = T.shape[0] Y_pred = np.zeros((T.shape[0], T.shape[1] + 1)) T_counter = np.zeros(T.shape) - if self._discrete_outcome and hasattr(self._model_regression, 'predict_proba'): - Y_pred[:, 0] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n) + if hasattr(self._model_regression, 'predict_proba'): + if self._discrete_outcome: + Y_pred[:, 0] = self._model_regression.predict_proba(np.hstack([XW, T_counter]))[:, 1].reshape(n) + else: + raise AttributeError("Cannot use a classifier for model_regression when discrete_outcome=False!") else: if self._discrete_outcome: - warn("A regressor was passed when discrete_outcome=True. \ - Using a classifier is recommended.", UserWarning) + warn("A regressor was passed to model_regression when discrete_outcome=True. " + "Using a classifier is recommended.", UserWarning) Y_pred[:, 0] = self._model_regression.predict(np.hstack([XW, T_counter])).reshape(n) Y_pred[:, 0] += (Y.reshape(n) - Y_pred[:, 0]) * np.all(T == 0, axis=1) / propensities[:, 0] for t in np.arange(T.shape[1]): diff --git a/econml/sklearn_extensions/model_selection.py b/econml/sklearn_extensions/model_selection.py index 4b1456d51..dda389eaf 100644 --- a/econml/sklearn_extensions/model_selection.py +++ b/econml/sklearn_extensions/model_selection.py @@ -318,8 +318,13 @@ def best_score(self): def predict(self, *args, **kwargs): return self.best_model.predict(*args, **kwargs) - def predict_proba(self, *args, **kwargs): - return self.best_model.predict_proba(*args, **kwargs) + # only expose predict_proba if best_model has predict_proba + # used because logic elsewhere uses hasattr predict proba to check if model is a classifier + def __getattr__(self, name): + if name == 'predict_proba': + return getattr(self.best_model, name) + else: + self.__getattribute__(name) def score(self, *args, **kwargs): if hasattr(self.best_model, 'score'): diff --git a/econml/tests/test_discrete_outcome.py b/econml/tests/test_discrete_outcome.py index b8e650478..da670f44f 100644 --- a/econml/tests/test_discrete_outcome.py +++ b/econml/tests/test_discrete_outcome.py @@ -180,3 +180,46 @@ def gen_array(n, is_binary, d): 'predict_proba' ) ), 'Auto outcome model is not a classifier!' 
+
+    def test_constraints(self):
+        """
+        Confirm errors/warnings when discreteness is not handled correctly for
+        discrete outcomes and treatments
+        """
+        X = np.random.normal(size=(100, 3))
+        Y = np.random.choice([0, 1], size=(100))
+        T = np.random.choice([0, 1], size=(100, 1))
+
+        ests = [
+            LinearDML()
+        ]
+
+        for est in ests:
+            with self.subTest(est=est, kind='discrete treatment'):
+                est.discrete_treatment = False
+                est.model_t = LogisticRegression()
+                with pytest.raises(AttributeError):
+                    est.fit(Y=Y, T=T, X=X)
+                est.discrete_treatment = True
+                est.model_t = LinearRegression()
+                with pytest.warns(UserWarning):
+                    est.fit(Y=Y, T=T, X=X)
+
+        ests += [LinearDRLearner()]
+        for est in ests:
+            print(est)
+            with self.subTest(est=est, kind='discrete outcome'):
+                est.discrete_outcome = False
+                if isinstance(est, LinearDRLearner):
+                    est.model_regression = LogisticRegression()
+                else:
+                    est.model_y = LogisticRegression()
+                with pytest.raises(AttributeError):
+                    est.fit(Y=Y, T=T, X=X)
+                est.discrete_outcome = True
+                if isinstance(est, LinearDRLearner):
+                    est.model_regression = LinearRegression()
+                else:
+                    est.model_y = LinearRegression()
+                with pytest.warns(UserWarning):
+                    est.fit(Y=Y, T=T, X=X)
diff --git a/econml/utilities.py b/econml/utilities.py
index fc577954a..8e9bc8fa5 100644
--- a/econml/utilities.py
+++ b/econml/utilities.py
@@ -587,7 +587,7 @@ def check_input_arrays(*args, validate_len=True, force_all_finite=True, dtype=No
             try:
                 assert_all_finite(new_arg)
             except ValueError:
-                warnings.warn("Input contains NaN. Causal identification strategy can be"
+                warnings.warn("Input contains NaN. Causal identification strategy can be erroneous"
                               " in the presence of missing values.")
 
     if validate_len:

From 74842eb961a3e05caa06d284b3601470f39a2256 Mon Sep 17 00:00:00 2001
From: Fabio Vera
Date: Thu, 11 Jan 2024 15:23:03 -0500
Subject: [PATCH 25/25] fix test

Signed-off-by: Fabio Vera
---
 econml/tests/test_federated_learning.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/econml/tests/test_federated_learning.py b/econml/tests/test_federated_learning.py
index 27f656945..9c7c5804c 100644
--- a/econml/tests/test_federated_learning.py
+++ b/econml/tests/test_federated_learning.py
@@ -70,7 +70,7 @@ def test_lineardrlearner(self):
         b = np.random.normal(size=(n_x + n_w + n_t - 1))
 
         t_model = FunctionClassifier(lambda XW: np.exp(XW @ a))
-        y_model = FunctionClassifier(lambda XW: XW @ b)
+        y_model = FunctionRegressor(lambda XW: XW @ b)
 
         for cov_type in ['HC0', 'HC1', 'nonrobust']:
             with self.subTest(n_t=n_t, cov_type=cov_type):