diff --git a/bob/pipelines/tests/test_xarray.py b/bob/pipelines/tests/test_xarray.py
index 6fdc32876f7552a6059e3b7e288a9cc9bbf161c5..043f41c0729a48867173a098e09f0de33e75d07d 100644
--- a/bob/pipelines/tests/test_xarray.py
+++ b/bob/pipelines/tests/test_xarray.py
@@ -229,7 +229,7 @@ def test_dataset_pipeline_with_dask_ml():
     scaler = dask_ml.preprocessing.StandardScaler()
     pca = dask_ml.decomposition.PCA(n_components=3, random_state=0)
-    clf = SGDClassifier(random_state=0, loss="log", penalty="l2", tol=1e-3)
+    clf = SGDClassifier(random_state=0, loss="log_loss", penalty="l2", tol=1e-3)
     clf = dask_ml.wrappers.Incremental(clf, scoring="accuracy")
 
     iris_ds = _build_iris_dataset(shuffle=True)
diff --git a/doc/xarray.rst b/doc/xarray.rst
index 7d946ee870fc12726dda1dcfb672162fd47eba88..8db4c262b01d2548ae9b617e95900a6d709eb207 100644
--- a/doc/xarray.rst
+++ b/doc/xarray.rst
@@ -420,7 +420,7 @@ provide dask-ml estimators, set ``input_dask_array`` as ``True``.
     >>> # construct the estimators
     >>> scaler = dask_ml.preprocessing.StandardScaler()
     >>> pca = dask_ml.decomposition.PCA(n_components=3, random_state=0)
-    >>> clf = SGDClassifier(random_state=0, loss='log', penalty='l2', tol=1e-3)
+    >>> clf = SGDClassifier(random_state=0, loss='log_loss', penalty='l2', tol=1e-3)
     >>> clf = dask_ml.wrappers.Incremental(clf, scoring="accuracy")
 
     >>> pipeline = mario.xr.DatasetPipeline(