diff --git a/bob/learn/misc/linear_scoring.cpp b/bob/learn/misc/linear_scoring.cpp
index bc812291167c0da628fa194e05e2c79dc4a120f2..b5bf6e079ded2262e206f4452f70649623492eec 100644
--- a/bob/learn/misc/linear_scoring.cpp
+++ b/bob/learn/misc/linear_scoring.cpp
@@ -69,28 +69,72 @@ static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}
 
 
 /*** linear_scoring ***/
-static auto linear_scoring = bob::extension::FunctionDoc(
+static auto linear_scoring1 = bob::extension::FunctionDoc(
   "linear_scoring",
   "",
   0,
   true
 )
 .add_prototype("models, ubm, test_stats, test_channelOffset, frame_length_normalisation", "output")
-.add_parameter("models", "", "")
-.add_parameter("ubm", "", "")
-.add_parameter("test_stats", "", "")
-.add_parameter("test_channelOffset", "", "")
+.add_parameter("models", "list(:py:class:`bob.learn.misc.GMMMachine`)", "")
+.add_parameter("ubm", ":py:class:`bob.learn.misc.GMMMachine`", "")
+.add_parameter("test_stats", "list(:py:class:`bob.learn.misc.GMMStats`)", "")
+.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
 .add_parameter("frame_length_normalisation", "bool", "")
 .add_return("output","array_like<float,1>","Score");
-static PyObject* PyBobLearnMisc_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
 
-  char** kwlist = linear_scoring.kwlist(0);
+
+static auto linear_scoring2 = bob::extension::FunctionDoc(
+  "linear_scoring",
+  "",
+  0,
+  true
+)
+.add_prototype("models, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
+.add_parameter("models", "list(array_like<float,1>)", "")
+.add_parameter("ubm_mean", "list(array_like<float,1>)", "")
+.add_parameter("ubm_variance", "list(array_like<float,1>)", "")
+.add_parameter("test_stats", "list(:py:class:`bob.learn.misc.GMMStats`)", "")
+.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
+.add_parameter("frame_length_normalisation", "bool", "")
+.add_return("output","array_like<float,1>","Score");
+
+
+
+static auto linear_scoring3 = bob::extension::FunctionDoc(
+  "linear_scoring",
+  "",
+  0,
+  true
+)
+.add_prototype("model, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
+.add_parameter("model", "array_like<float,1>", "")
+.add_parameter("ubm_mean", "array_like<float,1>", "")
+.add_parameter("ubm_variance", "array_like<float,1>", "")
+.add_parameter("test_stats", ":py:class:`bob.learn.misc.GMMStats`", "")
+.add_parameter("test_channelOffset", "array_like<float,1>", "")
+.add_parameter("frame_length_normalisation", "bool", "")
+.add_return("output","array_like<float,1>","Score");
+
+static PyObject* PyBobLearnMisc_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
     
   //Cheking the number of arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-    //Read a list of GMM
-  if((nargs >= 3) && (nargs<=5)){
+    
+  //Reading the first input argument
+  PyObject* arg = 0;
+  if (PyTuple_Size(args))
+    arg = PyTuple_GET_ITEM(args, 0);
+  else {
+    PyObject* tmp = PyDict_Values(kwargs);
+    auto tmp_ = make_safe(tmp);
+    arg = PyList_GET_ITEM(tmp, 0);
+  }
+  
+  //Checking the signature of the method (list of GMMMachine as input)
+  if (PyList_Check(arg) && PyList_Size(arg) > 0 && PyBobLearnMiscGMMMachine_Check(PyList_GetItem(arg, 0)) && (nargs >= 3) && (nargs <= 5)){
+  
+    char** kwlist = linear_scoring1.kwlist(0);
 
     PyObject* gmm_list_o                 = 0;
     PyBobLearnMiscGMMMachineObject* ubm  = 0;
@@ -103,7 +147,7 @@ static PyObject* PyBobLearnMisc_linear_scoring(PyObject*, PyObject* args, PyObje
                                                                        &PyList_Type, &stats_list_o,
                                                                        &PyList_Type, &channel_offset_list_o,
                                                                        &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring.print_usage(); 
+      linear_scoring1.print_usage();
       Py_RETURN_NONE;
     }
 
@@ -127,54 +171,96 @@ static PyObject* PyBobLearnMisc_linear_scoring(PyObject*, PyObject* args, PyObje
 
     return PyBlitzArrayCxx_AsConstNumpy(scores);
   }
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - linear_scoring requires 5 or 6 arguments, but you provided %d (see help)", nargs);
-    linear_scoring.print_usage();
-    Py_RETURN_NONE;
-  }
-  /*
+
+  //Checking the signature of the method (list of supervector arrays as input)
+  else if (PyList_Check(arg) && PyList_Size(arg) > 0 && PyArray_Check(PyList_GetItem(arg, 0)) && (nargs >= 4) && (nargs <= 6)){
   
+    char** kwlist = linear_scoring2.kwlist(0);
+
+    PyObject* model_supervector_list_o        = 0;
+    PyBlitzArrayObject* ubm_means             = 0;
+    PyBlitzArrayObject* ubm_variances         = 0;
+    PyObject* stats_list_o                    = 0;
+    PyObject* channel_offset_list_o           = 0;
+    PyObject* frame_length_normalisation      = Py_False;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&O&O!|O!O!", kwlist, &PyList_Type, &model_supervector_list_o,
+                                                                       &PyBlitzArray_Converter, &ubm_means,
+                                                                       &PyBlitzArray_Converter, &ubm_variances,
+                                                                       &PyList_Type, &stats_list_o,
+                                                                       &PyList_Type, &channel_offset_list_o,
+                                                                       &PyBool_Type, &frame_length_normalisation)){
+      linear_scoring2.print_usage(); 
+      Py_RETURN_NONE;
+    }
+    
+    //protects acquired resources through this scope
+    auto ubm_means_ = make_safe(ubm_means);
+    auto ubm_variances_ = make_safe(ubm_variances);    
+
+    std::vector<blitz::Array<double,1> > model_supervector_list;
+    if(extract_array_list(model_supervector_list_o ,model_supervector_list)!=0)
+      Py_RETURN_NONE;
+
+    std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > stats_list;
+    if(extract_gmmstats_list(stats_list_o ,stats_list)!=0)
+      Py_RETURN_NONE;
+
+    std::vector<blitz::Array<double,1> > channel_offset_list;
+    if(extract_array_list(channel_offset_list_o ,channel_offset_list)!=0)
+      Py_RETURN_NONE;
+
+    blitz::Array<double, 2> scores = blitz::Array<double, 2>(model_supervector_list.size(), stats_list.size());
+    if(channel_offset_list.size()==0)
+      bob::learn::misc::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, f(frame_length_normalisation),scores);
+    else
+      bob::learn::misc::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, channel_offset_list, f(frame_length_normalisation),scores);
+
+    return PyBlitzArrayCxx_AsConstNumpy(scores);
   
-  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o, *rawscores_probes_vs_tmodels_o, 
-  *rawscores_zprobes_vs_tmodels_o, *mask_zprobes_vs_tmodels_istruetrial_o;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O&|O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_probes_vs_tmodels_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_tmodels_o,
-                                                                       &PyBlitzArray_Converter, &mask_zprobes_vs_tmodels_istruetrial_o)){
-    zt_norm.print_usage();
-    Py_RETURN_NONE;
   }
+  
+  //Checking the signature of the method (single supervector array as input)
+  else if (PyArray_Check(arg) && (nargs >= 5) && (nargs<=6) ){
+  
+    char** kwlist = linear_scoring3.kwlist(0);
 
-  // get the number of command line arguments
-  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
-  auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
-  auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
-  auto rawscores_zprobes_vs_tmodels_        = make_safe(rawscores_zprobes_vs_tmodels_o);
-  //auto mask_zprobes_vs_tmodels_istruetrial_ = make_safe(mask_zprobes_vs_tmodels_istruetrial_o);
+    PyBlitzArrayObject* model                 = 0;
+    PyBlitzArrayObject* ubm_means             = 0;
+    PyBlitzArrayObject* ubm_variances         = 0;
+    PyBobLearnMiscGMMStatsObject* stats       = 0;
+    PyBlitzArrayObject* channel_offset        = 0;
+    PyObject* frame_length_normalisation      = Py_False;
 
-  blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
-  blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O!O&|O!", kwlist, &PyBlitzArray_Converter, &model,
+                                                                       &PyBlitzArray_Converter, &ubm_means,
+                                                                       &PyBlitzArray_Converter, &ubm_variances,
+                                                                       &PyBobLearnMiscGMMStats_Type, &stats,
+                                                                       &PyBlitzArray_Converter, &channel_offset,
+                                                                       &PyBool_Type, &frame_length_normalisation)){
+      linear_scoring3.print_usage(); 
+      Py_RETURN_NONE;
+    }
+    
+    //protects acquired resources through this scope
+    auto model_ = make_safe(model);
+    auto ubm_means_ = make_safe(ubm_means);
+    auto ubm_variances_ = make_safe(ubm_variances);
+    auto channel_offset_ = make_safe(channel_offset);
 
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+    double score = bob::learn::misc::linearScoring(*PyBlitzArrayCxx_AsBlitz<double,1>(model), *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), *stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(channel_offset), f(frame_length_normalisation));
+
+    return Py_BuildValue("d",score);
+  }
 
-  if(nargs==4)
-    bob::learn::misc::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o),
-                             normalized_scores);
-  else
-    bob::learn::misc::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o), 
-                             *PyBlitzArrayCxx_AsBlitz<bool,2>(mask_zprobes_vs_tmodels_istruetrial_o),
-                             normalized_scores);
-
-  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
-  */
+  
+  else{
+    PyErr_Format(PyExc_RuntimeError, "wrong number or type of arguments - linear_scoring requires between 3 and 6 arguments, but you provided %d (see help)", nargs);
+    linear_scoring1.print_usage();
+    linear_scoring2.print_usage();
+    linear_scoring3.print_usage();
+    Py_RETURN_NONE;
+  }
 
 }
 
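For context, a minimal Python sketch of the three call signatures the dispatcher above now accepts, mirroring the prototypes of linear_scoring1/2/3 and the calls exercised in test_linearscoring.py further down; model1, model2, ubm, stats1-3 and test_channeloffset stand for objects built as in that test and are not defined here:

    from bob.learn.misc import linear_scoring

    # 1/ list of GMMMachine models against a GMMMachine UBM (3 to 5 arguments)
    scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], [], True)

    # 2/ list of model mean supervectors against the UBM mean/variance supervectors
    #    (4 to 6 arguments); an empty channel offset list means "no offset"
    scores = linear_scoring([model1.mean_supervector, model2.mean_supervector],
                            ubm.mean_supervector, ubm.variance_supervector,
                            [stats1, stats2, stats3])

    # 3/ single model supervector and a single GMMStats object; returns a scalar
    #    (5 or 6 arguments)
    score = linear_scoring(model1.mean_supervector, ubm.mean_supervector,
                           ubm.variance_supervector, stats1, test_channeloffset[0], True)
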
diff --git a/bob/learn/misc/main.cpp b/bob/learn/misc/main.cpp
index d51df95eb1ed4b2b53307518d40e77f34869ed24..9e6dc2c236985b3b387413a02f446af817f40ff6 100644
--- a/bob/learn/misc/main.cpp
+++ b/bob/learn/misc/main.cpp
@@ -33,10 +33,10 @@ static PyMethodDef module_methods[] = {
     z_norm.doc()
   },
   {
-    linear_scoring.name(),
+    linear_scoring1.name(),
     (PyCFunction)PyBobLearnMisc_linear_scoring,
     METH_VARARGS|METH_KEYWORDS,
-    linear_scoring.doc()
+    linear_scoring1.doc()
   },
 
   {0}//Sentinel
diff --git a/bob/learn/misc/test_linearscoring.py b/bob/learn/misc/test_linearscoring.py
index 987f0a10cd9f651d381cc85201eb0d4194ee99f4..8a96a0175331d8de00bbf744c3165cc42eb4d8f1 100644
--- a/bob/learn/misc/test_linearscoring.py
+++ b/bob/learn/misc/test_linearscoring.py
@@ -65,7 +65,7 @@ def test_LinearScoring():
   scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], [], True)
   assert (abs(scores - ref_scores_01) < 1e-7).all()
   #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], (), True)
-  assert (abs(scores - ref_scores_01) < 1e-7).all()
+  #assert (abs(scores - ref_scores_01) < 1e-7).all()
   #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], None, True)
   #assert (abs(scores - ref_scores_01) < 1e-7).all()
 
@@ -77,7 +77,7 @@ def test_LinearScoring():
   scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset, True)
   assert (abs(scores - ref_scores_11) < 1e-7).all()
 
-"""
+
   # 2/ Use mean/variance supervectors
   # 2/a/ Without test_channelOffset, without frame-length normalisation
   scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3])
@@ -95,6 +95,7 @@ def test_LinearScoring():
   scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], test_channeloffset, True)
   assert (abs(scores - ref_scores_11) < 1e-7).all()
 
+
   # 3/ Using single model/sample
   # 3/a/ without frame-length normalisation
   score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0])
@@ -110,6 +111,7 @@ def test_LinearScoring():
   score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2])
   assert abs(score - ref_scores_10[1,2]) < 1e-7
 
+
   # 3/b/ without frame-length normalisation
   score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0], True)
   assert abs(score - ref_scores_11[0,0]) < 1e-7
@@ -123,4 +125,4 @@ def test_LinearScoring():
   assert abs(score - ref_scores_11[1,1]) < 1e-7
   score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2], True)
   assert abs(score - ref_scores_11[1,2]) < 1e-7
-"""
+
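
The block 3 tests re-enabled above check the single-model overload entry by entry against the score matrices; a condensed sketch of that relationship, reusing the model*, stats*, test_channeloffset and ref_scores_11 objects defined earlier in the test:

    # models index the rows and test statistics the columns of the reference matrix
    for i, model in enumerate([model1, model2]):
        for j, stats in enumerate([stats1, stats2, stats3]):
            s = linear_scoring(model.mean_supervector, ubm.mean_supervector,
                               ubm.variance_supervector, stats, test_channeloffset[j], True)
            assert abs(s - ref_scores_11[i, j]) < 1e-7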