bob / bob.learn.em / Commits / 8121f54a
Commit 8121f54a, authored 2 years ago by Yannick DAYER
Fix IVector dask bags test
Parent: 67962115
No related branches found. No related tags found.
1 merge request: !60 Port of I-Vector to python
Showing 2 changed files with 26 additions and 14 deletions:

    bob/learn/em/ivector.py            +5  −5
    bob/learn/em/test/test_ivector.py  +21 −9
bob/learn/em/ivector.py  (+5, −5)
@@ -183,18 +183,21 @@ def m_step(
     machine: "IVectorMachine", stats: List[IVectorStats]
 ) -> "IVectorMachine":
     """Updates the Machine with the maximization step of the e-m algorithm."""
+    # Merge all the stats
     stats = functools.reduce(operator.iadd, stats)
 
     A = stats.nij_sigma_wij2.transpose((0, 2, 1))
     B = stats.fnorm_sigma_wij.transpose((0, 2, 1))
 
+    # Default value of X if any of A[c] is 0
     X = np.zeros_like(B)
-    # Solve for all A != 0
-    if any(mask := A.any(axis=(-2, -1))):
+    # Solve for all A[c] != 0
+    if any(mask := A.any(axis=(-2, -1))):  # Prevents solving with 0 matrices
         X[mask] = [
             np.linalg.solve(A[c], B[c]) for c in range(len(mask)) if A[c].any()
         ]
 
+    # Update the machine
     machine.T = X.transpose((0, 2, 1))
 
     if machine.update_sigma:
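The masked solve above only calls np.linalg.solve for components whose accumulator A[c] is non-zero, and keeps the zero default in X otherwise. A minimal, self-contained sketch of that pattern with toy shapes and data (not part of the commit):

import numpy as np

# Toy per-component systems: component 0 has data, component 1 is all zeros.
A = np.stack([np.array([[2.0, 0.0], [0.0, 4.0]]), np.zeros((2, 2))])  # shape (2, 2, 2)
B = np.stack([np.array([[2.0], [8.0]]), np.zeros((2, 1))])            # shape (2, 2, 1)

# Default value of X where A[c] is empty
X = np.zeros_like(B)
# Solve only where A[c] is not all zeros, so np.linalg.solve never sees a zero (singular) matrix.
if any(mask := A.any(axis=(-2, -1))):
    X[mask] = [
        np.linalg.solve(A[c], B[c]) for c in range(len(mask)) if A[c].any()
    ]

print(X[0].ravel())  # [1. 2.]  -> solution of the non-empty system
print(X[1].ravel())  # [0. 0.]  -> untouched default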
@@ -309,9 +312,6 @@ class IVectorMachine(BaseEstimator):
             new_machine = dask.compute(dask.delayed(m_step)(self, stats))[0]
             for attr in ["T", "sigma"]:
                 setattr(self, attr, getattr(new_machine, attr))
-            self.T.persist()
-            self.sigma.persist()
         else:
             stats = [
                 e_step(
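In the branch above, the m-step is wrapped in dask.delayed and run through dask.compute, which hands back already-computed (concrete) objects; that is presumably why the trailing .persist() calls were dropped. A minimal sketch of the delayed/compute pattern itself (the add function here is only a stand-in for m_step, not project code):

import dask

def add(a, b):  # stand-in for m_step(machine, stats)
    return a + b

lazy = dask.delayed(add)(1, 2)  # builds a task graph; nothing runs yet
result = dask.compute(lazy)[0]  # executes the graph; compute returns a tuple
assert result == 3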
bob/learn/em/test/test_ivector.py  (+21, −9)
@@ -5,6 +5,7 @@
 import contextlib
 import copy
 
+import dask.bag
 import dask.distributed
 import numpy as np
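The explicit import matters because dask.bag is a subpackage that is generally not exposed as an attribute after a bare "import dask"; it has to be imported on its own before dask.bag.from_sequence can be used. A small illustration (not part of the commit):

import dask.bag

bag = dask.bag.from_sequence(range(4), npartitions=2)
print(bag.sum().compute())  # 6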
@@ -13,8 +14,7 @@ from pkg_resources import resource_filename
 from bob.learn.em import GMMMachine, GMMStats, IVectorMachine
 from bob.learn.em.ivector import e_step, m_step
-from bob.learn.em.test.test_kmeans import to_numpy
+from .test_kmeans import to_dask_array, to_numpy
 
 
 @contextlib.contextmanager
@@ -27,6 +27,17 @@ def _dask_distributed_context():
         client.close()
 
 
+def to_dask_bag(*args):
+    """Converts all args into dask Bags."""
+    result = []
+    for x in args:
+        x = np.asarray(x)
+        result.append(dask.bag.from_sequence(x, npartitions=x.shape[0] * 2))
+    if len(result) == 1:
+        return result[0]
+    return result
+
+
 def test_ivector_machine_base():
     # Create the UBM and set its values manually
     ubm = GMMMachine(n_gaussians=2)
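The new helper above partitions each argument along its first axis (one bag item per sample). A minimal usage sketch of the same from_sequence call with toy data (not part of the commit; dask may cap the partition count at the number of items):

import dask.bag
import numpy as np

data = np.arange(6).reshape(3, 2)
bag = dask.bag.from_sequence(data, npartitions=data.shape[0] * 2)
print(bag.map(lambda row: int(row.sum())).compute())  # [1, 5, 9]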
@@ -283,21 +294,22 @@ def test_ivector_fit():
     # Serial test
     np.random.seed(0)
     fit_data = to_numpy(fit_data)
-    projected_data = ubm.transform(d for d in fit_data)
+    projected_data = ubm.transform(fit_data)
     m = IVectorMachine(ubm=ubm, dim_t=2, max_iterations=2)
-    m.fit(d for d in projected_data)
+    m.fit(projected_data)
-    result = m.transform(ubm.transform(d for d in test_data))
+    result = m.transform(ubm.transform(test_data))
     np.testing.assert_almost_equal(result, reference_result, decimal=5)
 
     # Parallel test
     with _dask_distributed_context():
-        for transform in [to_numpy, to_dask_array]:
+        for transform in [to_numpy, to_dask_bag]:
             np.random.seed(0)
             fit_data = transform(fit_data)
-            projected_data = ubm.transform(d for d in fit_data)
+            projected_data = ubm.transform(fit_data)
+            projected_data = transform(projected_data)
             m = IVectorMachine(ubm=ubm, dim_t=2, max_iterations=2)
-            m.fit(d for d in projected_data)
+            m.fit(projected_data)
-            result = m.transform(d for d in test_data)
+            result = m.transform(ubm.transform(test_data))
             np.testing.assert_almost_equal(
                 np.array(result), reference_result, decimal=5
             )
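The assertions above compare the parallel result against the serial reference with decimal=5, i.e. absolute differences up to roughly 1.5e-5 are tolerated. A quick self-contained illustration of that tolerance (not part of the commit):

import numpy as np

reference = np.array([0.123456, 0.654321])
np.testing.assert_almost_equal(reference + 1e-6, reference, decimal=5)  # passes: |diff| < 1.5e-5
# np.testing.assert_almost_equal(reference + 1e-4, reference, decimal=5)  # would raise AssertionError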