Commit d645a23d authored by Samuel GAIST's avatar Samuel GAIST
Browse files

[databases] Code cleanup

parent 7780ed3e
Pipeline #17171 canceled with stage
in 111 minutes and 27 seconds
......@@ -141,7 +141,7 @@ def load_database_sets(configuration, database_name):
else:
logger.error("Database specification should have the format "
"`<database>/<version>/[<protocol>/[<set>]]', the value "
"you passed (%s) is not valid", (dataset_name))
"you passed (%s) is not valid", database_name)
return (None, None)
# Load the dataformat
......@@ -290,7 +290,7 @@ def start_db_container(configuration, cmd, host,
databases_container.add_volume(configuration.cache, '/beat/cache')
# Specify the volumes to mount inside the container
if not db_configuration.has_key('datasets_root_path'):
if 'datasets_root_path' not in db_configuration:
databases_container.add_volume(
database_path, os.path.join('/databases', db_name))
else:
......@@ -308,16 +308,18 @@ def start_db_container(configuration, cmd, host,
db_socket.connect(db_address)
for output_name, dataformat_name in db_set['outputs'].items():
if excluded_outputs is not None and output_name in excluded_outputs:
if excluded_outputs is not None and \
output_name in excluded_outputs:
continue
data_source = RemoteDataSource()
data_source.setup(db_socket, output_name,
dataformat_name, configuration.path)
input = inputs.Input(
output_name, database.dataformats[dataformat_name], data_source)
input_group.add(input)
input_ = inputs.Input(output_name,
database.dataformats[dataformat_name],
data_source)
input_group.add(input_)
return (databases_container, db_socket, zmq_context, input_list)
......@@ -370,8 +372,6 @@ def pull(webapi, prefix, names, force, indentation, format_cache):
force, indentation)
# see what dataformats one needs to pull
indent = indentation * ' '
dataformats = []
for name in names:
obj = Database(prefix, name)
......@@ -574,11 +574,13 @@ def view_outputs(configuration, dataset_name, excluded_outputs=None, uid=None,
input_group = inputs.InputGroup(set_name, restricted_access=False)
for output_name, dataformat_name in db_set['outputs'].items():
if (excluded_outputs is not None) and (output_name in excluded_outputs):
if excluded_outputs is not None and \
output_name in excluded_outputs:
continue
input = inputs.Input(
output_name, database.dataformats[dataformat_name], view.data_sources[output_name])
input = inputs.Input(output_name,
database.dataformats[dataformat_name],
view.data_sources[output_name])
input_group.add(input)
else:
......@@ -586,9 +588,10 @@ def view_outputs(configuration, dataset_name, excluded_outputs=None, uid=None,
(databases_container, db_socket, zmq_context, input_list) = \
start_db_container(configuration, CMD_VIEW_OUTPUTS,
host, db_name, protocol_name, set_name, database, db_set,
excluded_outputs=excluded_outputs, uid=uid, db_root=db_root
)
host, db_name, protocol_name,
set_name, database, db_set,
excluded_outputs=excluded_outputs,
uid=uid, db_root=db_root)
input_group = input_list.group(set_name)
......@@ -607,9 +610,9 @@ def view_outputs(configuration, dataset_name, excluded_outputs=None, uid=None,
print 'FROM %d TO %d' % (start, end)
whole_inputs = [input for input in input_group
if (input.data_index == start) and
(input.data_index_end == end)]
whole_inputs = [input_ for input_ in input_group
if input_.data_index == start and
input_.data_index_end == end]
for input in whole_inputs:
label = ' - ' + str(input.name) + ': '
......@@ -617,15 +620,16 @@ def view_outputs(configuration, dataset_name, excluded_outputs=None, uid=None,
previous_start = start
selected_inputs = [input for input in input_group
if (input.data_index == input_group.first_data_index) and
((input.data_index != start) or
(input.data_index_end != end))]
selected_inputs = \
[input_ for input_ in input_group
if input_.data_index == input_group.first_data_index and
(input_.data_index != start or
input_.data_index_end != end)]
grouped_inputs = {}
for input in selected_inputs:
key = (input.data_index, input.data_index_end)
if not grouped_inputs.has_key(key):
for input_ in selected_inputs:
key = (input_.data_index, input_.data_index_end)
if key not in grouped_inputs:
grouped_inputs[key] = []
grouped_inputs[key].append(input)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment