Skip to content
Snippets Groups Projects
Commit 31e9b4c3 authored by Samuel GAIST's avatar Samuel GAIST
Browse files

[utils] Test refactor to separate data and tests

parent 2bfbacdf
No related branches found
No related tags found
No related merge requests found
# You may import any python packages that will be available in the environment you will run this algorithm in
# Environments can change based on the experiment's settings
# You may import any python packages that will be available in the environment
# you will run this algorithm in Environments can change based on the
# experiment's settings
{% for ref, lib in contents.uses.items() %}# Library "{{ lib }}" is available under "{{ ref }}"
{% endfor %}
class Algorithm:
# initialise fields to store cross-input data (e.g. machines, aggregations, etc.)
# initialise fields to store cross-input data (e.g. machines,
# aggregations, etc.)
def __init__(self):
pass
{% if contents.parameters %}
......@@ -18,7 +20,8 @@ class Algorithm:
return True
{% endif %}
# this will be called each time the sync'd input has more data available to be processed
# this will be called each time the sync'd input has more data available to
# be processed
def process(self, inputs, outputs):
# Groups available:
{% for group in contents.groups %} # Group {{ loop.index - 1 }}:
......@@ -29,19 +32,22 @@ class Algorithm:
{% for rName, result in contents.results.items() %}# Result "{{ rName }}" with type "{{ result.type }}"
{% endfor %}{% endif %}
# to check if there is more data waiting in the inputs
# (if it is False, you have processed all the inputs and this "process" function won't be called again):
# (if it is False, you have processed all the inputs and this "process"
# function won't be called again):
# if inputs.hasMoreData():
# to check if a specific input is done:
# if inputs["input1"].isDataUnitDone():
# to manually fetch the next input of a specific input
# (e.g. the block is not sync'd to the input but you want the input immediately)
# (e.g. the block is not sync'd to the input but you want the input
# immediately)
# inputs['input1'].next()
# you can then access that input value as normal:
# self.val1 = inputs['input1'].data
# to get the data for an input (note that the value will be of the type specified in the metadata!):
# to get the data for an input (note that the value will be of the type
# specified in the metadata!):
# data_value = inputs['input1'].data
# to write to an output:
......
# You may import any python packages that will be available in the environment you will run this database in
# Environments can change based on the experiment's settings
# You may import any python packages that will be available in the environment
# you will run this database in Environments can change based on the
# experiment's settings
from beat.backend.python.database import View
{% for view in views %}
class {{ view }}(View):
# build the data for your view
# split the raw data into (homogenous) bits and return a keyed iterable
# (something with `.keys()` available to it, like a dict)
# the key names must be the same as the output names for sets that use this view
# root_folder: the path to the root folder of the database's files (not always applicable)
# the key names must be the same as the output names for sets that use this
# view.
# root_folder: the path to the root folder of the database's files (not
# always applicable)
# parameters: parameters passed to the view, defined in the metadata
def index(self, root_folder, parameters):
pass
......@@ -18,7 +21,7 @@ class {{ view }}(View):
def get(self, output, index):
# to get the current object referenced by the given index:
# obj = self.objs[index]
# note that this object is a named tuple, with fields equivalent to your keys from
# the objects returned from the index function
# note that this object is a named tuple, with fields equivalent to
# your keys from the objects returned from the index function
pass
{% endfor %}
# You may import any python packages that will be available in the environment you will run this library in
# Environments can change based on the experiment's settings
# You may import any python packages that will be available in the environment
# you will run this library in Environments can change based on the
# experiment's settings
{% for ref, lib in uses.items() %}# Library "{{ lib }}" is available under "{{ ref }}"
{% endfor %}
# You may import any python packages that will be available in the environment
# you will run this algorithm in Environments can change based on the
# experiment's settings
class Algorithm:
    """Skeleton BEAT algorithm: keeps no state and consumes all inputs."""

    # initialise fields to store cross-input data (e.g. machines,
    # aggregations, etc.)
    def __init__(self):
        pass

    # this will be called each time the sync'd input has more data available
    # to be processed
    def process(self, inputs, outputs):
        # Groups available:

        # to check if there is more data waiting in the inputs
        # (if it is False, you have processed all the inputs and this
        # "process" function won't be called again):
        # if inputs.hasMoreData():

        # to check if a specific input is done:
        # if inputs["input1"].isDataUnitDone():

        # to manually fetch the next input of a specific input
        # (e.g. the block is not sync'd to the input but you want the input
        # immediately)
        # inputs['input1'].next()
        # you can then access that input value as normal:
        # self.val1 = inputs['input1'].data

        # to get the data for an input (note that the value will be of the
        # type specified in the metadata!):
        # data_value = inputs['input1'].data

        # to write to an output:
        # outputs['output1'].write({
        #     'output_field_1': 1,
        #     'output_field_2': 'output'
        # })

        # always return True, it signals BEAT to continue processing
        return True
# You may import any python packages that will be available in the environment
# you will run this database in Environments can change based on the
# experiment's settings
from beat.backend.python.database import View
class View(View):
    """Skeleton BEAT database view: indexing and data access are stubs."""

    # build the data for your view
    # split the raw data into (homogenous) bits and return a keyed iterable
    # (something with `.keys()` available to it, like a dict)
    # the key names must be the same as the output names for sets that use
    # this view.
    # root_folder: the path to the root folder of the database's files (not
    # always applicable)
    # parameters: parameters passed to the view, defined in the metadata
    def index(self, root_folder, parameters):
        pass

    # returns a value at a specific index in the iterable for this view
    # output: the specific output value requested
    # index: the current index of the iterable
    def get(self, output, index):
        # to get the current object referenced by the given index:
        # obj = self.objs[index]
        # note that this object is a named tuple, with fields equivalent to
        # your keys from the objects returned from the index function
        pass
# You may import any python packages that will be available in the environment
# you will run this library in Environments can change based on the
# experiment's settings
......@@ -27,86 +27,36 @@
# test the utils.py file
# (mostly python file generation via jinja2 templates)
import nose.tools
import os
import nose.tools
import pkg_resources
from .. import utils
def test_generate_empty_database():
    """utils.generate_database() with no arguments must render the default
    (empty) database template verbatim."""
    # Expected rendering of the default database template.
    # NOTE(review): indentation inside this reference string appears to have
    # been lost in extraction (class/method bodies sit at column 0) — confirm
    # against the actual template output before relying on it.
    empty_db = """# You may import any python packages that will be available in the environment you will run this database in
# Environments can change based on the experiment's settings
from beat.backend.python.database import View
class View(View):
# build the data for your view
# split the raw data into (homogenous) bits and return a keyed iterable
# (something with `.keys()` available to it, like a dict)
# the key names must be the same as the output names for sets that use this view
# root_folder: the path to the root folder of the database's files (not always applicable)
# parameters: parameters passed to the view, defined in the metadata
def index(self, root_folder, parameters):
pass
# returns a value at a specific index in the iterable for this view
# output: the specific output value requested
# index: the current index of the iterable
def get(self, output, index):
# to get the current object referenced by the given index:
# obj = self.objs[index]
# note that this object is a named tuple, with fields equivalent to your keys from
# the objects returned from the index function
pass
"""

    # Fixed: the original bound the result to `str`, shadowing the builtin.
    generated = utils.generate_database()
    nose.tools.eq_(generated, empty_db)
def test_generate_empty_algorithm():
empty_alg = """# You may import any python packages that will be available in the environment you will run this algorithm in
# Environments can change based on the experiment's settings
class Algorithm:
# initialise fields to store cross-input data (e.g. machines, aggregations, etc.)
def __init__(self):
pass
# this will be called each time the sync'd input has more data available to be processed
def process(self, inputs, outputs):
# Groups available:
# to check if there is more data waiting in the inputs
# (if it is False, you have processed all the inputs and this "process" function won't be called again):
# if inputs.hasMoreData():
# to check if a specific input is done:
# if inputs["input1"].isDataUnitDone():
DATA_PATH = pkg_resources.resource_filename("beat.editor.test", "reference_data")
# to manually fetch the next input of a specific input
# (e.g. the block is not sync'd to the input but you want the input immediately)
# inputs['input1'].next()
# you can then access that input value as normal:
# self.val1 = inputs['input1'].data
# to get the data for an input (note that the value will be of the type specified in the metadata!):
# data_value = inputs['input1'].data
def test_generate_empty_database():
    """The default database template must match the stored reference file."""
    reference_path = os.path.join(DATA_PATH, "empty_database.py")
    with open(reference_path) as reference_file:
        expected = reference_file.read()
    nose.tools.eq_(utils.generate_database(), expected)
# to write to an output:
# outputs['output1'].write({
# 'output_field_1': 1,
# 'output_field_2': 'output'
# })
# always return True, it signals BEAT to continue processing
return True"""
def test_generate_empty_algorithm():
    """Rendering a minimal algorithm declaration must match the stored
    reference file."""
    # Minimal algorithm contents: splittable, no groups, no used libraries.
    contents = {"splittable": True, "groups": [], "uses": {}}
    rendered = utils.generate_algorithm(contents)
    with open(os.path.join(DATA_PATH, "empty_algorithm.py")) as reference_file:
        expected = reference_file.read()
    nose.tools.eq_(rendered, expected)
alg = { 'name': 'user/alg/1', 'contents': { 'splittable': True, 'groups': [], 'uses': {} }}
str = utils.generate_algorithm(alg['contents'])
nose.tools.eq_(str, empty_alg)
def test_generate_empty_library():
    """utils.generate_library() with no arguments must render the default
    (empty) library template verbatim."""
    # Expected rendering of the default library template.
    empty_lib = """# You may import any python packages that will be available in the environment you will run this library in
# Environments can change based on the experiment's settings
"""
    # Fixed: the original bound the result to `str`, shadowing the builtin.
    generated = utils.generate_library()
    nose.tools.eq_(generated, empty_lib)
library = utils.generate_library()
with open(os.path.join(DATA_PATH, "empty_library.py")) as db_file:
empty_lib = db_file.read()
nose.tools.eq_(library, empty_lib)
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment