
from __future__ import print_function

import unittest

import os
import pkg_resources

import gridtk
import subprocess, signal
import time

from gridtk.models import Job

class GridTKTest(unittest.TestCase):
  """Tests for the gridtk local job manager and the ``jman`` command line tool.

  The test drives ``jman`` end-to-end: it submits jobs into a temporary SQLite
  database, runs the local scheduler in a subprocess, and inspects job states
  and log files through the :py:class:`gridtk.models.Job` ORM.
  """

  def setUp(self):
    # Create a temporary directory that will contain all outputs
    import tempfile
    self.temp_dir = tempfile.mkdtemp(prefix='gridtk_test')
    self.log_dir = os.path.join(self.temp_dir, 'logs')
    self.database = os.path.join(self.temp_dir, 'database.sql3')
    # handle of the scheduler subprocess while one is running, else None
    self.scheduler_job = None


  def tearDown(self):
    # make sure that all scheduler jobs are stopped after exiting
    if self.scheduler_job:
      self.scheduler_job.kill()
    # Clean up the mess that we created
    import shutil
    shutil.rmtree(self.temp_dir)

  def test01_local(self):
    # This test executes all commands of the local grid manager and asserts that everything is fine

    # first test, if the '/bin/bash' exists
    bash = '/bin/bash'
    if not os.path.exists(bash):
      # NOTE: fixed from the undefined name ``SkipException`` (which raised a
      # NameError); ``unittest.SkipTest`` is also honored by nose.
      raise unittest.SkipTest("Could not find '%s' which is required to run the test scripts"%bash)

    try:

      import nose

      # first, add some commands to the database
      script_1 = pkg_resources.resource_filename('gridtk.tests', 'test_script.sh')
      script_2 = pkg_resources.resource_filename('gridtk.tests', 'test_array.sh')
      from gridtk.script import jman
      # add a simple script that will write some information to the
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', bash, script_2])

      # check that the database was created successfully
      self.assertTrue(os.path.exists(self.database))

      print()
      # test that the list command works (should also work with the "default" grid manager)
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '1'])
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '2', '--print-array-jobs', '--print-dependencies'])

      # get insight into the database
      job_manager = gridtk.local.JobManagerLocal(database=self.database)
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 2)
      self.assertEqual(jobs[0].id, 1)
      self.assertEqual(jobs[1].id, 2)
      # the parametric range 1-7:2 expands to array jobs 1, 3, 5, 7
      self.assertEqual(len(jobs[1].array), 4)
      self.assertEqual(jobs[0].status, 'submitted')
      self.assertEqual(jobs[1].status, 'submitted')

      # check that the job dependencies are correct
      waiting = jobs[0].get_jobs_waiting_for_us()
      self.assertEqual(len(waiting), 1)
      self.assertEqual(waiting[0].id, 2)
      waited = jobs[1].get_jobs_we_wait_for()
      self.assertEqual(len(waited), 1)
      self.assertEqual(waited[0].id, 1)

      job_manager.unlock()

      # now, start the local execution of the job in a parallel job
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to assure that the scheduler was able to start the first job
      time.sleep(4)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # now, the first job needs to have status failure, and the second needs to be queued
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 2)
      if jobs[0].status in ('submitted', 'queued', 'executing'):
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'queued')
      # the result files should already be there
      self.assertTrue(os.path.exists(jobs[0].std_out_file()))
      self.assertTrue(os.path.exists(jobs[0].std_err_file()))
      job_manager.unlock()


      # reset the job 1
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--job-id', '1', '--running-jobs', '--overwrite-command', script_1])

      # now, start the local execution of the job in a parallel job
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to assure that the scheduler was able to finish the first and start the second job
      time.sleep(9)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # Job 1 and two array jobs of job two should be finished now, the other two still need to be queued
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 2)
      if jobs[0].status in ('queued', 'executing') or jobs[1].status == 'queued':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'executing')
      if jobs[1].array[0].status == 'executing' or jobs[1].array[1].status == 'executing':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      self.assertEqual(jobs[1].array[1].status, 'success')
      self.assertEqual(jobs[1].array[1].result, 0)
      self.assertEqual(len([a for a in jobs[1].array if a.status == 'queued']), 2)
      out_file = jobs[0].std_out_file()
      err_file = jobs[0].std_err_file()
      job_manager.unlock()

      # the result files of the first job should now be there
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[0], 'This is a text message to std-err')

      # resubmit all jobs
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--running-jobs'])
      # check that the log files have been cleaned
      self.assertFalse(os.path.exists(out_file))
      self.assertFalse(os.path.exists(err_file))
      # ... but the log dir still exists
      self.assertTrue(os.path.exists(self.log_dir))

      # now, let the scheduler run all jobs
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '1', '--parallel', '2', '--die-when-finished'])
      # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # check that all output files are generated again
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[0], 'This is a text message to std-err')

      # check the log files: 2 for job 1 plus one out and one err file for each
      # of the four array jobs of job 2 -> 10 files in total
      files = os.listdir(self.log_dir)
      self.assertEqual(len(files), 10)
      for i in range(1,8,2):
        self.assertTrue('test_2.o2.%d'%i in files)
        self.assertTrue('test_2.e2.%d'%i in files)

      # check that all array jobs are finished now
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 2)
      self.assertEqual(jobs[1].status, 'failure')
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      for i in range(1,4):
        self.assertEqual(jobs[1].array[i].status, 'success')
        self.assertEqual(jobs[1].array[i].result, 0)
      job_manager.unlock()

      print()
      # test that the list command still works
      jman.main(['./bin/jman', '--database', self.database, 'list', '--print-array-jobs'])

      print()
      # test that the report command works
      jman.main(['./bin/jman', '--database', self.database, 'report'])

      # clean-up
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete', '--job-ids', '1-2'])

      # check that the database and the log files are gone
      self.assertEqual(len(os.listdir(self.temp_dir)), 0)

      # add the scripts again, but this time with the --stop-on-failure option
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', '--stop-on-failure', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', '--stop-on-failure', bash, script_2])

      # and execute them, but without writing the log files
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '0.1', '--parallel', '2', '--die-when-finished', '--no-log-files'])
      # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # assert that the log files are not there
      self.assertFalse(os.path.isfile(out_file))
      self.assertFalse(os.path.isfile(err_file))


      # check that all array jobs are finished now; with --stop-on-failure the
      # dependent job 2 must have been stopped (failure, no result) after job 1 failed
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 2)
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[0].result, 255)
      self.assertEqual(jobs[1].status, 'failure')
      self.assertTrue(jobs[1].result is None)
      job_manager.unlock()

      # and clean up again
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete'])

    except KeyboardInterrupt:
      # make sure that the keyboard interrupt is captured and the mess is cleaned up (i.e. by calling tearDown)
      pass


  def notest02_grid(self):
    # Tests the functionality of the grid toolkit in the grid
    import nose
    raise nose.plugins.skip.SkipTest("This test is not yet implemented. If you find a proper ways to test the grid functionality, please go ahead and implement the test.")