
from __future__ import print_function

import unittest

import os
import pkg_resources

import gridtk
import subprocess, signal
import time

from gridtk.models import Job

class GridTKTest(unittest.TestCase):
  # This class defines tests for the gridtk package

  def setUp(self):
    # Create a temporary directory that will contain all outputs
    import tempfile
    self.temp_dir = tempfile.mkdtemp(prefix='gridtk_test')
    self.log_dir = os.path.join(self.temp_dir, 'logs')
    self.database = os.path.join(self.temp_dir, 'database.sql3')
    self.scheduler_job = None


  def tearDown(self):
    # make sure that all scheduler jobs are stopped after exiting
    if self.scheduler_job:
      self.scheduler_job.kill()
    # Clean up the mess that we created
    import shutil
    shutil.rmtree(self.temp_dir)

  def test01_local(self):
    # This test executes all commands of the local grid manager and asserts that everything is fine

    # first, test whether '/bin/bash' exists
    bash = '/bin/bash'
    if not os.path.exists(bash):
      raise unittest.SkipTest("Could not find '%s', which is required to run the test scripts" % bash)

    try:

      import nose

      # first, add some commands to the database
      script_1 = pkg_resources.resource_filename('gridtk.tests', 'test_script.sh')
      script_2 = pkg_resources.resource_filename('gridtk.tests', 'test_array.sh')
      rdir = pkg_resources.resource_filename('gridtk', 'tests')
      from gridtk.script import jman
      # add a simple script that will write some information to the log files
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', bash, script_2])
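      # the third job depends on the first two and runs its script relative to the --exec-dir directory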
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_3',  '--dependencies', '1', '2', '--exec-dir', rdir, bash, "test_array.sh"])

      # check that the database was created successfully
      self.assertTrue(os.path.exists(self.database))

      print()
      # test that the list command works (should also work with the "default" grid manager)
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '1'])
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '2', '--print-array-jobs', '--print-dependencies', '--print-times'])

      # get insight into the database
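      # (lock() returns a database session that we can query directly; unlock() must be called when done)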
      job_manager = gridtk.local.JobManagerLocal(database=self.database)
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[0].id, 1)
      self.assertEqual(jobs[1].id, 2)
      self.assertEqual(jobs[2].id, 3)
      self.assertEqual(len(jobs[1].array), 4)
      self.assertEqual(jobs[0].status, 'submitted')
      self.assertEqual(jobs[1].status, 'submitted')
      self.assertEqual(jobs[2].status, 'submitted')
      self.assertTrue(all(j.submit_time is not None for j in jobs))
      self.assertTrue(all(j.start_time is None for j in jobs))
      self.assertTrue(all(j.finish_time is None for j in jobs))
      self.assertTrue(all(j.submit_time is not None for j in jobs[1].array))
      self.assertTrue(all(j.start_time is None for j in jobs[1].array))
      self.assertTrue(all(j.finish_time is None for j in jobs[1].array))

      # check that the job dependencies are correct
      waiting = jobs[0].get_jobs_waiting_for_us()
      self.assertEqual(len(waiting), 2)
      self.assertEqual(waiting[0].id, 2)
      self.assertEqual(waiting[1].id, 3)
      waited = jobs[2].get_jobs_we_wait_for()
      self.assertEqual(len(waited), 2)
      self.assertEqual(waited[0].id, 1)
      self.assertEqual(waited[1].id, 2)

      job_manager.unlock()

      # now, start the local execution of the jobs in a separate scheduler process
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to ensure that the scheduler was able to start the first job
      time.sleep(4)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # now, the first job needs to have status failure, the second needs to be queued, and the third needs to be waiting
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      if jobs[0].status in ('submitted', 'queued', 'executing'):
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'queued')
      self.assertEqual(jobs[2].status, 'waiting')
      self.assertTrue(jobs[0].start_time is not None)
      self.assertTrue(jobs[0].finish_time is not None)
      self.assertTrue(jobs[1].start_time is None)
      self.assertTrue(jobs[1].finish_time is None)
      self.assertTrue(jobs[2].start_time is None)
      self.assertTrue(jobs[2].finish_time is None)

      # the result files should already be there
      self.assertTrue(os.path.exists(jobs[0].std_out_file()))
      self.assertTrue(os.path.exists(jobs[0].std_err_file()))
      job_manager.unlock()


      # reset job 1, overwriting its command
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--job-id', '1', '--running-jobs', '--overwrite-command', script_1])

      # now, restart the local execution of the jobs in a separate scheduler process
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to ensure that the scheduler was able to finish the first and start the second job
      time.sleep(9)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # job 1 and two array jobs of job 2 should be finished now; the other two array jobs should still be queued
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      if jobs[0].status in ('queued', 'executing') or jobs[1].status == 'queued':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'executing')
      if jobs[1].array[0].status == 'executing' or jobs[1].array[1].status == 'executing':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      self.assertEqual(jobs[1].array[1].status, 'success')
      self.assertEqual(jobs[1].array[1].result, 0)
      self.assertEqual(len([a for a in jobs[1].array if a.status == 'queued']), 2)
      out_file = jobs[0].std_out_file()
      err_file = jobs[0].std_err_file()
      job_manager.unlock()

      # the result files of the first job should now be there
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[0], 'This is a text message to std-err')

      # resubmit all jobs
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--running-jobs'])
      # check that the log files have been cleaned
      self.assertFalse(os.path.exists(out_file))
      self.assertFalse(os.path.exists(err_file))
      # ... but the log dir still exists
      self.assertTrue(os.path.exists(self.log_dir))

      # now, let the scheduler run all jobs, but this time in verbose mode
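      # --die-when-finished makes the scheduler process exit once all jobs have been processed, so we can simply wait for it below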
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '-vv', '--database', self.database, 'run-scheduler', '--sleep-time', '1', '--parallel', '2', '--die-when-finished'])
      # and wait for the scheduler to finish (the timeout argument of Popen.wait() only exists from Python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # check that all output files are generated again
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[1], 'This is a text message to std-err')

      # check that all twelve log files have been created: one output and one error file each for jobs 1 and 3, plus four output and four error files for the array job
      files = os.listdir(self.log_dir)
      self.assertEqual(len(files), 12)
      for i in range(1,8,2):
        self.assertTrue('test_2.o2.%d'%i in files)
        self.assertTrue('test_2.e2.%d'%i in files)

      # check that all array jobs are finished now
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[1].status, 'failure')
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      for i in range(1,4):
        self.assertEqual(jobs[1].array[i].status, 'success')
        self.assertEqual(jobs[1].array[i].result, 0)
      self.assertEqual(jobs[2].status, 'success')
      self.assertEqual(jobs[2].result, 0)

      self.assertTrue(all(j.submit_time is not None for j in jobs))
      self.assertTrue(all(j.start_time is not None for j in jobs))
      self.assertTrue(all(j.finish_time is not None for j in jobs))
      self.assertTrue(all(j.submit_time is not None for j in jobs[1].array))
      self.assertTrue(all(j.start_time is not None for j in jobs[1].array))
      self.assertTrue(all(j.finish_time is not None for j in jobs[1].array))

      job_manager.unlock()

      print()
      # test that the list command still works
      jman.main(['./bin/jman', '--database', self.database, 'list', '--print-array-jobs'])

      print()
      # test that the report command works
      jman.main(['./bin/jman', '--database', self.database, 'report'])

      # clean-up
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete', '--job-ids', '1-3'])

      # check that the database and the log files are gone
      self.assertEqual(len(os.listdir(self.temp_dir)), 0)

      # add the scripts again, but this time with the --stop-on-failure option
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', '--stop-on-failure', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', '--stop-on-failure', bash, script_2])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_3',  '--dependencies', '1', '2', '--exec-dir', rdir, '--stop-on-failure', bash, "test_array.sh"])

      # and execute them, but without writing the log files
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '0.1', '--parallel', '2', '--die-when-finished', '--no-log-files'])
      # and wait for the scheduler to finish (the timeout argument of Popen.wait() only exists from Python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # assert that the log files are not there
      self.assertFalse(os.path.isfile(out_file))
      self.assertFalse(os.path.isfile(err_file))


      # check that all array jobs are finished now
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[0].result, 255)
      self.assertEqual(jobs[1].status, 'failure')
      self.assertTrue(jobs[1].result is None)
      self.assertEqual(jobs[2].status, 'failure')
      self.assertTrue(jobs[2].result is None)
      job_manager.unlock()

      # and clean up again
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete'])
      self.assertEqual(len(os.listdir(self.temp_dir)), 0)

    except KeyboardInterrupt:
      # make sure that the keyboard interrupt is captured and the mess is cleaned up (i.e. by calling tearDown)
      pass


  def notest02_grid(self):
    # Tests the functionality of the grid toolkit in the grid; the 'notest' prefix keeps it from being picked up by the test runner
    import nose
    raise nose.plugins.skip.SkipTest("This test is not yet implemented. If you find a proper way to test the grid functionality, please go ahead and implement the test.")