from __future__ import print_function

import os
import signal
import subprocess
import time
import unittest

import pkg_resources

import gridtk
from gridtk.models import Job

class GridTKTest(unittest.TestCase):
  """Tests for the gridtk job manager, exercising the local scheduler end-to-end."""

  def setUp(self):
    """Create a temporary directory that will contain all test outputs."""
    import tempfile
    self.temp_dir = tempfile.mkdtemp(prefix='gridtk_test')
    self.log_dir = os.path.join(self.temp_dir, 'logs')
    self.database = os.path.join(self.temp_dir, 'database.sql3')
    # handle to a running scheduler subprocess, if any; tracked so tearDown can kill it
    self.scheduler_job = None


  def tearDown(self):
    """Stop any still-running scheduler job and remove the temporary directory."""
    # make sure that all scheduler jobs are stopped after exiting
    if self.scheduler_job:
      self.scheduler_job.kill()
    # Clean up the mess that we created
    import shutil
    shutil.rmtree(self.temp_dir)

  def test01_local(self):
    """Executes all commands of the local grid manager and asserts that everything is fine."""

    # first test, if the '/bin/bash' exists
    bash = '/bin/bash'
    if not os.path.exists(bash):
      # BUGFIX: was `raise SkipException(...)`, an undefined name that would
      # produce a NameError instead of skipping; unittest.SkipTest is honored
      # by both unittest and nose runners.
      raise unittest.SkipTest("Could not find '%s' which is required to run the test scripts"%bash)

    try:

      import nose

      # first, add some commands to the database
      script_1 = pkg_resources.resource_filename('gridtk.tests', 'test_script.sh')
      script_2 = pkg_resources.resource_filename('gridtk.tests', 'test_array.sh')
      rdir = pkg_resources.resource_filename('gridtk', 'tests')
      from gridtk.script import jman
      # add a simple script that will write some information to the
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', bash, script_2])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_3',  '--dependencies', '1', '2', '--exec-dir', rdir, bash, "test_array.sh"])

      # check that the database was created successfully
      self.assertTrue(os.path.exists(self.database))

      print()
      # test that the list command works (should also work with the "default" grid manager
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '1'])
      jman.main(['./bin/jman', '--database', self.database, 'list', '--job-ids', '2', '--print-array-jobs', '--print-dependencies'])

      # get insight into the database
      job_manager = gridtk.local.JobManagerLocal(database=self.database)
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[0].id, 1)
      self.assertEqual(jobs[1].id, 2)
      self.assertEqual(jobs[2].id, 3)
      # parametric range 1-7:2 expands to four array jobs (1, 3, 5, 7)
      self.assertEqual(len(jobs[1].array), 4)
      self.assertEqual(jobs[0].status, 'submitted')
      self.assertEqual(jobs[1].status, 'submitted')
      self.assertEqual(jobs[2].status, 'submitted')

      # check that the job dependencies are correct
      waiting = jobs[0].get_jobs_waiting_for_us()
      self.assertEqual(len(waiting), 2)
      self.assertEqual(waiting[0].id, 2)
      self.assertEqual(waiting[1].id, 3)
      waited = jobs[2].get_jobs_we_wait_for()
      self.assertEqual(len(waited), 2)
      self.assertEqual(waited[0].id, 1)
      self.assertEqual(waited[1].id, 2)

      job_manager.unlock()

      # now, start the local execution of the job in a parallel job
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to assure that the scheduler was able to start the first job
      time.sleep(4)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # now, the first job needs to have status failure, and the second needs to be queued
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      if jobs[0].status in ('submitted', 'queued', 'executing'):
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'queued')
      self.assertEqual(jobs[2].status, 'waiting')
      # the result files should already be there
      self.assertTrue(os.path.exists(jobs[0].std_out_file()))
      self.assertTrue(os.path.exists(jobs[0].std_err_file()))
      job_manager.unlock()


      # reset the job 1
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--job-id', '1', '--running-jobs', '--overwrite-command', script_1])

      # now, start the local execution of the job in a parallel job
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '5', '--parallel', '2'])

      # sleep some time to assure that the scheduler was able to finish the first and start the second job
      time.sleep(9)
      # ... and kill the scheduler
      self.scheduler_job.kill()
      self.scheduler_job = None

      # Job 1 and two array jobs of job two should be finished now, the other two still need to be queued
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      if jobs[0].status in ('queued', 'executing') or jobs[1].status == 'queued':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[1].status, 'executing')
      if jobs[1].array[0].status == 'executing' or jobs[1].array[1].status == 'executing':
        # on slow machines, we don't want the tests to fail, so we just skip
        job_manager.unlock()
        raise nose.plugins.skip.SkipTest("This machine seems to be quite slow in processing parallel jobs.")
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      self.assertEqual(jobs[1].array[1].status, 'success')
      self.assertEqual(jobs[1].array[1].result, 0)
      self.assertEqual(len([a for a in jobs[1].array if a.status == 'queued']), 2)
      out_file = jobs[0].std_out_file()
      err_file = jobs[0].std_err_file()
      job_manager.unlock()

      # the result files of the first job should now be there
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[0], 'This is a text message to std-err')

      # resubmit all jobs
      jman.main(['./bin/jman', '--local', '--database', self.database, 'resubmit', '--running-jobs'])
      # check that the log files have been cleaned
      self.assertFalse(os.path.exists(out_file))
      self.assertFalse(os.path.exists(err_file))
      # ... but the log dir still exists
      self.assertTrue(os.path.exists(self.log_dir))

      # now, let the scheduler run all jobs, but this time in verbose mode
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '-vv', '--database', self.database, 'run-scheduler', '--sleep-time', '1', '--parallel', '2', '--die-when-finished'])
      # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # check that all output files are generated again
      self.assertTrue(os.path.isfile(out_file))
      self.assertTrue(os.path.isfile(err_file))
      self.assertEqual(open(out_file).read().rstrip(), 'This is a text message to std-out')
      self.assertEqual(open(err_file).read().split('\n')[1], 'This is a text message to std-err')

      # check that exactly four output and four error files have been created
      files = os.listdir(self.log_dir)
      self.assertEqual(len(files), 12)
      for i in range(1,8,2):
        self.assertTrue('test_2.o2.%d'%i in files)
        self.assertTrue('test_2.e2.%d'%i in files)

      # check that all array jobs are finished now
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[1].status, 'failure')
      self.assertEqual(jobs[1].array[0].status, 'failure')
      self.assertEqual(jobs[1].array[0].result, 1)
      for i in range(1,4):
        self.assertEqual(jobs[1].array[i].status, 'success')
        self.assertEqual(jobs[1].array[i].result, 0)
      self.assertEqual(jobs[2].status, 'success')
      self.assertEqual(jobs[2].result, 0)
      job_manager.unlock()

      print()
      # test that the list command still works
      jman.main(['./bin/jman', '--database', self.database, 'list', '--print-array-jobs'])

      print()
      # test that the report command works
      jman.main(['./bin/jman', '--database', self.database, 'report'])

      # clean-up
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete', '--job-ids', '1-3'])

      # check that the database and the log files are gone
      self.assertEqual(len(os.listdir(self.temp_dir)), 0)

      # add the scripts again, but this time with the --stop-on-failure option
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_1', '--stop-on-failure', bash, script_1])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_2',  '--dependencies', '1', '--parametric', '1-7:2', '--stop-on-failure', bash, script_2])
      jman.main(['./bin/jman', '--local', '--database', self.database, 'submit', '--log-dir', self.log_dir, '--name', 'test_3',  '--dependencies', '1', '2', '--exec-dir', rdir, '--stop-on-failure', bash, "test_array.sh"])

      # and execute them, but without writing the log files
      self.scheduler_job = subprocess.Popen(['./bin/jman', '--local', '--database', self.database, 'run-scheduler', '--sleep-time', '0.1', '--parallel', '2', '--die-when-finished', '--no-log-files'])
      # and wait for the job to finish (the timeout argument to Popen only exists from python 3.3 onwards)
      self.scheduler_job.wait()
      self.scheduler_job = None

      # assert that the log files are not there
      self.assertFalse(os.path.isfile(out_file))
      self.assertFalse(os.path.isfile(err_file))


      # check that all array jobs are finished now
      session = job_manager.lock()
      jobs = list(session.query(Job))
      self.assertEqual(len(jobs), 3)
      self.assertEqual(jobs[0].status, 'failure')
      self.assertEqual(jobs[0].result, 255)
      self.assertEqual(jobs[1].status, 'failure')
      # with --stop-on-failure, dependent jobs never ran, so they carry no result
      self.assertTrue(jobs[1].result is None)
      self.assertEqual(jobs[2].status, 'failure')
      self.assertTrue(jobs[2].result is None)
      job_manager.unlock()

      # and clean up again
      jman.main(['./bin/jman', '--local', '--database', self.database, 'delete'])
      self.assertEqual(len(os.listdir(self.temp_dir)), 0)

    except KeyboardInterrupt:
      # make sure that the keyboard interrupt is captured and the mess is cleaned up (i.e. by calling tearDown)
      pass


  def notest02_grid(self):
    """Tests the functionality of the grid toolkit in the grid (disabled: name does not match test discovery)."""
    import nose
    raise nose.plugins.skip.SkipTest("This test is not yet implemented. If you find a proper ways to test the grid functionality, please go ahead and implement the test.")