/* $Id$ */
/*
 * PSNC DRMAA for SLURM
 * Copyright (C) 2011 Poznan Supercomputing and Networking Center
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>

#include <drmaa_utils/common.h>
#include <drmaa_utils/conf.h>
#include <drmaa_utils/datetime.h>
#include <drmaa_utils/drmaa.h>
#include <drmaa_utils/drmaa_util.h>
#include <drmaa_utils/environ.h>
#include <drmaa_utils/template.h>

#include <slurm_drmaa/job.h>
#include <slurm_drmaa/session.h>
#include <slurm_drmaa/util.h>

#include <slurm/slurm.h>
#include <stdint.h>

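/*
 * DRMAA control callback: maps a DRMAA control action onto the
 * corresponding SLURM operation (suspend/resume, hold/release via a
 * priority change, terminate via SIGKILL) while holding the DRM
 * connection mutex.
 */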
static void
slurmdrmaa_job_control( fsd_job_t *self, int action )
{
	slurmdrmaa_job_t *slurm_self = (slurmdrmaa_job_t*)self;
	job_desc_msg_t job_desc;

	fsd_log_enter(( "({job_id=%s}, action=%d)", self->job_id, action ));

	fsd_mutex_lock( &self->session->drm_connection_mutex );
	TRY
	 {
		switch( action )
		 {
			case DRMAA_CONTROL_SUSPEND:
				if(slurm_suspend(fsd_atoi(self->job_id)) == -1) {
					fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_suspend error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
				}
				slurm_self->user_suspended = true;
				break;
			case DRMAA_CONTROL_HOLD:
				/* hold the job by changing its priority to 0 */
				slurm_init_job_desc_msg(&job_desc);
				/* note: job_desc has just been initialised, so this records
				   the library default rather than the job's previous priority */
				slurm_self->old_priority = job_desc.priority;
				job_desc.job_id = atoi(self->job_id);
				job_desc.priority = 0;
				job_desc.alloc_sid = 0;
				if(slurm_update_job(&job_desc) == -1) {
					fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_update_job error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
				}
				break;
			case DRMAA_CONTROL_RESUME:
				if(slurm_resume(fsd_atoi(self->job_id)) == -1) {
					fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_resume error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
				}
				slurm_self->user_suspended = false;
				break;
			case DRMAA_CONTROL_RELEASE:
				/* release the hold by restoring a non-zero priority */
				slurm_init_job_desc_msg(&job_desc);
				job_desc.priority = 1;
				job_desc.job_id = atoi(self->job_id);
				if(slurm_update_job(&job_desc) == -1) {
					fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_update_job error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
				}
				break;
			case DRMAA_CONTROL_TERMINATE:
				if(slurm_kill_job(fsd_atoi(self->job_id),SIGKILL,0) == -1) {
					fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_terminate_job error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
				}
				break;
			default:
				fsd_exc_raise_fmt(
						FSD_ERRNO_INVALID_ARGUMENT,
						"job::control: unknown action %d", action );
		 }

		fsd_log_debug(("job::control: successful"));
	 }
	FINALLY
	 {
		fsd_mutex_unlock( &self->session->drm_connection_mutex );
	 }
	END_TRY

	fsd_log_return(( "" ));
}

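/*
 * DRMAA status callback: loads the job's information from the SLURM
 * controller and maps its job_state/state_reason pair onto a DRMAA
 * program status (DRMAA_PS_*) value.
 */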
static void
slurmdrmaa_job_update_status( fsd_job_t *self )
{
	job_info_msg_t *job_info = NULL;
	slurmdrmaa_job_t *slurm_self = (slurmdrmaa_job_t *) self;
	fsd_log_enter(( "({job_id=%s})", self->job_id ));

	fsd_mutex_lock( &self->session->drm_connection_mutex );
	TRY
	 {
		if ( slurm_load_job( &job_info, fsd_atoi(self->job_id), SHOW_ALL) ) {
			fsd_exc_raise_fmt( FSD_ERRNO_INTERNAL_ERROR,"slurm_load_jobs error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
		}

		self->exit_status = job_info->job_array[0].exit_code;
		fsd_log_debug(("exit_status = %d -> %d",self->exit_status, WEXITSTATUS(self->exit_status)));

		switch(job_info->job_array[0].job_state)
		 {
			case JOB_PENDING:
				switch(job_info->job_array[0].state_reason)
				 {
					case WAIT_NO_REASON:       /* not set or job not pending */
					case WAIT_PRIORITY:        /* higher priority jobs exist */
					case WAIT_DEPENDENCY:      /* dependent job has not completed */
					case WAIT_RESOURCES:       /* required resources not available */
					case WAIT_PART_NODE_LIMIT: /* request exceeds partition node limit */
					case WAIT_PART_TIME_LIMIT: /* request exceeds partition time limit */
#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
					case WAIT_PART_STATE:
#endif
#if SLURM_VERSION_NUMBER >= SLURM_VERSION_NUM(2,2,0)
					case WAIT_PART_DOWN:       /* requested partition is down */
					case WAIT_PART_INACTIVE:   /* requested partition is inactive */
#endif
						self->state = DRMAA_PS_QUEUED_ACTIVE;
						break;
#if SLURM_VERSION_NUMBER >= SLURM_VERSION_NUM(2,2,0)
					case WAIT_HELD_USER:       /* job is held by user */
						self->state = DRMAA_PS_USER_ON_HOLD;
						break;
					case WAIT_HELD:            /* job is held by administrator */
						self->state = DRMAA_PS_SYSTEM_ON_HOLD;
						break;
#else
					case WAIT_HELD:
						self->state = DRMAA_PS_USER_ON_HOLD;
						break;
#endif
					case WAIT_TIME:            /* job waiting for specific begin time */
					case WAIT_LICENSES:        /* job is waiting for licenses */
					case WAIT_ASSOC_JOB_LIMIT:      /* user/bank job limit reached */
					case WAIT_ASSOC_RESOURCE_LIMIT: /* user/bank resource limit reached */
					case WAIT_ASSOC_TIME_LIMIT:     /* user/bank time limit reached */
					case WAIT_RESERVATION:     /* reservation not available */
					case WAIT_NODE_NOT_AVAIL:  /* required node is DOWN or DRAINED */
#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
					case WAIT_TBD1:
#endif
					case WAIT_TBD2:
						self->state = DRMAA_PS_QUEUED_ACTIVE;
						break;
					case FAIL_DOWN_PARTITION:  /* partition for job is DOWN */
					case FAIL_DOWN_NODE:       /* some node in the allocation failed */
					case FAIL_BAD_CONSTRAINTS: /* constraints can not be satisfied */
					case FAIL_SYSTEM:          /* slurm system failure */
					case FAIL_LAUNCH:          /* unable to launch job */
					case FAIL_EXIT_CODE:       /* exit code was non-zero */
					case FAIL_TIMEOUT:         /* reached end of time limit */
					case FAIL_INACTIVE_LIMIT:  /* reached slurm InactiveLimit */
#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
					case FAIL_BANK_ACCOUNT:
#else
					case FAIL_ACCOUNT:         /* invalid account */
#endif
#if SLURM_VERSION_NUMBER >= SLURM_VERSION_NUM(2,2,0)
					case FAIL_QOS:             /* invalid QOS */
					case WAIT_QOS_THRES:       /* required QOS threshold has been breached */
#endif
						self->state = DRMAA_PS_FAILED;
						break;
					default:
						fsd_log_error(("job_state_reason = %d, assert(0)",job_info->job_array[0].state_reason));
						fsd_assert(false);
				 }
				break;
			case JOB_RUNNING:
				self->state = DRMAA_PS_RUNNING;
				break;
			case JOB_SUSPENDED:
				if(slurm_self->user_suspended == true)
					self->state = DRMAA_PS_USER_SUSPENDED;
				else
					self->state = DRMAA_PS_SYSTEM_SUSPENDED; /* assume SYSTEM - suspending jobs is an administrator-only operation */
				break;
			case JOB_COMPLETE:
				self->state = DRMAA_PS_DONE;
				break;
			case JOB_CANCELLED:
				self->exit_status = -1;
				/* fall through */
			case JOB_FAILED:
			case JOB_TIMEOUT:
			case JOB_NODE_FAIL:
				self->state = DRMAA_PS_FAILED;
				break;
			default: /* transient states */
				if(job_info->job_array[0].job_state >= 0x8000) {
					/* COMPLETING flag bit is set */
					fsd_log_debug(("state COMPLETING"));
				}
				else if (job_info->job_array[0].job_state >= 0x4000) {
					/* CONFIGURING flag bit is set */
					fsd_log_debug(("state Allocated nodes booting"));
				}
				else {
					fsd_log_error(("job_state = %d, assert(0)",job_info->job_array[0].job_state));
					fsd_assert(false);
				}
		 }

		if(self->exit_status == -1) /* input, output or error path failure, etc. */
			self->state = DRMAA_PS_FAILED;

		fsd_log_debug(("state: %d ,state_reason: %d-> %s", job_info->job_array[0].job_state, job_info->job_array[0].state_reason, drmaa_job_ps_to_str(self->state)));

		self->last_update_time = time(NULL);

		if( self->state >= DRMAA_PS_DONE )
			fsd_cond_broadcast( &self->status_cond );
	 }
	FINALLY
	 {
		if(job_info != NULL)
			slurm_free_job_info_msg (job_info);

		fsd_mutex_unlock( &self->session->drm_connection_mutex );
	 }
	END_TRY

	fsd_log_return(( "" ));
}

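/*
 * Constructor for SLURM jobs: extends the generic fsd_job_t with the
 * SLURM-specific fields and installs the control/update_status callbacks.
 */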
fsd_job_t *
slurmdrmaa_job_new( char *job_id )
{
	slurmdrmaa_job_t *self = NULL;
	self = (slurmdrmaa_job_t*)fsd_job_new( job_id );

	fsd_realloc( self, 1, slurmdrmaa_job_t );

	self->super.control = slurmdrmaa_job_control;
	self->super.update_status = slurmdrmaa_job_update_status;
	self->old_priority = UINT32_MAX;
	self->user_suspended = true;
	return (fsd_job_t*)self;
}

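/*
 * Submission-time entry point: creates a placeholder expander seeded with
 * the current job number and delegates to slurmdrmaa_job_create() to fill
 * in the job_desc_msg_t.
 */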
void
slurmdrmaa_job_create_req(
		fsd_drmaa_session_t *session,
		const fsd_template_t *jt,
		fsd_environ_t **envp,
		job_desc_msg_t * job_desc,
		int n_job /* ~job_step */
		)
{
	fsd_expand_drmaa_ph_t *volatile expand = NULL;

	TRY
	 {
		expand = fsd_expand_drmaa_ph_new( NULL, NULL, fsd_asprintf("%d",n_job) );
		slurmdrmaa_job_create( session, jt, envp, expand, job_desc, n_job );
	 }
	EXCEPT_DEFAULT
	 {
		fsd_exc_reraise();
	 }
	FINALLY
	 {
		if( expand )
			expand->destroy( expand );
	 }
	END_TRY
}

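/*
 * Split a DRMAA path of the form "[hostname]:path", report whether a host
 * part was given, and expand the DRMAA placeholders in the path part.
 */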
static char *
internal_map_file( fsd_expand_drmaa_ph_t *expand, const char *path,
		bool *host_given, const char *name )
{
	const char *p;

	for( p = path; *p != ':'; p++ )
		if( *p == '\0' )
			fsd_exc_raise_fmt( FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_FORMAT,
					"invalid format of drmaa_%s_path: missing colon", name );
	if( host_given )
		*host_given = ( p != path );

	p++;

	return expand->expand( expand, fsd_strdup(p), FSD_DRMAA_PH_HD | FSD_DRMAA_PH_WD | FSD_DRMAA_PH_INCR );
}

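/*
 * Fill a job_desc_msg_t from the DRMAA job template: builds a bash wrapper
 * script from the remote command and its arguments, and translates the
 * standard DRMAA attributes (I/O paths, environment, start time, wall
 * clock limit, e-mail, native specification, job category) into their
 * SLURM equivalents.
 */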
void
slurmdrmaa_job_create(
		fsd_drmaa_session_t *session,
		const fsd_template_t *jt,
		fsd_environ_t **envp,
		fsd_expand_drmaa_ph_t *expand,
		job_desc_msg_t * job_desc,
		int n_job
		)
{
	const char *input_path_orig = NULL;
	const char *output_path_orig = NULL;
	const char *error_path_orig = NULL;
	char *volatile input_path = NULL;
	char *volatile output_path = NULL;
	char *volatile error_path = NULL;
	bool input_host = false;
	bool output_host = false;
	bool error_host = false;
	bool join_files = false;
	const char *value;
	const char *const *vector;
	const char *job_category = "default";

	slurmdrmaa_init_job_desc( job_desc );

	slurm_init_job_desc_msg( job_desc );

	job_desc->user_id = getuid();
	job_desc->group_id = getgid();

	job_desc->env_size = 0;

	/* job name */
	value = jt->get_attr( jt, DRMAA_JOB_NAME );
	if( value )
	 {
		job_desc->name = fsd_strdup(value);
		fsd_log_debug(("# job_name = %s",job_desc->name));
	 }

	/* job state at submit */
	value = jt->get_attr( jt, DRMAA_JS_STATE );
	if( value )
	 {
		if( 0 == strcmp( value, DRMAA_SUBMISSION_STATE_ACTIVE ) )
		 {}
		else if( 0 == strcmp( value, DRMAA_SUBMISSION_STATE_HOLD ) )
		 {
			job_desc->priority = 0;
			fsd_log_debug(("# hold = user"));
		 }
		else
		 {
			fsd_exc_raise_msg(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE, "invalid value of drmaa_js_state attribute" );
		 }
	 }

	TRY
	 {
		const char *command = NULL;
		char *command_expanded = NULL;
		char *temp_script_old = NULL;
		char *temp_script = "";
		const char *const *i;
		int j;

		/* remote command */
		command = jt->get_attr( jt, DRMAA_REMOTE_COMMAND );
		if( command == NULL )
			fsd_exc_raise_msg(
					FSD_DRMAA_ERRNO_CONFLICTING_ATTRIBUTE_VALUES,
					"drmaa_remote_command not set for job template"
					);

		command_expanded = expand->expand( expand, fsd_strdup(command), FSD_DRMAA_PH_HD | FSD_DRMAA_PH_WD );

		temp_script = fsd_asprintf("#!/bin/bash\n%s",command_expanded);
		fsd_free(command_expanded);

		/* arguments list */
		vector = jt->get_v_attr( jt, DRMAA_V_ARGV );

		if( vector )
		 {
			for( i = vector, j = 2; *i; i++, j++ )
			 {
				char *arg_expanded = expand->expand( expand, fsd_strdup(*i), FSD_DRMAA_PH_HD | FSD_DRMAA_PH_WD );

				temp_script_old = fsd_strdup(temp_script);

				if (strcmp(temp_script, "") != 0) {
					fsd_free(temp_script);
				}
				/* append the quoted argument to the script */
				temp_script = fsd_asprintf("%s '%s'", temp_script_old, arg_expanded);
				fsd_free(temp_script_old);
				fsd_free(arg_expanded);
			 }
		 }

		job_desc->script = fsd_asprintf("%s\n", temp_script);
		fsd_log_debug(("# Script:\n%s", job_desc->script));
		fsd_free(temp_script);
	 }
	END_TRY

	/* start time */
	value = jt->get_attr( jt, DRMAA_START_TIME );
	if( value )
	 {
		job_desc->begin_time = fsd_datetime_parse( value );
		fsd_log_debug(( "\n drmaa_start_time: %s -> %ld", value, (long)job_desc->begin_time));
	 }

	/* propagate all environment variables from the submission host */
	 {
		extern char **environ;
		char **i;
		unsigned j = 0;

		for ( i = environ; *i; i++) {
			job_desc->env_size++;
		}

		fsd_log_debug(("environ env_size = %d",job_desc->env_size));
		fsd_calloc(job_desc->environment, job_desc->env_size+1, char *);

		for( i = environ; *i; i++,j++ )
		 {
			job_desc->environment[j] = fsd_strdup(*i);
		 }
	 }

	/* environment variables from the job template (appended after the inherited ones) */
	vector = jt->get_v_attr( jt, DRMAA_V_ENV );
	if( vector )
	 {
		const char *const *i;
		unsigned j = 0;
		unsigned env_offset = job_desc->env_size;

		for( i = vector; *i; i++ )
		 {
			job_desc->env_size++;
		 }
		fsd_log_debug(("jt env_size = %d",job_desc->env_size));

		fsd_log_debug(("# environment ="));
		fsd_realloc(job_desc->environment, job_desc->env_size+1, char *);

		for( i = vector; *i; i++,j++ )
		 {
			job_desc->environment[j + env_offset] = fsd_strdup(*i);
			fsd_log_debug((" %s", job_desc->environment[j + env_offset]));
		 }
	 }

	/* wall clock time hard limit */
	value = jt->get_attr( jt, DRMAA_WCT_HLIMIT );
	if (value)
	 {
		job_desc->time_limit = slurmdrmaa_datetime_parse( value );
		fsd_log_debug(("# wct_hlimit = %s -> %ld",value, (long int)slurmdrmaa_datetime_parse( value )));
	 }


	/*expand->set(expand, FSD_DRMAA_PH_INCR,fsd_asprintf("%d", n_job));*/ /* set current value */
	/* TODO: test drmaa_ph_incr */
	/* job working directory */
	value = jt->get_attr( jt, DRMAA_WD );
	if( value )
	 {
		char *cwd_expanded = expand->expand( expand, fsd_strdup(value), FSD_DRMAA_PH_HD | FSD_DRMAA_PH_INCR );

		expand->set( expand, FSD_DRMAA_PH_WD, fsd_strdup(cwd_expanded));

		fsd_log_debug(("# work_dir = %s",cwd_expanded));
		job_desc->work_dir = fsd_strdup(cwd_expanded);
		fsd_free(cwd_expanded);
	 }
	else
	 {
		char cwdbuf[4096] = "";

		if ((getcwd(cwdbuf, 4095)) == NULL) {
			char errbuf[256] = "InternalError";
			(void)strerror_r(errno, errbuf, 256); /* on error the pre-filled default message is kept */
			fsd_log_error(("getcwd failed: %s", errbuf));
			job_desc->work_dir = fsd_strdup(".");
		} else {
			job_desc->work_dir = fsd_strdup(cwdbuf);
		}

		fsd_log_debug(("work_dir(default:CWD) %s", job_desc->work_dir));
	 }

	TRY
	 {
		/* input path */
		input_path_orig = jt->get_attr( jt, DRMAA_INPUT_PATH );
		if( input_path_orig )
		 {
			input_path = internal_map_file( expand, input_path_orig, &input_host,"input" );
			fsd_log_debug(( "\n drmaa_input_path: %s -> %s", input_path_orig, input_path ));
		 }

		/* output path */
		output_path_orig = jt->get_attr( jt, DRMAA_OUTPUT_PATH );
		if( output_path_orig )
		 {
			output_path = internal_map_file( expand, output_path_orig, &output_host,"output" );
			fsd_log_debug(( "\n drmaa_output_path: %s -> %s", output_path_orig, output_path ));
		 }

		/* error path */
		error_path_orig = jt->get_attr( jt, DRMAA_ERROR_PATH );
		if( error_path_orig )
		 {
			error_path = internal_map_file( expand, error_path_orig, &error_host,"error" );
			fsd_log_debug(( "\n drmaa_error_path: %s -> %s", error_path_orig, error_path ));
		 }

		/* join files */
		value = jt->get_attr( jt, DRMAA_JOIN_FILES );
		if( value )
		 {
			if( (value[0] == 'y' || value[0] == 'Y') && value[1] == '\0' )
				join_files = true;
			else if( (value[0] == 'n' || value[0] == 'N') && value[1] == '\0' )
				join_files = false;
			else
				fsd_exc_raise_msg(
						FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,
						"invalid value of drmaa_join_files attribute" );
		 }

		if( join_files )
		 {
			if( output_path == NULL )
				fsd_exc_raise_msg(FSD_DRMAA_ERRNO_CONFLICTING_ATTRIBUTE_VALUES, "drmaa_join_files is set and output file is not given" );
			if( error_path!=NULL && 0 != strcmp( output_path, error_path ) )
				fsd_log_warning(( "Error file was given but will be ignored since drmaa_join_files was set." ));

			if (error_path)
				fsd_free(error_path);

			error_path = fsd_strdup(output_path);
		 }
		else
		 {
			if( error_path == NULL && output_path )
				error_path = fsd_strdup( "/dev/null" );
			if( output_path == NULL && error_path )
				output_path = fsd_strdup( "/dev/null" );
		 }

		/* e-mail addresses to send notifications to */
		vector = jt->get_v_attr( jt, DRMAA_V_EMAIL );
		if( vector && vector[0] )
		 {
			/* notifications can be sent to only one e-mail address */
			job_desc->mail_user = fsd_strdup(vector[0]);
			fsd_log_debug(("# mail_user = %s\n",vector[0]));
			if( vector[1] != NULL )
			 {
				fsd_log_error(( "Only one e-mail notification address is supported" ));
				fsd_exc_raise_msg(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,"Only one e-mail notification address is supported");
			 }
		 }

		/* block email */
		value = jt->get_attr( jt, DRMAA_BLOCK_EMAIL );
		if( value )
		 {
			bool block;
			if( strcmp(value, "0") == 0 )
			 {
				block = true;
				fsd_log_debug(("# block_email = true"));
				fsd_log_debug(("# mail_user deleted"));
				fsd_free(job_desc->mail_user);
				job_desc->mail_user = NULL;
			 }
			else if( strcmp(value, "1") == 0 )
				block = false;
			else
				fsd_exc_raise_msg(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,"invalid value of drmaa_block_email attribute" );

			if( block && output_path == NULL )
			 {
				fsd_log_debug(( "output path not set and we want to block e-mail, set to /dev/null" ));
				output_path = fsd_strdup( "/dev/null" );
			 }
		 }

		if( input_path )
		 {
			job_desc->std_in = fsd_strdup(input_path);
			fsd_log_debug(("# input = %s", input_path));
		 }

		if( output_path )
		 {
			job_desc->std_out = fsd_strdup(output_path);
			fsd_log_debug(("# output = %s", output_path));
		 }

		if( error_path )
		 {
			job_desc->std_err = fsd_strdup(error_path);
			fsd_log_debug(("# error = %s", error_path));
		 }
	 }
	FINALLY
	 {
		fsd_free( input_path );
		fsd_free( output_path );
		fsd_free( error_path );
		input_path = NULL;
		output_path = NULL;
		error_path = NULL;
	 }
	END_TRY

	/* native specification */
	value = jt->get_attr( jt, DRMAA_NATIVE_SPECIFICATION );
	if( value )
	 {
		fsd_log_debug(("# Native specification: %s\n", value));
		slurmdrmaa_parse_native(job_desc, value);
	 }

	/* job category */
	value = jt->get_attr( jt, DRMAA_JOB_CATEGORY );
	if( value )
		job_category = value;

	 {
		fsd_conf_option_t *category_value = NULL;
		category_value = fsd_conf_dict_get( session->job_categories, job_category );

		if( category_value != NULL )
		 {
			if( category_value->type != FSD_CONF_STRING )
				fsd_exc_raise_fmt(
						FSD_ERRNO_INTERNAL_ERROR,
						"configuration error: job category should be string"
						);

			fsd_log_debug(("# Job category %s : %s\n",value,category_value->val.string));
			slurmdrmaa_parse_native(job_desc,category_value->val.string);
		 }
		else
		 {
			if( value != NULL )
				fsd_exc_raise_fmt(
						FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,
						"invalid job category: %s", job_category
						);
		 }
	 }

}
---|