Changeset 13 for trunk/slurm_drmaa/job.c
Timestamp: 01/02/11 17:13:14
File: trunk/slurm_drmaa/job.c (1 edited)
trunk/slurm_drmaa/job.c
--- trunk/slurm_drmaa/job.c (r7)
+++ trunk/slurm_drmaa/job.c (r13)
@@ -2,5 +2,5 @@
 /*
  * PSNC DRMAA for SLURM
- * Copyright (C) 2010 Poznan Supercomputing and Networking Center
+ * Copyright (C) 2011 Poznan Supercomputing and Networking Center
  *
  * This program is free software: you can redistribute it and/or modify
@@ -62,4 +62,5 @@
 	job_desc.job_id = atoi(self->job_id);
 	job_desc.priority = 0;
+	job_desc.alloc_sid = 0;
 	if(slurm_update_job(&job_desc) == -1) {
 		fsd_exc_raise_fmt(	FSD_ERRNO_INTERNAL_ERROR,"slurm_update_job error: %s,job_id: %s",slurm_strerror(slurm_get_errno()),self->job_id);
@@ -126,36 +127,55 @@
 	switch(job_info->job_array[0].state_reason)
 	{
-		case WAIT_NO_REASON:
-		case WAIT_PRIORITY:
-		case WAIT_DEPENDENCY:
-		case WAIT_RESOURCES:
-		case WAIT_PART_NODE_LIMIT:
-		case WAIT_PART_TIME_LIMIT:
+		case WAIT_NO_REASON:   /* not set or job not pending */
+		case WAIT_PRIORITY:    /* higher priority jobs exist */
+		case WAIT_DEPENDENCY:  /* dependent job has not completed */
+		case WAIT_RESOURCES:   /* required resources not available */
+		case WAIT_PART_NODE_LIMIT:  /* request exceeds partition node limit */
+		case WAIT_PART_TIME_LIMIT:  /* request exceeds partition time limit */
+#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
 		case WAIT_PART_STATE:
+#endif
+#if SLURM_VERSION_NUMBER >= SLURM_VERSION_NUM(2,2,0)
+		case WAIT_PART_DOWN:      /* requested partition is down */
+		case WAIT_PART_INACTIVE:  /* requested partition is inactive */
+#endif
 			self->state = DRMAA_PS_QUEUED_ACTIVE;
 			break;
-		case WAIT_HELD:
+#if SLURM_VERSION_NUMBER >= SLURM_VERSION_NUM(2,2,0)
+		case WAIT_HELD_USER:  /* job is held by user */
+#endif
 			self->state = DRMAA_PS_USER_ON_HOLD;
 			break;
-		case WAIT_TIME:
-		case WAIT_LICENSES:
-		case WAIT_ASSOC_JOB_LIMIT:
-		case WAIT_ASSOC_RESOURCE_LIMIT:
-		case WAIT_ASSOC_TIME_LIMIT:
-		case WAIT_RESERVATION:
-		case WAIT_NODE_NOT_AVAIL:
+		case WAIT_HELD:  /* job is held by administrator */
+			self->state = DRMAA_PS_SYSTEM_ON_HOLD;
+			break;
+		case WAIT_TIME:      /* job waiting for specific begin time */
+		case WAIT_LICENSES:  /* job is waiting for licenses */
+		case WAIT_ASSOC_JOB_LIMIT:       /* user/bank job limit reached */
+		case WAIT_ASSOC_RESOURCE_LIMIT:  /* user/bank resource limit reached */
+		case WAIT_ASSOC_TIME_LIMIT:      /* user/bank time limit reached */
+		case WAIT_RESERVATION:     /* reservation not available */
+		case WAIT_NODE_NOT_AVAIL:  /* required node is DOWN or DRAINED */
+#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
 		case WAIT_TBD1:
+#endif
 		case WAIT_TBD2:
 			self->state = DRMAA_PS_QUEUED_ACTIVE;
 			break;
-		case FAIL_DOWN_PARTITION:
-		case FAIL_DOWN_NODE:
-		case FAIL_BAD_CONSTRAINTS:
-		case FAIL_SYSTEM:
-		case FAIL_LAUNCH:
-		case FAIL_EXIT_CODE:
-		case FAIL_TIMEOUT:
-		case FAIL_INACTIVE_LIMIT:
+		case FAIL_DOWN_PARTITION:   /* partition for job is DOWN */
+		case FAIL_DOWN_NODE:        /* some node in the allocation failed */
+		case FAIL_BAD_CONSTRAINTS:  /* constraints can not be satisfied */
+		case FAIL_SYSTEM:           /* slurm system failure */
+		case FAIL_LAUNCH:           /* unable to launch job */
+		case FAIL_EXIT_CODE:        /* exit code was non-zero */
+		case FAIL_TIMEOUT:          /* reached end of time limit */
+		case FAIL_INACTIVE_LIMIT:   /* reached slurm InactiveLimit */
+#if SLURM_VERSION_NUMBER < SLURM_VERSION_NUM(2,2,0)
 		case FAIL_BANK_ACCOUNT:
+#else
+		case FAIL_ACCOUNT:  /* invalid account */
+#endif
+		case FAIL_QOS:        /* invalid QOS */
+		case WAIT_QOS_THRES:  /* required QOS threshold has been breached */
			self->state = DRMAA_PS_FAILED;
			break;
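The two functional changes in this changeset are the explicit job_desc.alloc_sid = 0; before slurm_update_job() and the SLURM_VERSION_NUMBER guards around state-reason codes that were renamed or added in SLURM 2.2 (WAIT_PART_STATE split into WAIT_PART_DOWN/WAIT_PART_INACTIVE, FAIL_BANK_ACCOUNT renamed to FAIL_ACCOUNT). Below is a minimal, self-contained sketch of the hold-job update that the second hunk touches. hold_job() is a hypothetical wrapper and the stderr handling stands in for the real fsd_exc_raise_fmt() call; zeroing alloc_sid mirrors r13, presumably so no stale session id is sent with the update request.

/* Minimal sketch, not part of the changeset: hold a job by setting its
 * priority to 0 via slurm_update_job().  hold_job() is hypothetical;
 * the real code raises fsd_exc_raise_fmt() instead of using stderr. */
#include <stdio.h>
#include <stdlib.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

static int hold_job(const char *job_id)
{
	job_desc_msg_t job_desc;

	slurm_init_job_desc_msg(&job_desc);  /* mark all fields as "no change" */
	job_desc.job_id = atoi(job_id);
	job_desc.priority = 0;               /* priority 0 holds the job */
	job_desc.alloc_sid = 0;              /* zeroed explicitly, as in r13 */

	if (slurm_update_job(&job_desc) == -1) {
		fprintf(stderr, "slurm_update_job error: %s, job_id: %s\n",
		        slurm_strerror(slurm_get_errno()), job_id);
		return -1;
	}
	return 0;
}

The compile-time guards in the third hunk use SLURM_VERSION_NUMBER and SLURM_VERSION_NUM() from the SLURM headers, so the same source tree builds against both pre-2.2 and 2.2 APIs.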