-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathplacement2-submit
More file actions
65 lines (50 loc) · 1.38 KB
/
placement2-submit
File metadata and controls
65 lines (50 loc) · 1.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
############
#
# Parallel Job
#
############
# HTCondor submit description for a two-node parallel-universe job.
# Node 0 is pinned to the source host and node 1 to the destination
# host (see the two queue statements below); both run placement2.py.
universe = parallel
executable = placement2.py
# Submit-file macros: transfer endpoints and paths, expanded with
# $(NAME) in the directives below.
SRC_HOST=komatsu.chtc.wisc.edu
SRC_PATH=/home/idpl/100M
DST_HOST=murpa.rocksclusters.org
DST_PATH=100M
### Crondor Settings
# A promise that jobs will not run more often than this (in seconds)
# Required for the job to run multiple times successfully.
#LEASE=1500
# A run is allowed to take this long (in seconds) to set up; otherwise
# that run is skipped
cron_window=60
# Try to run jobs on this schedule: every hour, at minute 15.
cron_hour=0-23/1
cron_minute=15
#
# Keep running the job: do not remove it from the queue when it exits,
# so the cron_* schedule can trigger the next run.
on_exit_remove=false
# Arguments are:
# 1. File to send (on the sending host)
# 2. Location to write file (on the receiving host)
arguments = -i $(SRC_PATH) -o $(DST_PATH)
## Enable Chirp
+WantIOProxy = true
input = /dev/null
# Per-node stdout/stderr ($(Node) is 0 for the first queue, 1 for the
# second); a single shared job event log.
output = placement2.out.$(Node)
error = placement2.err.$(Node)
log = placement2.log
getenv = true
# Custom ClassAd attributes advertised with the job — presumably read
# back by placement2.py (e.g. via Chirp); confirm against the script.
+SrcPath = "$(SRC_PATH)"
+DstHost = "$(DST_HOST)"
+DstPath = "$(DST_PATH)"
+ParallelShutdownPolicy = "WAIT_FOR_ALL"
transfer_input_files = DataMover.py,TimedExec.py,IDPLException.py,CondorTools.py,empty
should_transfer_files = YES
when_to_transfer_output = ON_EXIT
# Node 0: one slot, forced onto the source host.
machine_count = 1
requirements = (Machine == "$(SRC_HOST)")
transfer_output_files = empty
queue
# Node 1: one slot, forced onto the destination host.
machine_count = 1
requirements = (Machine == "$(DST_HOST)")
transfer_output_files = empty
queue