  1. ######################################################################
  2. ##
  3. ## condor_config
  4. ##
  5. ## This is the global configuration file for condor. Any settings
  6. ## made here may potentially be overridden in the local configuration
  7. ## file. KEEP THAT IN MIND! To double-check that a variable is
  8. ## getting set from the configuration file that you expect, use
  9. ## condor_config_val -v <variable name>
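##      For example, to see where CONDOR_ADMIN is being set:
##      condor_config_val -v CONDOR_ADMIN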
  10. ##
  11. ## The file is divided into four main parts:
  12. ## Part 1: Settings you likely want to customize
  13. ## Part 2: Settings you may want to customize
  14. ## Part 3: Settings that control the policy of when condor will
  15. ## start and stop jobs on your machines
  16. ## Part 4: Settings you should probably leave alone (unless you
  17. ## know what you're doing)
  18. ##
  19. ## Please read the INSTALL file (or the Install chapter in the
  20. ## Condor Administrator's Manual) for detailed explanations of the
  21. ## various settings in here and possible ways to configure your
  22. ## pool.
  23. ##
  24. ## Unless otherwise specified, settings that are commented out show
  25. ## the defaults that are used if you don't define a value. Settings
  26. ## that are defined here MUST BE DEFINED since they have no default
  27. ## value.
  28. ##
  29. ## Unless otherwise indicated, all settings which specify a time are
  30. ## defined in seconds.
  31. ##
  32. ######################################################################
  33.  
  34. ######################################################################
  35. ######################################################################
  36. ##
  44. ##
  45. ## Part 1: Settings you likely want to customize:
  46. ######################################################################
  47. ######################################################################
  48.  
  49. ## What machine is your central manager?
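## (No CONDOR_HOST is set at this point in this file. The stock
##  placeholder looks like the commented line below; the real value is
##  presumably supplied elsewhere, e.g. in the local config file.)
#CONDOR_HOST = central-manager-hostname.your.domain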
  50.  
  51. ##--------------------------------------------------------------------
  52. ## Pathnames:
  53. ##--------------------------------------------------------------------
  54. ## Where have you installed the bin, sbin and lib condor directories?
  55. RELEASE_DIR = /usr/local/condor
  56.  
  57. ## Where is the local condor directory for each host?
  58. ## This is where the local config file(s), logs and
  59. ## spool/execute directories are located
  60. LOCAL_DIR = $(TILDE)
  61. #LOCAL_DIR = $(RELEASE_DIR)/hosts/$(HOSTNAME)
  62.  
  63. ## Where is the machine-specific local config file for each host?
  64. LOCAL_CONFIG_FILE = /home/condor/condor_config.local
  65.  
  66. ## Where are optional machine-specific local config files located?
  67. ## Config files are included in lexicographic order.
  68. LOCAL_CONFIG_DIR = $(LOCAL_DIR)/config
  69. #LOCAL_CONFIG_DIR = $(LOCAL_DIR)/config
  70.  
  71. ## Blacklist for file processing in the LOCAL_CONFIG_DIR
  72. ## LOCAL_CONFIG_DIR_EXCLUDE_REGEXP = ^((\..*)|(.*~)|(#.*)|(.*\.rpmsave)|(.*\.rpmnew))$
  73.  
  74. ## If the local config file is not present, is it an error?
  75. ## WARNING: This is a potential security issue.
  76. ## If not specified, the default is True
  77. #REQUIRE_LOCAL_CONFIG_FILE = TRUE
  78.  
  79. ##--------------------------------------------------------------------
  80. ## Mail parameters:
  81. ##--------------------------------------------------------------------
  82. ## When something goes wrong with condor at your site, who should get
  83. ## the email?
  84. CONDOR_ADMIN = dunng@uncw.edu
  85.  
  86. ## Full path to a mail delivery program that understands that "-s"
  87. ## means you want to specify a subject:
  88. MAIL = /bin/mail
  89.  
  90. ##--------------------------------------------------------------------
  91. ## Network domain parameters:
  92. ##--------------------------------------------------------------------
  93. ## Internet domain of machines sharing a common UID space. If your
  94. ## machines don't share a common UID space, set it to
  95. ## UID_DOMAIN = $(FULL_HOSTNAME)
  96. ## to specify that each machine has its own UID space.
  97. UID_DOMAIN = uncw.edu
  98.  
  99. ## Internet domain of machines sharing a common file system.
  100. ## If your machines don't use a network file system, set it to
  101. ## FILESYSTEM_DOMAIN = $(FULL_HOSTNAME)
  102. ## to specify that each machine has its own file system.
  103. FILESYSTEM_DOMAIN = $(FULL_HOSTNAME)
  104.  
  105. ## This macro is used to specify a short description of your pool.
  106. ## It should be about 20 characters long. For example, the name of
  107. ## the UW-Madison Computer Science Condor Pool is ``UW-Madison CS''.
  108. COLLECTOR_NAME = My Pool - $(CONDOR_HOST)
  109.  
  110. ######################################################################
  111. ######################################################################
  112. ##
  120. ##
  121. ## Part 2: Settings you may want to customize:
  122. ## (it is generally safe to leave these untouched)
  123. ######################################################################
  124. ######################################################################
  125.  
  126. ##
  127. ## The user/group ID <uid>.<gid> of the "Condor" user.
  128. ## (this can also be specified in the environment)
  129. ## Note: the CONDOR_IDS setting is ignored on Win32 platforms
  130. #CONDOR_IDS=x.x
  131.  
  132. ##--------------------------------------------------------------------
  133. ## Flocking: Submitting jobs to more than one pool
  134. ##--------------------------------------------------------------------
  135. ## Flocking allows you to run your jobs in other pools, or lets
  136. ## others run jobs in your pool.
  137. ##
  138. ## To let others flock to you, define FLOCK_FROM.
  139. ##
  140. ## To flock to others, define FLOCK_TO.
  141.  
  142. ## FLOCK_FROM defines the machines where you would like to grant
  143. ## people access to your pool via flocking. (i.e. you are granting
  144. ## access to these machines to join your pool).
  145. FLOCK_FROM =
  146. ## An example of this is:
  147. #FLOCK_FROM = somehost.friendly.domain, anotherhost.friendly.domain
  148.  
  149. ## FLOCK_TO defines the central managers of the pools that you want
  150. ## to flock to. (i.e. you are specifying the machines that you
  151. ## want your jobs to be negotiated at -- thereby specifying the
  152. ## pools they will run in.)
  153. FLOCK_TO =
  154. ## An example of this is:
  155. #FLOCK_TO = central_manager.friendly.domain, condor.cs.wisc.edu
  156.  
  157. ## FLOCK_COLLECTOR_HOSTS should almost always be the same as
  158. ## FLOCK_NEGOTIATOR_HOSTS (as shown below). The only reason it would be
  159. ## different is if the collector and negotiator in the pool that you are
  160. ## flocking to are running on different machines (not recommended).
  161. ## The collectors must be specified in the same corresponding order as
  162. ## the FLOCK_NEGOTIATOR_HOSTS list.
  163. FLOCK_NEGOTIATOR_HOSTS = $(FLOCK_TO)
  164. FLOCK_COLLECTOR_HOSTS = $(FLOCK_TO)
  165. ## An example of having the negotiator and the collector on different
  166. ## machines is:
  167. #FLOCK_NEGOTIATOR_HOSTS = condor.cs.wisc.edu, condor-negotiator.friendly.domain
  168. #FLOCK_COLLECTOR_HOSTS = condor.cs.wisc.edu, condor-collector.friendly.domain
  169.  
  170. ##--------------------------------------------------------------------
  171. ## Host/IP access levels
  172. ##--------------------------------------------------------------------
  173. ## Please see the administrator's manual for details on these
  174. ## settings, what they're for, and how to use them.
  175.  
  176. ## What machines have administrative rights for your pool? This
  177. ## defaults to your central manager. You should set it to the
  178. ## machine(s) where whoever is the condor administrator(s) works
  179. ## (assuming you trust all the users who log into that/those
  180. ## machine(s), since this is machine-wide access you're granting).
  181. ALLOW_ADMINISTRATOR = $(CONDOR_HOST), $(IP_ADDRESS)
  182.  
  183. ## If there are no machines that should have administrative access
  184. ## to your pool (for example, there's no machine where only trusted
  185. ## users have accounts), you can uncomment this setting.
  186. ## Unfortunately, this will mean that administering your pool will
  187. ## be more difficult.
  188. #DENY_ADMINISTRATOR = *
  189.  
  190. ## What machines should have "owner" access to your machines, meaning
  191. ## they can issue commands that a machine owner should be able to
  192. ## issue to their own machine (like condor_vacate). This defaults to
  193. ## machines with administrator access, and the local machine. This
  194. ## is probably what you want.
  195. ALLOW_OWNER = $(FULL_HOSTNAME), $(ALLOW_ADMINISTRATOR)
  196.  
  197. ## Read access. Machines listed as allow (and/or not listed as deny)
  198. ## can view the status of your pool, but cannot join your pool
  199. ## or run jobs.
  200. ## NOTE: By default, without these entries customized, you
  201. ## are granting read access to the whole world. You may want to
  202. ## restrict that to hosts in your domain. If possible, please also
  203. ## grant read access to "*.cs.wisc.edu", so the Condor developers
  204. ## will be able to view the status of your pool and more easily help
  205. ## you install, configure or debug your Condor installation.
  206. ## It is important to have this defined.
  207. ALLOW_READ = *
  208. #ALLOW_READ = *.your.domain, *.cs.wisc.edu
  209. #DENY_READ = *.bad.subnet, bad-machine.your.domain, 144.77.88.*
  210.  
  211. ## Write access. Machines listed here can join your pool, submit
  212. ## jobs, etc. Note: Any machine which has WRITE access must
  213. ## also be granted READ access. Granting WRITE access below does
  214. ## not also automatically grant READ access; you must change
  215. ## ALLOW_READ above as well.
  216. ##
  217. ## You must set this to something else before Condor will run.
  218. ## The simplest option is:
  219. ## ALLOW_WRITE = *
  220. ## but note that this will allow anyone to submit jobs or add
  221. ## machines to your pool and is a serious security risk.
  222.  
  223. ALLOW_WRITE = $(FULL_HOSTNAME), $(IP_ADDRESS)
  224. #ALLOW_WRITE = *.your.domain, your-friend's-machine.other.domain
  225. #DENY_WRITE = bad-machine.your.domain
  226.  
  227. ## Are you upgrading to a new version of Condor and confused about
  228. ## why the above ALLOW_WRITE setting is causing Condor to refuse to
  229. ## start up? If you are upgrading from a configuration that uses
  230. ## HOSTALLOW/HOSTDENY instead of ALLOW/DENY we recommend that you
  231. ## convert all uses of the former to the latter. The syntax of the
  232. ## authorization settings is identical. They both support
  233. ## unauthenticated IP-based authorization as well as authenticated
  234. ## user-based authorization. To avoid confusion, the use of
  235. ## HOSTALLOW/HOSTDENY is discouraged. Support for it may be removed
  236. ## in the future.
  237.  
  238. ## Negotiator access. Machines listed here are trusted central
  239. ## managers. You should normally not have to change this.
  240. ALLOW_NEGOTIATOR = $(CONDOR_HOST), $(IP_ADDRESS)
  241. ## Now, with flocking we need to let the SCHEDD trust the other
  242. ## negotiators we are flocking with as well. You should normally
  243. ## not have to change this either.
  244. ALLOW_NEGOTIATOR_SCHEDD = $(CONDOR_HOST), $(FLOCK_NEGOTIATOR_HOSTS), $(IP_ADDRESS)
  245.  
  246. ## Config access. Machines listed here can use the condor_config_val
  247. ## tool to modify all daemon configurations. This level of host-wide
  248. ## access should only be granted with extreme caution. By default,
  249. ## config access is denied from all hosts.
  250. #ALLOW_CONFIG = trusted-host.your.domain
  251.  
  252. ## Flocking Configs. These are the real things that Condor looks at,
  253. ## but we set them from the FLOCK_FROM/TO macros above. It is safe
  254. ## to leave these unchanged.
  255. ALLOW_WRITE_COLLECTOR = $(ALLOW_WRITE), $(FLOCK_FROM)
  256. ALLOW_WRITE_STARTD = $(ALLOW_WRITE), $(FLOCK_FROM)
  257. ALLOW_READ_COLLECTOR = $(ALLOW_READ), $(FLOCK_FROM)
  258. ALLOW_READ_STARTD = $(ALLOW_READ), $(FLOCK_FROM)
  259.  
  260.  
  261. ##--------------------------------------------------------------------
  262. ## Security parameters for setting configuration values remotely:
  263. ##--------------------------------------------------------------------
  264. ## These parameters define the list of attributes that can be set
  265. ## remotely with condor_config_val for the security access levels
  266. ## defined above (for example, WRITE, ADMINISTRATOR, CONFIG, etc).
  267. ## Please see the administrator's manual for further details on these
  268. ## settings, what they're for, and how to use them. There are no
  269. ## default values for any of these settings. If they are not
  270. ## defined, no attributes can be set with condor_config_val.
  271.  
  272. ## Do you want to allow condor_config_val -reset to work at all?
  273. ## This feature is disabled by default, so to enable, you must
  274. ## uncomment the following setting and change the value to "True".
  275. ## Note: changing this requires a restart not just a reconfig.
  276. #ENABLE_RUNTIME_CONFIG = False
  277.  
  278. ## Do you want to allow condor_config_val -set to work at all?
  279. ## This feature is disabled by default, so to enable, you must
  280. ## uncomment the following setting and change the value to "True".
  281. ## Note: changing this requires a restart not just a reconfig.
  282. #ENABLE_PERSISTENT_CONFIG = False
  283.  
  284. ## Directory where daemons should write persistent config files (used
  285. ## to support condor_config_val -set). This directory should *ONLY*
  286. ## be writable by root (or the user the Condor daemons are running as
  287. ## if non-root). There is no default, administrators must define this.
  288. ## Note: changing this requires a restart not just a reconfig.
  289. #PERSISTENT_CONFIG_DIR = /full/path/to/root-only/local/directory
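## A minimal sketch of enabling persistent remote configuration
## (the directory and attribute list below are examples only):
#ENABLE_PERSISTENT_CONFIG = True
#PERSISTENT_CONFIG_DIR = /var/lib/condor/persistent_config
#SETTABLE_ATTRS_ADMINISTRATOR = *_DEBUG, MAX_*_LOG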
  290.  
  291. ## Attributes that can be set by hosts with "CONFIG" permission (as
  292. ## defined with ALLOW_CONFIG and DENY_CONFIG above).
  293. ## The commented-out value here was the default behavior of Condor
  294. ## prior to version 6.3.3. If you don't need this behavior, you
  295. ## should leave this commented out.
  296. #SETTABLE_ATTRS_CONFIG = *
  297.  
  298. ## Attributes that can be set by hosts with "ADMINISTRATOR"
  299. ## permission (as defined above)
  300. #SETTABLE_ATTRS_ADMINISTRATOR = *_DEBUG, MAX_*_LOG
  301.  
  302. ## Attributes that can be set by hosts with "OWNER" permission (as
  303. ## defined above) NOTE: any Condor job running on a given host will
  304. ## have OWNER permission on that host by default. If you grant this
  305. ## kind of access, Condor jobs will be able to modify any attributes
  306. ## you list below on the machine where they are running. This has
  307. ## obvious security implications, so only grant this kind of
  308. ## permission for custom attributes that you define for your own use
  309. ## at your pool (custom attributes about your machines that are
  310. ## published with the STARTD_ATTRS setting, for example).
  311. #SETTABLE_ATTRS_OWNER = your_custom_attribute, another_custom_attr
  312.  
  313. ## You can also define daemon-specific versions of each of these
  314. ## settings. For example, to define settings that can only be
  315. ## changed in the condor_startd's configuration by hosts with OWNER
  316. ## permission, you would use:
  317. #STARTD_SETTABLE_ATTRS_OWNER = your_custom_attribute_name
  318.  
  319.  
  320. ##--------------------------------------------------------------------
  321. ## Network filesystem parameters:
  322. ##--------------------------------------------------------------------
  323. ## Do you want to use NFS for file access instead of remote system
  324. ## calls?
  325. #USE_NFS = False
  326.  
  327. ## Do you want to use AFS for file access instead of remote system
  328. ## calls?
  329. #USE_AFS = False
  330.  
  331. ##--------------------------------------------------------------------
  332. ## Checkpoint server:
  333. ##--------------------------------------------------------------------
  334. ## Do you want to use a checkpoint server if one is available? If a
  335. ## checkpoint server isn't available or USE_CKPT_SERVER is set to
  336. ## False, checkpoints will be written to the local SPOOL directory on
  337. ## the submission machine.
  338. #USE_CKPT_SERVER = True
  339.  
  340. ## What's the hostname of this machine's nearest checkpoint server?
  341. #CKPT_SERVER_HOST = checkpoint-server-hostname.your.domain
  342.  
  343. ## Do you want the starter on the execute machine to choose the
  344. ## checkpoint server? If False, the CKPT_SERVER_HOST set on
  345. ## the submit machine is used. Otherwise, the CKPT_SERVER_HOST set
  346. ## on the execute machine is used. The default is true.
  347. #STARTER_CHOOSES_CKPT_SERVER = True
  348.  
  349. ##--------------------------------------------------------------------
  350. ## Miscellaneous:
  351. ##--------------------------------------------------------------------
  352. ## Try to save this much swap space by not starting new shadows.
  353. ## Specified in megabytes.
  354. #RESERVED_SWAP = 0
  355.  
  356. ## What's the maximum number of jobs you want a single submit machine
  357. ## to spawn shadows for? The default is a function of $(DETECTED_MEMORY)
  358. ## and a guess at the number of ephemeral ports available.
  359.  
  360. ## Example 1:
  361. #MAX_JOBS_RUNNING = 10000
  362.  
  363. ## Example 2:
  364. ## This is more complicated, but it produces the same limit as the default.
  365. ## First define some expressions to use in our calculation.
  366. ## Assume we can use up to 80% of memory and estimate shadow private data
  367. ## size of 800k.
  368. #MAX_SHADOWS_MEM = ceiling($(DETECTED_MEMORY)*0.8*1024/800)
  369. ## Assume we can use ~21,000 ephemeral ports (avg ~2.1 per shadow).
  370. ## Under Linux, the range is set in /proc/sys/net/ipv4/ip_local_port_range.
  371. #MAX_SHADOWS_PORTS = 10000
  372. ## Under windows, things are much less scalable, currently.
  373. ## Note that this can probably be safely increased a bit under 64-bit windows.
  374. #MAX_SHADOWS_OPSYS = ifThenElse(regexp("WIN.*","$(OPSYS)"),200,100000)
  375. ## Now build up the expression for MAX_JOBS_RUNNING. This is complicated
  376. ## due to lack of a min() function.
  377. #MAX_JOBS_RUNNING = $(MAX_SHADOWS_MEM)
  378. #MAX_JOBS_RUNNING = \
  379. # ifThenElse( $(MAX_SHADOWS_PORTS) < $(MAX_JOBS_RUNNING), \
  380. # $(MAX_SHADOWS_PORTS), \
  381. # $(MAX_JOBS_RUNNING) )
  382. #MAX_JOBS_RUNNING = \
  383. # ifThenElse( $(MAX_SHADOWS_OPSYS) < $(MAX_JOBS_RUNNING), \
  384. # $(MAX_SHADOWS_OPSYS), \
  385. # $(MAX_JOBS_RUNNING) )
  386.  
  387.  
  388. ## Maximum number of simultaneous downloads of output files from
  389. ## execute machines to the submit machine (limit applied per schedd).
  390. ## The value 0 means unlimited.
  391. #MAX_CONCURRENT_DOWNLOADS = 10
  392.  
  393. ## Maximum number of simultaneous uploads of input files from the
  394. ## submit machine to execute machines (limit applied per schedd).
  395. ## The value 0 means unlimited.
  396. #MAX_CONCURRENT_UPLOADS = 10
  397.  
  398. ## Condor needs to create a few lock files to synchronize access to
  399. ## various log files. Because of problems we've had with network
  400. ## filesystems and file locking over the years, we HIGHLY recommend
  401. ## that you put these lock files on a local partition on each
  402. ## machine. If you don't have your LOCAL_DIR on a local partition,
  403. ## be sure to change this entry. Whatever user (or group) condor is
  404. ## running as needs to have write access to this directory. If
  405. ## you're not running as root, this is whatever user you started up
  406. ## the condor_master as. If you are running as root, and there's a
  407. ## condor account, it's probably condor. Otherwise, it's whatever
  408. ## you've set in the CONDOR_IDS environment variable. See the Admin
  409. ## manual for details on this.
  410. LOCK = $(LOG)
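## (If $(LOCAL_DIR) is on a network filesystem, point LOCK at a local
##  path instead; the path below is only an illustration.)
#LOCK = /var/lock/condor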
  411.  
  412. ## If you don't use a fully qualified name in your /etc/hosts file
  413. ## (or NIS, etc.) for either your official hostname or as an alias,
  414. ## Condor wouldn't normally be able to use fully qualified names in
  415. ## places that it'd like to. You can set this parameter to the
  416. ## domain you'd like appended to your hostname, if changing your host
  417. ## information isn't a good option. This parameter must be set in
  418. ## the global config file (not the LOCAL_CONFIG_FILE from above).
  419. #DEFAULT_DOMAIN_NAME = your.domain.name
  420.  
  421. ## If you don't have DNS set up, Condor will normally fail in many
  422. ## places because it can't resolve hostnames to IP addresses and
  423. ## vice-versa. If you enable this option, Condor will use
  424. ## pseudo-hostnames constructed from a machine's IP address and the
  425. ## DEFAULT_DOMAIN_NAME. Both NO_DNS and DEFAULT_DOMAIN_NAME must be set in
  426. ## your top-level config file for this mode of operation to work
  427. ## properly.
  428. #NO_DNS = True
  429.  
  430. ## Condor can be told whether or not you want the Condor daemons to
  431. ## create a core file if something really bad happens. This just
  432. ## sets the resource limit for the size of a core file. By default,
  433. ## we don't do anything, and leave in place whatever limit was in
  434. ## effect when you started the Condor daemons. If this parameter is
  435. ## set and "True", we increase the limit to as large as it gets. If
  436. ## it's set to "False", we set the limit at 0 (which means that no
  437. ## core files are even created). Core files greatly help the Condor
  438. ## developers debug any problems you might be having.
  439. #CREATE_CORE_FILES = True
  440.  
  441. ## When Condor daemons detect a fatal internal exception, they
  442. ## normally log an error message and exit. If you have turned on
  443. ## CREATE_CORE_FILES, in some cases you may also want to turn on
  444. ## ABORT_ON_EXCEPTION so that core files are generated when an
  445. ## exception occurs. Set the following to True if that is what you
  446. ## want.
  447. #ABORT_ON_EXCEPTION = False
  448.  
  449. ## If your site needs to use UID_DOMAIN settings (defined above) that
  450. ## are not real Internet domains that match the hostnames, you can
  451. ## tell Condor to trust whatever UID_DOMAIN a submit machine gives to
  452. ## the execute machine and just make sure the two strings match. The
  453. ## default for this setting is False, since it is more secure this
  454. ## way.
  455. TRUST_UID_DOMAIN = True
  456.  
  457. ## If you would like to be informed in near real-time via condor_q when
  458. ## a vanilla/standard/java job is in a suspension state, set this attribute to
  459. ## TRUE. However, this real-time update of the condor_schedd by the shadows
  460. ## could cause performance issues if there are thousands of concurrently
  461. ## running vanilla/standard/java jobs under a single condor_schedd and they
  462. ## are allowed to suspend and resume.
  463. #REAL_TIME_JOB_SUSPEND_UPDATES = False
  464.  
  465. ## A standard universe job can perform arbitrary shell calls via the
  466. ## libc 'system()' function. This function call is routed back to the shadow
  467. ## which performs the actual system() invocation in the initial directory of the
  468. ## running program and as the user who submitted the job. However, since the
  469. ## user job can request ARBITRARY shell commands to be run by the shadow, this
  470. ## is a generally unsafe practice. This should only be made available if it is
  471. ## actually needed. If this attribute is not defined, then it is the same as
  472. ## it being defined to False. Set it to True to allow the shadow to execute
  473. ## arbitrary shell code from the user job.
  474. #SHADOW_ALLOW_UNSAFE_REMOTE_EXEC = False
  475.  
  476. ## KEEP_OUTPUT_SANDBOX is an optional feature to tell Condor-G to not
  477. ## remove the job spool when the job leaves the queue. To use, just
  478. ## set to TRUE. Since you will be operating Condor-G in this manner,
  479. ## you may want to put leave_in_queue = false in your job submit
  480. ## description files, to tell Condor-G to simply remove the job from
  481. ## the queue immediately when the job completes (since the output files
  482. ## will stick around no matter what).
  483. #KEEP_OUTPUT_SANDBOX = False
  484.  
  485. ## This setting tells the negotiator to ignore user priorities. This
  486. ## avoids problems where jobs from different users won't run when using
  487. ## condor_advertise instead of a full-blown startd (some of the user
  488. ## priority system in Condor relies on information from the startd --
  489. ## we will remove this reliance when we support the user priority
  490. ## system for grid sites in the negotiator; for now, this setting will
  491. ## just disable it).
  492. #NEGOTIATOR_IGNORE_USER_PRIORITIES = False
  493.  
  494. ## This is a list of libraries containing ClassAd plug-in functions.
  495. #CLASSAD_USER_LIBS =
  496.  
  497. ## This setting tells Condor whether to delegate or copy GSI X509
  498. ## credentials when sending them over the wire between daemons.
  499. ## Delegation can take up to a second, which is very slow when
  500. ## submitting a large number of jobs. Copying exposes the credential
  501. ## to third parties if Condor isn't set to encrypt communications.
  502. ## By default, Condor will delegate rather than copy.
  503. #DELEGATE_JOB_GSI_CREDENTIALS = True
  504.  
  505. ## This setting controls whether Condor delegates a full or limited
  506. ## X509 credential for jobs. Currently, this only affects grid-type
  507. ## gt2 grid universe jobs. The default is False.
  508. #DELEGATE_FULL_JOB_GSI_CREDENTIALS = False
  509.  
  510. ## This setting controls the default behavior for the spooling of files
  511. ## into, or out of, the Condor system by such tools as condor_submit
  512. ## and condor_transfer_data. Here is the list of valid settings for this
  513. ## parameter and what they mean:
  514. ##
  515. ## stm_use_schedd_only
  516. ## Ask the condor_schedd to solely store/retrieve the sandbox
  517. ##
  518. ## stm_use_transferd
  519. ## Ask the condor_schedd for a location of a condor_transferd, then
  520. ## store/retrieve the sandbox from the transferd itself.
  521. ##
  522. ## The allowed values are case insensitive.
  523. ## The default of this parameter if not specified is: stm_use_schedd_only
  524. #SANDBOX_TRANSFER_METHOD = stm_use_schedd_only
  525.  
  526. ## This setting specifies an IP address that depends on the setting of
  527. ## BIND_ALL_INTERFACES. If BIND_ALL_INTERFACES is True (the default), then
  528. ## this variable controls what IP address will be advertised as the public
  529. ## address of the daemon. If BIND_ALL_INTERFACES is False, then this variable
  530. ## specifies which IP address to bind network sockets to. If
  531. ## BIND_ALL_INTERFACES is False and NETWORK_INTERFACE is not defined, Condor
  532. ## chooses a network interface automatically. It tries to choose a public
  533. ## interface if one is available. If it cannot decide which of two interfaces
  534. ## to choose from, it will pick the first one.
  535. #NETWORK_INTERFACE =
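## For example, to advertise (or bind to) one particular address, you
## would set something like the following (the address is a placeholder):
#NETWORK_INTERFACE = 192.168.1.10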
  536.  
  537. ##--------------------------------------------------------------------
  538. ## Settings that control the daemons' debugging output:
  539. ##--------------------------------------------------------------------
  540.  
  541. ##
  542. ## The flags given in ALL_DEBUG are shared between all daemons.
  543. ##
  544.  
  545. ALL_DEBUG =
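## (For example, ALL_DEBUG = D_FULLDEBUG would turn on verbose logging
##  in every daemon; it is intentionally left empty here.)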
  546.  
  547. MAX_COLLECTOR_LOG = 1000000
  548. COLLECTOR_DEBUG =
  549.  
  550. MAX_KBDD_LOG = 1000000
  551. KBDD_DEBUG =
  552.  
  553. MAX_NEGOTIATOR_LOG = 1000000
  554. NEGOTIATOR_DEBUG = D_MATCH
  555. MAX_NEGOTIATOR_MATCH_LOG = 1000000
  556.  
  557. MAX_SCHEDD_LOG = 1000000
  558. SCHEDD_DEBUG = D_PID
  559.  
  560. MAX_SHADOW_LOG = 1000000
  561. SHADOW_DEBUG =
  562.  
  563. MAX_STARTD_LOG = 1000000
  564. STARTD_DEBUG =
  565.  
  566. MAX_STARTER_LOG = 1000000
  567.  
  568. MAX_MASTER_LOG = 1000000
  569. MASTER_DEBUG =
  570. ## When the master starts up, should it truncate its log file?
  571. #TRUNC_MASTER_LOG_ON_OPEN = False
  572.  
  573. MAX_JOB_ROUTER_LOG = 1000000
  574. JOB_ROUTER_DEBUG =
  575.  
  576. MAX_ROOSTER_LOG = 1000000
  577. ROOSTER_DEBUG =
  578.  
  579. MAX_SHARED_PORT_LOG = 1000000
  580. SHARED_PORT_DEBUG =
  581.  
  582. MAX_HDFS_LOG = 1000000
  583. HDFS_DEBUG =
  584.  
  585. # High Availability Logs
  586. MAX_HAD_LOG = 1000000
  587. HAD_DEBUG =
  588. MAX_REPLICATION_LOG = 1000000
  589. REPLICATION_DEBUG =
  590. MAX_TRANSFERER_LOG = 1000000
  591. TRANSFERER_DEBUG =
  592.  
  593.  
  594. ## The daemons touch their log file periodically, even when they have
  595. ## nothing to write. When a daemon starts up, it prints the last time
  596. ## the log file was modified. This lets you estimate when a previous
  597. ## instance of a daemon stopped running. This parameter controls how often
  598. ## the daemons touch the file (in seconds).
  599. #TOUCH_LOG_INTERVAL = 60
  600.  
  601. ######################################################################
  602. ######################################################################
  603. ##
  611. ##
  612. ## Part 3: Settings that control the policy for running, stopping, and
  613. ## periodically checkpointing condor jobs:
  614. ######################################################################
  615. ######################################################################
  616.  
  617. ## This section contains macros that are here to help write legible
  618. ## expressions:
  619. MINUTE = 60
  620. HOUR = (60 * $(MINUTE))
  621. StateTimer = (time() - EnteredCurrentState)
  622. ActivityTimer = (time() - EnteredCurrentActivity)
  623. ActivationTimer = ifThenElse(JobStart =!= UNDEFINED, (time() - JobStart), 0)
  624. LastCkpt = (time() - LastPeriodicCheckpoint)
  625.  
  626. ## The JobUniverse attribute is just an int. These macros can be
  627. ## used to specify the universe in a human-readable way:
  628. STANDARD = 1
  629. VANILLA = 5
  630. MPI = 8
  631. VM = 13
  632. IsMPI = (TARGET.JobUniverse == $(MPI))
  633. IsVanilla = (TARGET.JobUniverse == $(VANILLA))
  634. IsStandard = (TARGET.JobUniverse == $(STANDARD))
  635. IsVM = (TARGET.JobUniverse == $(VM))
  636.  
  637. NonCondorLoadAvg = (LoadAvg - CondorLoadAvg)
  638. BackgroundLoad = 0.3
  639. HighLoad = 0.5
  640. StartIdleTime = 15 * $(MINUTE)
  641. ContinueIdleTime = 5 * $(MINUTE)
  642. MaxSuspendTime = 10 * $(MINUTE)
  643. MaxVacateTime = 10 * $(MINUTE)
  644.  
  645. KeyboardBusy = (KeyboardIdle < $(MINUTE))
  646. ConsoleBusy = (ConsoleIdle < $(MINUTE))
  647. CPUIdle = ($(NonCondorLoadAvg) <= $(BackgroundLoad))
  648. CPUBusy = ($(NonCondorLoadAvg) >= $(HighLoad))
  649. KeyboardNotBusy = ($(KeyboardBusy) == False)
  650.  
  651. BigJob = (TARGET.ImageSize >= (50 * 1024))
  652. MediumJob = (TARGET.ImageSize >= (15 * 1024) && TARGET.ImageSize < (50 * 1024))
  653. SmallJob = (TARGET.ImageSize < (15 * 1024))
  654.  
  655. JustCPU = ($(CPUBusy) && ($(KeyboardBusy) == False))
  656. MachineBusy = ($(CPUBusy) || $(KeyboardBusy))
  657.  
  658. ## The RANK expression controls which jobs this machine prefers to
  659. ## run over others. Some examples from the manual include:
  660. ## RANK = TARGET.ImageSize
  661. ## RANK = (Owner == "coltrane") + (Owner == "tyner") \
  662. ## + ((Owner == "garrison") * 10) + (Owner == "jones")
  663. ## By default, RANK is always 0, meaning that all jobs have an equal
  664. ## ranking.
  665. #RANK = 0
  666.  
  667.  
  668. #####################################################################
  669. ## This is where you choose the configuration that you would like to
  670. ## use. It has no defaults so it must be defined. We start this
  671. ## file off with the UWCS_* policy.
  672. ######################################################################
  673.  
  674. ## Also here is what is referred to as the TESTINGMODE_*, which is
  675. ## a quick hardwired way to test Condor with a simple no-preemption policy.
  676. ## Replace UWCS_* with TESTINGMODE_* if you wish to do testing mode.
  677. ## For example:
  678. ## WANT_SUSPEND = $(UWCS_WANT_SUSPEND)
  679. ## becomes
  680. ## WANT_SUSPEND = $(TESTINGMODE_WANT_SUSPEND)
  681.  
  682. # When should we only consider SUSPEND instead of PREEMPT?
  683. WANT_SUSPEND = $(UWCS_WANT_SUSPEND)
  684.  
  685. # When should we preempt gracefully instead of hard-killing?
  686. WANT_VACATE = $(UWCS_WANT_VACATE)
  687.  
  688. ## When is this machine willing to start a job?
  689. START = $(UWCS_START)
  690.  
  691. ## When should a local universe job be allowed to start?
  692. #START_LOCAL_UNIVERSE = TotalLocalJobsRunning < 200
  693.  
  694. ## When should a scheduler universe job be allowed to start?
  695. #START_SCHEDULER_UNIVERSE = TotalSchedulerJobsRunning < 200
  696.  
  697. ## When to suspend a job?
  698. SUSPEND = $(UWCS_SUSPEND)
  699.  
  700. ## When to resume a suspended job?
  701. CONTINUE = $(UWCS_CONTINUE)
  702.  
  703. ## When to nicely stop a job?
  704. ## (as opposed to killing it instantaneously)
  705. PREEMPT = $(UWCS_PREEMPT)
  706.  
  707. ## When to instantaneously kill a preempting job
  708. ## (e.g. if a job is in the pre-empting stage for too long)
  709. KILL = $(UWCS_KILL)
  710.  
  711. PERIODIC_CHECKPOINT = $(UWCS_PERIODIC_CHECKPOINT)
  712. PREEMPTION_REQUIREMENTS = $(UWCS_PREEMPTION_REQUIREMENTS)
  713. PREEMPTION_RANK = $(UWCS_PREEMPTION_RANK)
  714. NEGOTIATOR_PRE_JOB_RANK = $(UWCS_NEGOTIATOR_PRE_JOB_RANK)
  715. NEGOTIATOR_POST_JOB_RANK = $(UWCS_NEGOTIATOR_POST_JOB_RANK)
  716. MaxJobRetirementTime = $(UWCS_MaxJobRetirementTime)
  717. CLAIM_WORKLIFE = $(UWCS_CLAIM_WORKLIFE)
  718.  
  719. #####################################################################
  720. ## This is the UWisc - CS Department Configuration.
  721. #####################################################################
  722.  
  723. # When should we only consider SUSPEND instead of PREEMPT?
  724. # Only when SUSPEND is True and one of the following is also true:
  725. # - the job is small
  726. # - the keyboard is idle
  727. # - it is a vanilla universe job
  728. UWCS_WANT_SUSPEND = ( $(SmallJob) || $(KeyboardNotBusy) || $(IsVanilla) ) && \
  729. ( $(SUSPEND) )
  730.  
  731. # When should we preempt gracefully instead of hard-killing?
  732. UWCS_WANT_VACATE = ( $(ActivationTimer) > 10 * $(MINUTE) || $(IsVanilla) )
  733.  
  734. # Only start jobs if:
  735. # 1) the keyboard has been idle long enough, AND
  736. # 2) the load average is low enough OR the machine is currently
  737. # running a Condor job
  738. # (NOTE: Condor will only run 1 job at a time on a given resource.
  739. # The reasons Condor might consider running a different job while
  740. # already running one are machine Rank (defined above), and user
  741. # priorities.)
  742. UWCS_START = ( (KeyboardIdle > $(StartIdleTime)) \
  743. && ( $(CPUIdle) || \
  744. (State != "Unclaimed" && State != "Owner")) )
  745.  
  746. # Suspend jobs if:
  747. # 1) the keyboard has been touched, OR
  748. # 2a) The CPU has been busy for more than 2 minutes, AND
  749. # 2b) the job has been running for more than 90 seconds
  750. UWCS_SUSPEND = ( $(KeyboardBusy) || \
  751. ( (CpuBusyTime > 2 * $(MINUTE)) \
  752. && $(ActivationTimer) > 90 ) )
  753.  
  754. # Continue jobs if:
  755. # 1) the cpu is idle, AND
  756. # 2) we've been suspended more than 10 seconds, AND
  757. # 3) the keyboard hasn't been touched in a while
  758. UWCS_CONTINUE = ( $(CPUIdle) && ($(ActivityTimer) > 10) \
  759. && (KeyboardIdle > $(ContinueIdleTime)) )
  760.  
  761. # Preempt jobs if:
  762. # 1) The job is suspended and has been suspended longer than we want
  763. # 2) OR, we don't want to suspend this job, but the conditions to
  764. # suspend jobs have been met (someone is using the machine)
  765. UWCS_PREEMPT = ( ((Activity == "Suspended") && \
  766. ($(ActivityTimer) > $(MaxSuspendTime))) \
  767. || (SUSPEND && (WANT_SUSPEND == False)) )
  768.  
  769. # Maximum time (in seconds) to wait for a job to finish before kicking
  770. # it off (due to PREEMPT, a higher priority claim, or the startd
  771. # gracefully shutting down). This is computed from the time the job
  772. # was started, minus any suspension time. Once the retirement time runs
  773. # out, the usual preemption process will take place. The job may
  774. # self-limit the retirement time to _less_ than what is given here.
  775. # By default, nice user jobs and standard universe jobs set their
  776. # MaxJobRetirementTime to 0, so they will not wait in retirement.
  777.  
  778. UWCS_MaxJobRetirementTime = 0
  779.  
  780. ## If you completely disable preemption of claims to machines, you
  781. ## should consider limiting the time span over which new jobs will be
  782. ## accepted on the same claim. See the manual section on disabling
  783. ## preemption for a comprehensive discussion. Since this example
  784. ## configuration does not disable preemption of claims, we leave
  785. ## CLAIM_WORKLIFE undefined (infinite).
  786. #UWCS_CLAIM_WORKLIFE = 1200
  787.  
  788. # How long to allow a job to vacate gracefully. After this time,
  789. # the job is killed.
  790. MachineMaxVacateTime = $(MaxVacateTime)
  791.  
  792. # Abort graceful eviction of a job, even though it has not
  793. # yet used all the time allotted by MachineMaxVacateTime.
  794. UWCS_KILL = false
  795.  
  796. ## Only define vanilla versions of these if you want to make them
  797. ## different from the above settings.
  798. #SUSPEND_VANILLA = ( $(KeyboardBusy) || \
  799. # ((CpuBusyTime > 2 * $(MINUTE)) && $(ActivationTimer) > 90) )
  800. #CONTINUE_VANILLA = ( $(CPUIdle) && ($(ActivityTimer) > 10) \
  801. # && (KeyboardIdle > $(ContinueIdleTime)) )
  802. #PREEMPT_VANILLA = ( ((Activity == "Suspended") && \
  803. # ($(ActivityTimer) > $(MaxSuspendTime))) \
  804. # || (SUSPEND_VANILLA && (WANT_SUSPEND == False)) )
  805. #KILL_VANILLA = false
  806.  
  807. ## Checkpoint every 3 hours on average, with a +-30 minute random
  808. ## factor to avoid having many jobs hit the checkpoint server at
  809. ## the same time.
  810. UWCS_PERIODIC_CHECKPOINT = $(LastCkpt) > (3 * $(HOUR) + \
  811. $RANDOM_INTEGER(-30,30,1) * $(MINUTE) )
  812.  
  813. ## You might want to checkpoint a little less often. A good
  814. ## example of this is below. For jobs smaller than 60 megabytes, we
  815. ## periodically checkpoint every 6 hours. For larger jobs, we only
  816. ## checkpoint every 12 hours.
  817. #UWCS_PERIODIC_CHECKPOINT = \
  818. # ( (TARGET.ImageSize < 60000) && \
  819. # ($(LastCkpt) > (6 * $(HOUR) + $RANDOM_INTEGER(-30,30,1))) ) || \
  820. # ( $(LastCkpt) > (12 * $(HOUR) + $RANDOM_INTEGER(-30,30,1)) )
  821.  
  822. ## The rank expressions used by the negotiator are configured below.
  823. ## This is the order in which ranks are applied by the negotiator:
  824. ## 1. NEGOTIATOR_PRE_JOB_RANK
  825. ## 2. rank in job ClassAd
  826. ## 3. NEGOTIATOR_POST_JOB_RANK
  827. ## 4. cause of preemption (0=user priority,1=startd rank,2=no preemption)
  828. ## 5. PREEMPTION_RANK
  829.  
  830. ## The NEGOTIATOR_PRE_JOB_RANK expression overrides all other ranks
  831. ## that are used to pick a match from the set of possibilities.
  832. ## The following expression matches jobs to unclaimed resources
  833. ## whenever possible, regardless of the job-supplied rank.
  834. UWCS_NEGOTIATOR_PRE_JOB_RANK = RemoteOwner =?= UNDEFINED
  835.  
  836. ## The NEGOTIATOR_POST_JOB_RANK expression chooses between
  837. ## resources that are equally preferred by the job.
  838. ## The following example expression steers jobs toward
  839. ## faster machines and tends to fill a cluster of multiprocessors
  840. ## breadth-first instead of depth-first. It also prefers online
  841. ## machines over offline (hibernating) ones. In this example,
  842. ## the expression is chosen to have no effect when preemption
  843. ## would take place, allowing control to pass on to
  844. ## PREEMPTION_RANK.
  845. UWCS_NEGOTIATOR_POST_JOB_RANK = \
  846. (RemoteOwner =?= UNDEFINED) * (KFlops - SlotID - 1.0e10*(Offline=?=True))
  847.  
  848. ## The negotiator will not preempt a job running on a given machine
  849. ## unless the PREEMPTION_REQUIREMENTS expression evaluates to true
  850. ## and the owner of the idle job has a better priority than the owner
  851. ## of the running job. This expression defaults to true.
  852. UWCS_PREEMPTION_REQUIREMENTS = ((SubmitterGroup =?= RemoteGroup) \
  853. && ($(StateTimer) > (1 * $(HOUR))) \
  854. && (RemoteUserPrio > TARGET.SubmitterUserPrio * 1.2)) \
  855. || (MY.NiceUser == True)
  856.  
  857. ## The PREEMPTION_RANK expression is used in a case where preemption
  858. ## is the only option and all other negotiation ranks are equal. For
  859. ## example, if the job has no preference, it is usually preferable to
  860. ## preempt a job with a small ImageSize instead of a job with a large
  861. ## ImageSize. The default is to rank all preemptable matches the
  862. ## same. However, the negotiator will always prefer to match the job
  863. ## with an idle machine over a preemptable machine, if all other
  864. ## negotiation ranks are equal.
  865. UWCS_PREEMPTION_RANK = (RemoteUserPrio * 1000000) - TARGET.ImageSize
  866.  
  867.  
  868. #####################################################################
  869. ## This is a Configuration that will cause your Condor jobs to
  870. ## always run. This is intended for testing only.
  871. ######################################################################
  872.  
  873. ## This mode will cause your jobs to start on a machine and will let
  874. ## them run to completion. Condor will ignore all of what is going
  875. ## on in the machine (load average, keyboard activity, etc.)
  876.  
  877. TESTINGMODE_WANT_SUSPEND = False
  878. TESTINGMODE_WANT_VACATE = False
  879. TESTINGMODE_START = True
  880. TESTINGMODE_SUSPEND = False
  881. TESTINGMODE_CONTINUE = True
  882. TESTINGMODE_PREEMPT = False
  883. TESTINGMODE_KILL = False
  884. TESTINGMODE_PERIODIC_CHECKPOINT = False
  885. TESTINGMODE_PREEMPTION_REQUIREMENTS = False
  886. TESTINGMODE_PREEMPTION_RANK = 0
  887.  
  888. # Prevent machine claims from being reused indefinitely, since
  889. # preemption of claims is disabled in the TESTINGMODE configuration.
  890. TESTINGMODE_CLAIM_WORKLIFE = 1200
  891.  
  892.  
  893. ######################################################################
  894. ######################################################################
  895. ##
  903. ##
  904. ## Part 4: Settings you should probably leave alone:
  905. ## (unless you know what you're doing)
  906. ######################################################################
  907. ######################################################################
  908.  
  909. ######################################################################
  910. ## Daemon-wide settings:
  911. ######################################################################
  912.  
  913. ## Pathnames
  914. LOG = $(LOCAL_DIR)/log
  915. SPOOL = $(LOCAL_DIR)/spool
  916. EXECUTE = $(LOCAL_DIR)/execute
  917. BIN = $(RELEASE_DIR)/bin
  918. LIB = $(RELEASE_DIR)/lib
  919. INCLUDE = $(RELEASE_DIR)/include
  920. SBIN = $(RELEASE_DIR)/sbin
  921. LIBEXEC = $(RELEASE_DIR)/libexec
  922.  
  923. ## If you leave HISTORY undefined (comment it out), no history file
  924. ## will be created.
  925. HISTORY = $(SPOOL)/history
  926.  
  927. ## Log files
  928. COLLECTOR_LOG = $(LOG)/CollectorLog
  929. KBDD_LOG = $(LOG)/KbdLog
  930. MASTER_LOG = $(LOG)/MasterLog
  931. NEGOTIATOR_LOG = $(LOG)/NegotiatorLog
  932. NEGOTIATOR_MATCH_LOG = $(LOG)/MatchLog
  933. SCHEDD_LOG = $(LOG)/SchedLog
  934. SHADOW_LOG = $(LOG)/ShadowLog
  935. STARTD_LOG = $(LOG)/StartLog
  936. STARTER_LOG = $(LOG)/StarterLog
  937. JOB_ROUTER_LOG = $(LOG)/JobRouterLog
  938. ROOSTER_LOG = $(LOG)/RoosterLog
  939. SHARED_PORT_LOG = $(LOG)/SharedPortLog
  940. # High Availability Logs
  941. HAD_LOG = $(LOG)/HADLog
  942. REPLICATION_LOG = $(LOG)/ReplicationLog
  943. TRANSFERER_LOG = $(LOG)/TransfererLog
  944. HDFS_LOG = $(LOG)/HDFSLog
  945.  
  946. ## Lock files
  947. SHADOW_LOCK = $(LOCK)/ShadowLock
  948.  
  949. ## This setting controls how often any lock files currently in use have their
  950. ## time stamp updated. Updating the time stamp prevents administrative programs
  951. ## like 'tmpwatch' from deleting long lived lock files. The parameter is
  952. ## an integer in seconds with a minimum of 60 seconds. The default if not
  953. ## specified is 28800 seconds, or 8 hours.
  954. ## This attribute only takes effect on restart of the daemons or at the next
  955. ## update time.
  956. # LOCK_FILE_UPDATE_INTERVAL = 28800
  957.  
  958. ## This setting primarily allows you to change the port that the
  959. ## collector is listening on. By default, the collector uses port
  960. ## 9618, but you can set the port with a ":port", such as:
  961. ## COLLECTOR_HOST = $(CONDOR_HOST):1234
  962. COLLECTOR_HOST = $(CONDOR_HOST)
  963.  
  964. ## The NEGOTIATOR_HOST parameter has been deprecated. The port where
  965. ## the negotiator is listening is now dynamically allocated and the IP
  966. ## and port are now obtained from the collector, just like all the
  967. ## other daemons. However, if your pool contains any machines that
  968. ## are running version 6.7.3 or earlier, you can uncomment this
  969. ## setting to go back to the old fixed-port (9614) for the negotiator.
  970. #NEGOTIATOR_HOST = $(CONDOR_HOST)
  971.  
  972. ## How long are you willing to let daemons try their graceful
  973. ## shutdown methods before they do a hard shutdown? (30 minutes)
  974. #SHUTDOWN_GRACEFUL_TIMEOUT = 1800
  975.  
  976. ## How much disk space would you like reserved from Condor? In
  977. ## places where Condor is computing the free disk space on various
  978. ## partitions, it reduces the amount it actually finds by this
  979. ## many megabytes. (If undefined, defaults to 0).
  980. RESERVED_DISK = 5
  981.  
  982. ## If your machine is running AFS and the AFS cache lives on the same
  983. ## partition as the other Condor directories, and you want Condor to
  984. ## reserve the space that your AFS cache is configured to use, set
  985. ## this to true.
  986. #RESERVE_AFS_CACHE = False
  987.  
  988. ## By default, if a user does not specify "notify_user" in the submit
  989. ## description file, any email Condor sends about that job will go to
  990. ## "username@UID_DOMAIN". If your machines all share a common UID
  991. ## domain (so that you would set UID_DOMAIN to be the same across all
  992. ## machines in your pool), *BUT* email to user@UID_DOMAIN is *NOT*
  993. ## the right place for Condor to send email for your site, you can
  994. ## define the default domain to use for email. A common example
  995. ## would be to set EMAIL_DOMAIN to the fully qualified hostname of
  996. ## each machine in your pool, so users submitting jobs from a
  997. ## specific machine would get email sent to user@machine.your.domain,
  998. ## instead of user@your.domain. In general, you should leave this
  999. ## setting commented out unless two things are true: 1) UID_DOMAIN is
  1000. ## set to your domain, not $(FULL_HOSTNAME), and 2) email to
  1001. ## user@UID_DOMAIN won't work.
  1002. #EMAIL_DOMAIN = $(FULL_HOSTNAME)
  1003.  
  1004. ## Should Condor daemons create a UDP command socket (for incoming
  1005. ## UDP-based commands) in addition to the TCP command socket? By
  1006. ## default, classified ad updates sent to the collector use UDP, in
  1007. ## addition to some keep alive messages and other non-essential
  1008. ## communication. However, in certain situations, it might be
  1009. ## desirable to disable the UDP command port (for example, to reduce
  1010. ## the number of ports represented by a CCB broker, etc). If not
  1011. ## defined, the UDP command socket is enabled by default, and to
  1012. ## modify this, you must restart your Condor daemons. Also, this
  1013. ## setting must be defined machine-wide. For example, setting
  1014. ## "STARTD.WANT_UDP_COMMAND_SOCKET = False" while the global setting
  1015. ## is "True" will still result in the startd creating a UDP socket.
  1016. #WANT_UDP_COMMAND_SOCKET = True
  1017.  
  1018. ## If your site needs to use TCP updates to the collector, instead of
  1019. ## UDP, you can enable this feature. HOWEVER, WE DO NOT RECOMMEND
  1020. ## THIS FOR MOST SITES! In general, the only sites that might want
  1021. ## this feature are pools made up of machines connected via a
  1022. ## wide-area network where UDP packets are frequently or always
  1023. ## dropped. If you enable this feature, you *MUST* turn on the
  1024. ## COLLECTOR_SOCKET_CACHE_SIZE setting at your collector, and each
  1025. ## entry in the socket cache uses another file descriptor. If not
  1026. ## defined, this feature is disabled by default.
  1027. #UPDATE_COLLECTOR_WITH_TCP = True
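## A sketch of what enabling TCP updates might look like (the cache
## size below is illustrative; size it to the number of machines
## reporting to this collector):
#UPDATE_COLLECTOR_WITH_TCP = True
#COLLECTOR_SOCKET_CACHE_SIZE = 1000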
  1028.  
  1029. ## HIGHPORT and LOWPORT let you set the range of ports that Condor
  1030. ## will use. This may be useful if you are behind a firewall. By
  1031. ## default, Condor uses port 9618 for the collector, 9614 for the
  1032. ## negotiator, and system-assigned (apparently random) ports for
  1033. ## everything else. HIGHPORT and LOWPORT only affect these
  1034. ## system-assigned ports, but will restrict them to the range you
  1035. ## specify here. If you want to change the well-known ports for the
  1036. ## collector or negotiator, see COLLECTOR_HOST or NEGOTIATOR_HOST.
  1037. ## Note that both LOWPORT and HIGHPORT must be at least 1024 if you
  1038. ## are not starting your daemons as root. You may also specify
  1039. ## different port ranges for incoming and outgoing connections by
  1040. ## using IN_HIGHPORT/IN_LOWPORT and OUT_HIGHPORT/OUT_LOWPORT.
  1041. #HIGHPORT = 9700
  1042. #LOWPORT = 9600
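## Separate inbound/outbound ranges would look something like this
## (values are placeholders only):
#IN_LOWPORT = 9600
#IN_HIGHPORT = 9700
#OUT_LOWPORT = 9800
#OUT_HIGHPORT = 9900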
  1043.  
  1044. ## If a daemon doesn't respond for too long, do you want to generate
  1045. ## a core file? This basically controls the type of the signal
  1046. ## sent to the child process, and mostly affects the Condor Master.
  1047. #NOT_RESPONDING_WANT_CORE = False
  1048.  
  1049.  
  1050. ######################################################################
  1051. ## Daemon-specific settings:
  1052. ######################################################################
  1053.  
  1054. ##--------------------------------------------------------------------
  1055. ## condor_master
  1056. ##--------------------------------------------------------------------
  1057. ## Daemons you want the master to keep running for you:
  1058. DAEMON_LIST = MASTER, STARTD, SCHEDD
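## (This host runs only a startd and schedd in addition to the master;
##  on the central manager the list would typically also include the
##  collector and negotiator, e.g.
##  DAEMON_LIST = MASTER, COLLECTOR, NEGOTIATOR, STARTD, SCHEDD)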
  1059.  
  1060. ## Which daemons use the Condor DaemonCore library (i.e., not the
  1061. ## checkpoint server or custom user daemons)?
  1062. #DC_DAEMON_LIST = \
  1063. #MASTER, STARTD, SCHEDD, KBDD, COLLECTOR, NEGOTIATOR, EVENTD, \
  1064. #VIEW_SERVER, CONDOR_VIEW, VIEW_COLLECTOR, HAWKEYE, CREDD, HAD, \
  1065. #DBMSD, QUILL, JOB_ROUTER, ROOSTER, LEASEMANAGER, HDFS, SHARED_PORT, \
  1066. #DEFRAG
  1067.  
  1068.  
  1069. ## Where are the binaries for these daemons?
  1070. MASTER = $(SBIN)/condor_master
  1071. STARTD = $(SBIN)/condor_startd
  1072. SCHEDD = $(SBIN)/condor_schedd
  1073. KBDD = $(SBIN)/condor_kbdd
  1074. NEGOTIATOR = $(SBIN)/condor_negotiator
  1075. COLLECTOR = $(SBIN)/condor_collector
  1076. CKPT_SERVER = $(SBIN)/condor_ckpt_server
  1077. STARTER_LOCAL = $(SBIN)/condor_starter
  1078. JOB_ROUTER = $(LIBEXEC)/condor_job_router
  1079. ROOSTER = $(LIBEXEC)/condor_rooster
  1080. HDFS = $(SBIN)/condor_hdfs
  1081. SHARED_PORT = $(LIBEXEC)/condor_shared_port
  1082. TRANSFERER = $(LIBEXEC)/condor_transferer
  1083. DEFRAG = $(LIBEXEC)/condor_defrag
  1084.  
  1085. ## When the master starts up, it can place its address (IP and port)
  1086. ## into a file. This way, tools running on the local machine don't
  1087. ## need to query the central manager to find the master. This
  1088. ## feature can be turned off by commenting out this setting.
  1089. MASTER_ADDRESS_FILE = $(LOG)/.master_address
  1090.  
  1091. ## Where should the master find the condor_preen binary? If you don't
  1092. ## want preen to run at all, set it to nothing.
  1093. PREEN = $(SBIN)/condor_preen
  1094.  
  1095. ## How do you want preen to behave? The "-m" means you want email
  1096. ## about files preen finds that it thinks it should remove. The "-r"
  1097. ## means you want preen to actually remove these files. If you don't
  1098. ## want either of those things to happen, just remove the appropriate
  1099. ## one from this setting.
  1100. PREEN_ARGS = -m -r
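## (For example, PREEN_ARGS = -m would only mail about suspect files
##  without actually removing them.)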
  1101.  
  1102. ## How often should the master start up condor_preen? (once a day)
  1103. #PREEN_INTERVAL = 86400
  1104.  
  1105. ## If a daemon dies an unnatural death, do you want email about it?
  1106. #PUBLISH_OBITUARIES = True
  1107.  
  1108. ## If you're getting obituaries, how many lines of the end of that
  1109. ## daemon's log file do you want included in the obituary?
  1110. #OBITUARY_LOG_LENGTH = 20
  1111.  
  1112. ## Should the master run?
  1113. #START_MASTER = True
  1114.  
  1115. ## Should the master start up the daemons you want it to?
  1116. #START_DAEMONS = True
  1117.  
  1118. ## How often do you want the master to send an update to the central
  1119. ## manager?
  1120. #MASTER_UPDATE_INTERVAL = 300
  1121.  
  1122. ## How often do you want the master to check the time stamps of the
  1123. ## daemons it's running? If any daemons have been modified, the
  1124. ## master restarts them.
  1125. #MASTER_CHECK_NEW_EXEC_INTERVAL = 300
  1126.  
  1127. ## Once you notice new binaries, how long should you wait before you
  1128. ## try to execute them?
  1129. #MASTER_NEW_BINARY_DELAY = 120
  1130.  
  1131. ## What's the maximum amount of time you're willing to give the
  1132. ## daemons to quickly shutdown before you just kill them outright?
  1133. #SHUTDOWN_FAST_TIMEOUT = 120
  1134.  
  1135. ######
  1136. ## Exponential back off settings:
  1137. ######
  1138. ## When a daemon keeps crashing, we use "exponential back off" so we
  1139. ## wait longer and longer before restarting it. This is the base of
  1140. ## the exponent used to determine how long to wait before starting
  1141. ## the daemon again:
  1142. #MASTER_BACKOFF_FACTOR = 2.0
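## (Roughly, the wait grows like FACTOR^(number of recent restarts):
##  with a factor of 2.0 that is about 2, 4, 8, 16, ... seconds, and
##  2^12 = 4096 seconds exceeds the 3600 second ceiling below, which
##  is where the "1 hour in 12 restarts" note comes from.)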
  1143.  
  1144. ## What's the maximum amount of time you want the master to wait
  1145. ## between attempts to start a given daemon? (With 2.0 as the
  1146. ## MASTER_BACKOFF_FACTOR, you'd hit 1 hour in 12 restarts...)
  1147. #MASTER_BACKOFF_CEILING = 3600
  1148.  
  1149. ## How long should a daemon run without crashing before we consider
  1150. ## it "recovered". Once a daemon has recovered, we reset the number
  1151. ## of restarts so the exponential back off stuff goes back to normal.
  1152. #MASTER_RECOVER_FACTOR = 300
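## As an illustrative sketch (these are not the shipped defaults), a gentler
## back off might look like the following; with a factor of 1.5 the wait
## before the Nth restart is roughly 1.5^N seconds, capped at half an hour:
#MASTER_BACKOFF_FACTOR = 1.5
#MASTER_BACKOFF_CEILING = 1800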
  1153.  
  1154.  
  1155. ##--------------------------------------------------------------------
  1156. ## condor_collector
  1157. ##--------------------------------------------------------------------
  1158. ## Address to which Condor will send a weekly e-mail with output of
  1159. ## condor_status.
  1160. #CONDOR_DEVELOPERS = condor-admin@cs.wisc.edu
  1161.  
  1162. ## Global Collector to periodically advertise basic information about
  1163. ## your pool.
  1164. #CONDOR_DEVELOPERS_COLLECTOR = condor.cs.wisc.edu
  1165.  
  1166. ## When the collector starts up, it can place its address (IP and port)
  1167. ## into a file. This way, tools running on the local machine don't
  1168. ## need to query the central manager to find the collector. This
  1169. ## feature can be turned off by commenting out this setting.
  1170. ## This is essential when using a port of "0" (automatic) for the
  1171. ## COLLECTOR_HOST, a useful technique for personal Condor installs.
  1172. COLLECTOR_ADDRESS_FILE = $(LOG)/.collector_address
  1173.  
  1174.  
  1175. ##--------------------------------------------------------------------
  1176. ## condor_negotiator
  1177. ##--------------------------------------------------------------------
  1178. ## Determine if the Negotiator will honor SlotWeight attributes, which
  1179. ## may be used to give a slot greater weight when calculating usage.
  1180. #NEGOTIATOR_USE_SLOT_WEIGHTS = True
  1181.  
  1182.  
  1183. ## How often the Negotiator starts a negotiation cycle, defined in
  1184. ## seconds.
  1185. #NEGOTIATOR_INTERVAL = 60
  1186.  
  1187. ## Should the Negotiator publish an update to the Collector after
  1188. ## every negotiation cycle. It is useful to have this set to True
  1189. ## to get immediate updates on LastNegotiationCycle statistics.
  1190. #NEGOTIATOR_UPDATE_AFTER_CYCLE = False
  1191.  
  1192.  
  1193. ##--------------------------------------------------------------------
  1194. ## condor_startd
  1195. ##--------------------------------------------------------------------
  1196. ## Where are the various condor_starter binaries installed?
  1197. STARTER_LIST = STARTER, STARTER_STANDARD
  1198. STARTER = $(SBIN)/condor_starter
  1199. STARTER_STANDARD = $(SBIN)/condor_starter.std
  1200. STARTER_LOCAL = $(SBIN)/condor_starter
  1201.  
  1202. ## When the startd starts up, it can place its address (IP and port)
  1203. ## into a file. This way, tools running on the local machine don't
  1204. ## need to query the central manager to find the startd. This
  1205. ## feature can be turned off by commenting out this setting.
  1206. STARTD_ADDRESS_FILE = $(LOG)/.startd_address
  1207.  
  1208. ## When a machine is claimed, how often should we poll the state of
  1209. ## the machine to see if we need to evict/suspend the job, etc?
  1210. #POLLING_INTERVAL = 5
  1211.  
  1212. ## How often should the startd send updates to the central manager?
  1213. #UPDATE_INTERVAL = 300
  1214.  
  1215. ## How long is the startd willing to stay in the "matched" state?
  1216. #MATCH_TIMEOUT = 300
  1217.  
  1218. ## How long is the startd willing to stay in the preempting/killing
  1219. ## state before it just kills the starter directly?
  1220. #KILLING_TIMEOUT = 30
  1221.  
  1222. ## When a machine is unclaimed, when should it run benchmarks?
  1223. ## LastBenchmark is initialized to 0, so this expression says as soon
  1224. ## as we're unclaimed, run the benchmarks. Thereafter, if we're
  1225. ## unclaimed and it's been at least 4 hours since we ran the last
  1226. ## benchmarks, run them again. The startd keeps a weighted average
  1227. ## of the benchmark results to provide more accurate values.
  1228. ## Note, if you don't want any benchmarks run at all, either comment
  1229. ## RunBenchmarks out, or set it to "False".
  1230. BenchmarkTimer = (time() - LastBenchmark)
  1231. RunBenchmarks : (LastBenchmark == 0 ) || ($(BenchmarkTimer) >= (4 * $(HOUR)))
  1232. #RunBenchmarks : False
  1233.  
  1234. ## When the startd does benchmarks, which set of benchmarks should we
  1235. ## run? The default is the same as pre-7.5.6: MIPS and KFLOPS.
  1236. benchmarks_joblist = mips kflops
  1237.  
  1238. ## What's the max "load" of all running benchmarks? With the default
  1239. ## (1.01), the startd will run the benchmarks serially.
  1240. benchmarks_max_job_load = 1.0
  1241.  
  1242. # MIPS (Dhrystone 2.1) benchmark: load 1.0
  1243. benchmarks_mips_executable = $(LIBEXEC)/condor_mips
  1244. benchmarks_mips_job_load = 1.0
  1245.  
  1246. # KFLOPS (clinpack) benchmark: load 1.0
  1247. benchmarks_kflops_executable = $(LIBEXEC)/condor_kflops
  1248. benchmarks_kflops_job_load = 1.0
  1249.  
  1250.  
  1251. ## Normally, when the startd is computing the idle time of all the
  1252. ## users of the machine (both local and remote), it checks the utmp
  1253. ## file to find all the currently active ttys, and only checks access
  1254. ## time of the devices associated with active logins. Unfortunately,
  1255. ## on some systems, utmp is unreliable, and the startd might miss
  1256. ## keyboard activity by doing this. So, if your utmp is unreliable,
  1257. ## set this setting to True and the startd will check the access time
  1258. ## on all tty and pty devices.
  1259. #STARTD_HAS_BAD_UTMP = False
  1260.  
  1261. ## This entry allows the startd to monitor console (keyboard and
  1262. ## mouse) activity by checking the access times on special files in
  1263. ## /dev. Activity on these files shows up as "ConsoleIdle" time in
  1264. ## the startd's ClassAd. Just give a comma-separated list of the
  1265. ## names of devices you want considered the console, without the
  1266. ## "/dev/" portion of the pathname.
  1267. #CONSOLE_DEVICES = mouse, console
  1268.  
  1269.  
  1270. ## The STARTD_ATTRS (and legacy STARTD_EXPRS) entry allows you to
  1271. ## have the startd advertise arbitrary attributes from the config
  1272. ## file in its ClassAd. Give the comma-separated list of entries
  1273. ## from the config file you want in the startd ClassAd.
  1274. ## NOTE: because of the different syntax of the config file and
  1275. ## ClassAds, you might have to do a little extra work to get a given
  1276. ## entry into the ClassAd. In particular, ClassAds require double
  1277. ## quotes (") around your strings. Numeric values can go in
  1278. ## directly, as can boolean expressions. For example, if you wanted
  1279. ## the startd to advertise its list of console devices, whether it's
  1280. ## configured to run benchmarks, and how often it sends updates to
  1281. ## the central manager, you'd have to define the following helper
  1282. ## macro:
  1283. #MY_CONSOLE_DEVICES = "$(CONSOLE_DEVICES)"
  1284. ## Note: this must come before you define STARTD_ATTRS because macros
  1285. ## must be defined before you use them in other macros or
  1286. ## expressions.
  1287. ## Then, you'd set the STARTD_ATTRS setting to this:
  1288. #STARTD_ATTRS = MY_CONSOLE_DEVICES, RunBenchmarks, UPDATE_INTERVAL
  1289. ##
  1290. ## STARTD_ATTRS can also be defined on a per-slot basis. The startd
  1291. ## builds the list of attributes to advertise by combining the lists
  1292. ## in this order: STARTD_ATTRS, SLOTx_STARTD_ATTRS. In the below
  1293. ## example, the startd ad for slot1 will have the value for
  1294. ## favorite_color, favorite_season, and favorite_movie, and slot2
  1295. ## will have favorite_color, favorite_season, and favorite_song.
  1296. ##
  1297. #STARTD_ATTRS = favorite_color, favorite_season
  1298. #SLOT1_STARTD_ATTRS = favorite_movie
  1299. #SLOT2_STARTD_ATTRS = favorite_song
  1300. ##
  1301. ## Attributes in the STARTD_ATTRS list can also be on a per-slot basis.
  1302. ## For example, the following configuration:
  1303. ##
  1304. #favorite_color = "blue"
  1305. #favorite_season = "spring"
  1306. #SLOT2_favorite_color = "green"
  1307. #SLOT3_favorite_season = "summer"
  1308. #STARTD_ATTRS = favorite_color, favorite_season
  1309. ##
  1310. ## will result in the following attributes in the slot classified
  1311. ## ads:
  1312. ##
  1313. ## slot1 - favorite_color = "blue"; favorite_season = "spring"
  1314. ## slot2 - favorite_color = "green"; favorite_season = "spring"
  1315. ## slot3 - favorite_color = "blue"; favorite_season = "summer"
  1316. ##
  1317. ## Finally, the recommended default value for this setting is to
  1318. ## publish the COLLECTOR_HOST setting as a string. This can be
  1319. ## useful when using the "$$(COLLECTOR_HOST)" syntax in the submit file
  1320. ## for jobs to know (for example, via their environment) what pool
  1321. ## they're running in.
  1322. COLLECTOR_HOST_STRING = "$(COLLECTOR_HOST)"
  1323. STARTD_ATTRS = COLLECTOR_HOST_STRING
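## As a sketch of how a job could consume that attribute (submit-file
## syntax, shown here only as an illustration and not part of this
## configuration), the advertised string can be placed into the job's
## environment with the $$() substitution:
## environment = CONDOR_POOL=$$(COLLECTOR_HOST_STRING)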
  1324.  
  1325. ## When the startd is claimed by a remote user, it can also advertise
  1326. ## arbitrary attributes from the ClassAd of the job it's working on.
  1327. ## Just list the attribute names you want advertised.
  1328. ## Note: since this is already a ClassAd, you don't have to do
  1329. ## anything funny with strings, etc. This feature can be turned off
  1330. ## by commenting out this setting (there is no default).
  1331. STARTD_JOB_EXPRS = ImageSize, ExecutableSize, JobUniverse, NiceUser
  1332.  
  1333. ## If you want to "lie" to Condor about how many CPUs your machine
  1334. ## has, you can use this setting to override Condor's automatic
  1335. ## computation. If you modify this, you must restart the startd for
  1336. ## the change to take effect (a simple condor_reconfig will not do).
  1337. ## Please read the section on "condor_startd Configuration File
  1338. ## Macros" in the Condor Administrators Manual for a further
  1339. ## discussion of this setting. Its use is not recommended. This
  1340. ## must be an integer ("N" isn't a valid setting, that's just used to
  1341. ## represent the default).
  1342. #NUM_CPUS = N
  1343.  
  1344. ## If you never want Condor to detect more than "N" CPUs, uncomment
  1345. ## this line. You must restart the startd for this setting to take
  1346. ## effect. If set to 0 or a negative number, it is ignored.
  1347. ## By default, it is ignored. Otherwise, it must be a positive
  1348. ## integer ("N" isn't a valid setting, that's just used to
  1349. ## represent the default).
  1350. #MAX_NUM_CPUS = N
  1351.  
  1352. ## Normally, Condor will automatically detect the amount of physical
  1353. ## memory available on your machine. Define MEMORY to tell Condor
  1354. ## how much physical memory (in MB) your machine has, overriding the
  1355. ## value Condor computes automatically. For example:
  1356. #MEMORY = 128
  1357.  
  1358. ## How much memory would you like reserved from Condor? By default,
  1359. ## Condor considers all the physical memory of your machine as
  1360. ## available to be used by Condor jobs. If RESERVED_MEMORY is
  1361. ## defined, Condor subtracts it from the amount of memory it
  1362. ## advertises as available.
  1363. #RESERVED_MEMORY = 0
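## For example (an illustrative value, not a default), to keep 1 GB of
## physical memory back from Condor jobs on this machine:
#RESERVED_MEMORY = 1024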
  1364.  
  1365. ######
  1366. ## SMP startd settings
  1367. ##
  1368. ## By default, Condor will evenly divide the resources in an SMP
  1369. ## machine (such as RAM, swap space and disk space) among all the
  1370. ## CPUs, and advertise each CPU as its own slot with an even share of
  1371. ## the system resources. If you want something other than this,
  1372. ## there are a few options available to you. Please read the section
  1373. ## on "Configuring The Startd for SMP Machines" in the Condor
  1374. ## Administrator's Manual for full details. The various settings are
  1375. ## only briefly listed and described here.
  1376. ######
  1377.  
  1378. ## The maximum number of different slot types.
  1379. #MAX_SLOT_TYPES = 10
  1380.  
  1381. ## Use this setting to define your own slot types. This
  1382. ## allows you to divide system resources unevenly among your CPUs.
  1383. ## You must use a different setting for each different type you
  1384. ## define. The "<N>" in the name of the macro listed below must be
  1385. ## an integer from 1 to MAX_SLOT_TYPES (defined above),
  1386. ## and you use this number to refer to your type. There are many
  1387. ## different formats these settings can take, so be sure to refer to
  1388. ## the section on "Configuring The Startd for SMP Machines" in the
  1389. ## Condor Administrator's Manual for full details. In particular,
  1390. ## read the section titled "Defining Slot Types" to help
  1391. ## understand this setting. If you modify any of these settings, you
  1392. ## must restart the condor_startd for the change to take effect.
  1393. #SLOT_TYPE_<N> = 1/4
  1394. #SLOT_TYPE_<N> = cpus=1, ram=25%, swap=1/4, disk=1/4
  1395. # For example:
  1396. #SLOT_TYPE_1 = 1/8
  1397. #SLOT_TYPE_2 = 1/4
  1398.  
  1399. ## If you define your own slot types, you must specify how
  1400. ## many slots of each type you wish to advertise. You do
  1401. ## this with the setting below, replacing the "<N>" with the
  1402. ## corresponding integer you used to define the type above. You can
  1403. ## change the number of a given type being advertised at run-time,
  1404. ## with a simple condor_reconfig.
  1405. #NUM_SLOTS_TYPE_<N> = M
  1406. # For example:
  1407. #NUM_SLOTS_TYPE_1 = 6
  1408. #NUM_SLOTS_TYPE_2 = 1
  1409.  
  1410. ## The number of evenly-divided slots you want Condor to
  1411. ## report to your pool (if less than the total number of CPUs). This
  1412. ## setting is only considered if the "type" settings described above
  1413. ## are not in use. By default, all CPUs are reported. This setting
  1414. ## must be an integer ("N" isn't a valid setting, that's just used to
  1415. ## represent the default).
  1416. #NUM_SLOTS = N
  1417.  
  1418. ## How many of the slots the startd is representing should
  1419. ## be "connected" to the console (in other words, notice when there's
  1420. ## console activity)? This defaults to all slots (N in a
  1421. ## machine with N CPUs). This must be an integer ("N" isn't a valid
  1422. ## setting, that's just used to represent the default).
  1423. #SLOTS_CONNECTED_TO_CONSOLE = N
  1424.  
  1425. ## How many of the slots the startd is representing should
  1426. ## be "connected" to the keyboard (for remote tty activity, as well
  1427. ## as console activity). Defaults to 1.
  1428. #SLOTS_CONNECTED_TO_KEYBOARD = 1
  1429.  
  1430. ## If there are slots that aren't connected to the
  1431. ## keyboard or the console (see the above two settings), the
  1432. ## corresponding idle time reported will be the time since the startd
  1433. ## was spawned, plus the value of this parameter. It defaults to 20
  1434. ## minutes. We do this because, if the slot is configured
  1435. ## not to care about keyboard activity, we want it to be available to
  1436. ## Condor jobs as soon as the startd starts up, instead of having to
  1437. ## wait for 15 minutes or more (which is the default time a machine
  1438. ## must be idle before Condor will start a job). If you don't want
  1439. ## this boost, just set the value to 0. If you change your START
  1440. ## expression to require more than 15 minutes before a job starts,
  1441. ## but you still want jobs to start right away on some of your SMP
  1442. ## nodes, just increase this parameter.
  1443. #DISCONNECTED_KEYBOARD_IDLE_BOOST = 1200
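## For example (a sketch, assuming a START expression that demands 30
## minutes of keyboard idle time), you might raise the boost so that
## disconnected slots still start jobs as soon as the startd comes up:
#DISCONNECTED_KEYBOARD_IDLE_BOOST = 1800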
  1444.  
  1445. ######
  1446. ## Settings for computing optional resource availability statistics:
  1447. ######
  1448. ## If STARTD_COMPUTE_AVAIL_STATS = True, the startd will compute
  1449. ## statistics about resource availability to be included in the
  1450. ## classad(s) sent to the collector describing the resource(s) the
  1451. ## startd manages. The following attributes will always be included
  1452. ## in the resource classad(s) if STARTD_COMPUTE_AVAIL_STATS = True:
  1453. ## AvailTime = What proportion of the time (between 0.0 and 1.0)
  1454. ## has this resource been in a state other than "Owner"?
  1455. ## LastAvailInterval = What was the duration (in seconds) of the
  1456. ## last period between "Owner" states?
  1457. ## The following attributes will also be included if the resource is
  1458. ## not in the "Owner" state:
  1459. ## AvailSince = At what time did the resource last leave the
  1460. ## "Owner" state? Measured in the number of seconds since the
  1461. ## epoch (00:00:00 UTC, Jan 1, 1970).
  1462. ## AvailTimeEstimate = Based on past history, this is an estimate
  1463. ## of how long the current period between "Owner" states will
  1464. ## last.
  1465. #STARTD_COMPUTE_AVAIL_STATS = False
  1466.  
  1467. ## If STARTD_COMPUTE_AVAIL_STATS = True, STARTD_AVAIL_CONFIDENCE sets
  1468. ## the confidence level of the AvailTimeEstimate. By default, the
  1469. ## estimate is based on the 80th percentile of past values.
  1470. #STARTD_AVAIL_CONFIDENCE = 0.8
  1471.  
  1472. ## STARTD_MAX_AVAIL_PERIOD_SAMPLES limits the number of samples of
  1473. ## past available intervals stored by the startd to limit memory and
  1474. ## disk consumption. Each sample requires 4 bytes of memory and
  1475. ## approximately 10 bytes of disk space.
  1476. #STARTD_MAX_AVAIL_PERIOD_SAMPLES = 100
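## For example (an illustrative sketch, not the shipped defaults), to turn
## the statistics on with a 90th-percentile estimate and a larger sample
## history:
#STARTD_COMPUTE_AVAIL_STATS = True
#STARTD_AVAIL_CONFIDENCE = 0.9
#STARTD_MAX_AVAIL_PERIOD_SAMPLES = 200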
  1477.  
  1478. ## CKPT_PROBE is the location of a program which computes aspects of the
  1479. ## CheckpointPlatform classad attribute. By default the location of this
  1480. ## executable will be here: $(LIBEXEC)/condor_ckpt_probe
  1481. CKPT_PROBE = $(LIBEXEC)/condor_ckpt_probe
  1482.  
  1483. ##--------------------------------------------------------------------
  1484. ## condor_schedd
  1485. ##--------------------------------------------------------------------
  1486. ## Where are the various shadow binaries installed?
  1487. SHADOW_LIST = SHADOW, SHADOW_STANDARD
  1488. SHADOW = $(SBIN)/condor_shadow
  1489. SHADOW_STANDARD = $(SBIN)/condor_shadow.std
  1490.  
  1491. ## When the schedd starts up, it can place its address (IP and port)
  1492. ## into a file. This way, tools running on the local machine don't
  1493. ## need to query the central manager to find the schedd. This
  1494. ## feature can be turned off by commenting out this setting.
  1495. SCHEDD_ADDRESS_FILE = $(SPOOL)/.schedd_address
  1496.  
  1497. ## Additionally, a daemon may store its ClassAd on the local filesystem
  1498. ## as well as sending it to the collector. This way, tools that need
  1499. ## information about a daemon do not have to contact the central manager
  1500. ## to get information about a daemon on the same machine.
  1501. ## This feature is necessary for Quill to work.
  1502. SCHEDD_DAEMON_AD_FILE = $(SPOOL)/.schedd_classad
  1503.  
  1504. ## How often should the schedd send an update to the central manager?
  1505. #SCHEDD_INTERVAL = 300
  1506.  
  1507. ## How long should the schedd wait between spawning each shadow?
  1508. #JOB_START_DELAY = 2
  1509.  
  1510. ## How many concurrent sub-processes should the schedd spawn to handle
  1511. ## queries? (Unix only)
  1512. #SCHEDD_QUERY_WORKERS = 3
  1513.  
  1514. ## How often should the schedd send a keep alive message to any
  1515. ## startds it has claimed? (5 minutes)
  1516. #ALIVE_INTERVAL = 300
  1517.  
  1518. ## This setting controls the maximum number of times that a
  1519. ## condor_shadow process can have a fatal error (exception) before
  1520. ## the condor_schedd will simply relinquish the match associated with
  1521. ## the dying shadow.
  1522. #MAX_SHADOW_EXCEPTIONS = 5
  1523.  
  1524. ## Estimated virtual memory size of each condor_shadow process.
  1525. ## Specified in kilobytes.
  1526. # SHADOW_SIZE_ESTIMATE = 800
  1527.  
  1528. ## The condor_schedd can renice the condor_shadow processes on your
  1529. ## submit machines. How "nice" do you want the shadows? (1-19).
  1530. ## The higher the number, the lower priority the shadows have.
  1531. # SHADOW_RENICE_INCREMENT = 0
  1532.  
  1533. ## The condor_schedd can renice scheduler universe processes
  1534. ## (e.g. DAGMan) on your submit machines. How "nice" do you want the
  1535. ## scheduler universe processes? (1-19). The higher the number, the
  1536. ## lower priority the processes have.
  1537. # SCHED_UNIV_RENICE_INCREMENT = 0
  1538.  
  1539. ## By default, when the schedd fails to start an idle job, it will
  1540. ## not try to start any other idle jobs in the same cluster during
  1541. ## that negotiation cycle. This makes negotiation much more
  1542. ## efficient for large job clusters. However, in some cases other
  1543. ## jobs in the cluster can be started even though an earlier job
  1544. ## can't. For example, the jobs' requirements may differ, because of
  1545. ## different disk space, memory, or operating system requirements.
  1546. ## Or, machines may be willing to run only some jobs in the cluster,
  1547. ## because their requirements reference the jobs' virtual memory size
  1548. ## or other attribute. Setting NEGOTIATE_ALL_JOBS_IN_CLUSTER to True
  1549. ## will force the schedd to try to start all idle jobs in each
  1550. ## negotiation cycle. This will make negotiation cycles last longer,
  1551. ## but it will ensure that all jobs that can be started will be
  1552. ## started.
  1553. #NEGOTIATE_ALL_JOBS_IN_CLUSTER = False
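## For example (illustrative only), a pool whose clusters mix jobs with very
## different memory or disk requirements might accept the longer cycles and
## enable this:
#NEGOTIATE_ALL_JOBS_IN_CLUSTER = True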
  1554.  
  1555. ## This setting controls how often, in seconds, the schedd considers
  1556. ## periodic job actions given by the user in the submit file.
  1557. ## (Currently, these are periodic_hold, periodic_release, and periodic_remove.)
  1558. #PERIODIC_EXPR_INTERVAL = 60
  1559.  
  1560. ######
  1561. ## Queue management settings:
  1562. ######
  1563. ## How often should the schedd truncate its job queue transaction
  1564. ## log? (Specified in seconds, once a day is the default.)
  1565. #QUEUE_CLEAN_INTERVAL = 86400
  1566.  
  1567. ## How often should the schedd commit "wall clock" run time for jobs
  1568. ## to the queue, so run time statistics remain accurate when the
  1569. ## schedd crashes? (Specified in seconds, once per hour is the
  1570. ## default. Set to 0 to disable.)
  1571. #WALL_CLOCK_CKPT_INTERVAL = 3600
  1572.  
  1573. ## What users do you want to grant super user access to this job
  1574. ## queue? (These users will be able to remove other users' jobs).
  1575. ## By default, this only includes root.
  1576. QUEUE_SUPER_USERS = root, condor
  1577.  
  1578.  
  1579. ##--------------------------------------------------------------------
  1580. ## condor_shadow
  1581. ##--------------------------------------------------------------------
  1582. ## If the shadow is unable to read a checkpoint file from the
  1583. ## checkpoint server, it keeps trying only if the job has accumulated
  1584. ## more than MAX_DISCARDED_RUN_TIME seconds of CPU usage. Otherwise,
  1585. ## the job is started from scratch. Defaults to 1 hour. This
  1586. ## setting is only used if USE_CKPT_SERVER (from above) is True.
  1587. #MAX_DISCARDED_RUN_TIME = 3600
  1588.  
  1589. ## Should periodic checkpoints be compressed?
  1590. #COMPRESS_PERIODIC_CKPT = False
  1591.  
  1592. ## Should vacate checkpoints be compressed?
  1593. #COMPRESS_VACATE_CKPT = False
  1594.  
  1595. ## Should we commit the application's dirty memory pages to swap
  1596. ## space during a periodic checkpoint?
  1597. #PERIODIC_MEMORY_SYNC = False
  1598.  
  1599. ## Should we write vacate checkpoints slowly? If nonzero, this
  1600. ## parameter specifies the speed at which vacate checkpoints should
  1601. ## be written, in kilobytes per second.
  1602. #SLOW_CKPT_SPEED = 0
  1603.  
  1604. ## How often should the shadow update the job queue with job
  1605. ## attributes that periodically change? Specified in seconds.
  1606. #SHADOW_QUEUE_UPDATE_INTERVAL = 15 * 60
  1607.  
  1608. ## Should the shadow wait to update certain job attributes for the
  1609. ## next periodic update, or should it immediately update these
  1610. ## attributes as they change? Due to performance concerns of
  1611. ## aggressive updates to a busy condor_schedd, the default is True.
  1612. #SHADOW_LAZY_QUEUE_UPDATE = TRUE
  1613.  
  1614.  
  1615. ##--------------------------------------------------------------------
  1616. ## condor_starter
  1617. ##--------------------------------------------------------------------
  1618. ## The condor_starter can renice the processes of Condor
  1619. ## jobs on your execute machines. If you want this, uncomment the
  1620. ## following entry and set it to how "nice" you want the user
  1621. ## jobs. (1-19) The larger the number, the lower priority the
  1622. ## process gets on your machines.
  1623. ## Note on Win32 platforms, this number needs to be greater than
  1624. ## zero (i.e. the job must be reniced) or the mechanism that
  1625. ## monitors CPU load on Win32 systems will give erratic results.
  1626. #JOB_RENICE_INCREMENT = 10
  1627.  
  1628. ## Should the starter do local logging to its own log file, or send
  1629. ## debug information back to the condor_shadow where it will end up
  1630. ## in the ShadowLog?
  1631. #STARTER_LOCAL_LOGGING = TRUE
  1632.  
  1633. ## If the UID_DOMAIN settings match on both the execute and submit
  1634. ## machines, but the UID of the user who submitted the job isn't in
  1635. ## the passwd file of the execute machine, the starter will normally
  1636. ## exit with an error. Do you want the starter to just start up the
  1637. ## job with the specified UID, even if it's not in the passwd file?
  1638. SOFT_UID_DOMAIN = True
  1639.  
  1640. ## honor the run_as_owner option from the condor submit file.
  1641. ##
  1642. #STARTER_ALLOW_RUNAS_OWNER = TRUE
  1643.  
  1644. ## Tell the Starter/Startd what program to use to remove a directory
  1645. ## condor_rmdir.exe is a windows-only command that does a better job
  1646. ## than the built-in rmdir command when it is run with elevated privileges,
  1647. ## such as when Condor is running as a service.
  1648. ## /s is delete sub-directories
  1649. ## /c is continue on error
  1650. WINDOWS_RMDIR = $(SBIN)\condor_rmdir.exe
  1651. #WINDOWS_RMDIR_OPTIONS = /s /c
  1652.  
  1653. ##--------------------------------------------------------------------
  1654. ## condor_procd
  1655. ##--------------------------------------------------------------------
  1656. ##
  1657. # the path to the procd binary
  1658. #
  1659. PROCD = $(SBIN)/condor_procd
  1660.  
  1661. # the path to the procd "address"
  1662. # - on UNIX this will be a named pipe; we'll put it in the
  1663. # $(LOCK) directory by default (note that multiple named pipes
  1664. # will be created in this directory for when the procd responds
  1665. # to its clients)
  1666. # - on Windows, this will be a named pipe as well (but named pipes on
  1667. # Windows are not even close to the same thing as named pipes on
  1668. # UNIX); the name will be something like:
  1669. # \\.\pipe\condor_procd
  1670. #
  1671. PROCD_ADDRESS = $(LOCK)/procd_pipe
  1672.  
  1673. # Note that in other Condor daemons, turning on D_PROCFAMILY will
  1674. # result in that daemon logging all of its interactions with the
  1675. # ProcD.
  1676. #
  1677. PROCD_LOG = $(LOG)/ProcLog
  1678.  
  1679. # This is the maximum period that the procd will use for taking
  1680. # snapshots (the actual period may be lower if a condor daemon registers
  1681. # a family for which it wants more frequent snapshots)
  1682. #
  1683. PROCD_MAX_SNAPSHOT_INTERVAL = 60
  1684.  
  1685. # On Windows, we send a process a "soft kill" via a WM_CLOSE message.
  1686. # This binary is used by the ProcD (and other Condor daemons if PRIVSEP
  1687. # is not enabled) to help when sending soft kills.
  1688. WINDOWS_SOFTKILL = $(SBIN)/condor_softkill
  1689.  
  1690. ##--------------------------------------------------------------------
  1691. ## condor_submit
  1692. ##--------------------------------------------------------------------
  1693. ## If you want condor_submit to automatically append an expression to
  1694. ## the Requirements expression or Rank expression of jobs at your
  1695. ## site, uncomment these entries.
  1696. #APPEND_REQUIREMENTS = (expression to append job requirements)
  1697. #APPEND_RANK = (expression to append job rank)
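## For example (an illustrative sketch, not a shipped default), to match
## only machines with more than 1 GB of memory and to prefer faster ones:
#APPEND_REQUIREMENTS = (Memory > 1024)
#APPEND_RANK = Mips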
  1698.  
  1699. ## If you want expressions only appended for either standard or
  1700. ## vanilla universe jobs, you can uncomment these entries. If any of
  1701. ## them are defined, they are used for the given universe, instead of
  1702. ## the generic entries above.
  1703. #APPEND_REQ_VANILLA = (expression to append to vanilla job requirements)
  1704. #APPEND_REQ_STANDARD = (expression to append to standard job requirements)
  1705. #APPEND_RANK_STANDARD = (expression to append to standard job rank)
  1706. #APPEND_RANK_VANILLA = (expression to append to vanilla job rank)
  1707.  
  1708. ## This can be used to define a default value for the rank expression
  1709. ## if one is not specified in the submit file.
  1710. #DEFAULT_RANK = (default rank expression for all jobs)
  1711.  
  1712. ## If you want universe-specific defaults, you can use the following
  1713. ## entries:
  1714. #DEFAULT_RANK_VANILLA = (default rank expression for vanilla jobs)
  1715. #DEFAULT_RANK_STANDARD = (default rank expression for standard jobs)
  1716.  
  1717. ## If you want condor_submit to automatically append expressions to
  1718. ## the job ClassAds it creates, you can uncomment and define the
  1719. ## SUBMIT_EXPRS setting. It works just like the STARTD_EXPRS
  1720. ## described above with respect to ClassAd vs. config file syntax,
  1721. ## strings, etc. One common use would be to have the full hostname
  1722. ## of the machine where a job was submitted placed in the job
  1723. ## ClassAd. You would do this by uncommenting the following lines:
  1724. #MACHINE = "$(FULL_HOSTNAME)"
  1725. #SUBMIT_EXPRS = MACHINE
  1726.  
  1727. ## Condor keeps a buffer of recently-used data for each file an
  1728. ## application opens. This macro specifies the default maximum number
  1729. ## of bytes to be buffered for each open file at the executing
  1730. ## machine.
  1731. #DEFAULT_IO_BUFFER_SIZE = 524288
  1732.  
  1733. ## Condor will attempt to consolidate small read and write operations
  1734. ## into large blocks. This macro specifies the default block size
  1735. ## Condor will use.
  1736. #DEFAULT_IO_BUFFER_BLOCK_SIZE = 32768
  1737.  
  1738. ##--------------------------------------------------------------------
  1739. ## condor_preen
  1740. ##--------------------------------------------------------------------
  1741. ## Who should condor_preen send email to?
  1742. #PREEN_ADMIN = $(CONDOR_ADMIN)
  1743.  
  1744. ## What files should condor_preen leave in the spool directory?
  1745. VALID_SPOOL_FILES = job_queue.log, job_queue.log.tmp, history, \
  1746. Accountant.log, Accountantnew.log, \
  1747. local_univ_execute, .quillwritepassword, \
  1748. .pgpass, \
  1749. .schedd_address, .schedd_classad
  1750.  
  1751. ## What files should condor_preen remove from the log directory?
  1752. INVALID_LOG_FILES = core
  1753.  
  1754. ##--------------------------------------------------------------------
  1755. ## Java parameters:
  1756. ##--------------------------------------------------------------------
  1757. ## If you would like this machine to be able to run Java jobs,
  1758. ## then set JAVA to the path of your JVM binary. If you are not
  1759. ## interested in Java, there is no harm in leaving this entry
  1760. ## empty or incorrect.
  1761.  
  1762. JAVA = /usr/bin/java
  1763.  
  1764. ## JAVA_CLASSPATH_DEFAULT gives the default set of paths in which
  1765. ## Java classes are to be found. Each path is separated by spaces.
  1766. ## If your JVM needs to be informed of additional directories, add
  1767. ## them here. However, do not remove the existing entries, as Condor
  1768. ## needs them.
  1769.  
  1770. JAVA_CLASSPATH_DEFAULT = $(LIB) $(LIB)/scimark2lib.jar .
  1771.  
  1772. ## JAVA_CLASSPATH_ARGUMENT describes the command-line parameter
  1773. ## used to introduce a new classpath:
  1774.  
  1775. JAVA_CLASSPATH_ARGUMENT = -classpath
  1776.  
  1777. ## JAVA_CLASSPATH_SEPARATOR describes the character used to mark
  1778. ## one path element from another:
  1779.  
  1780. JAVA_CLASSPATH_SEPARATOR = :
  1781.  
  1782. ## JAVA_BENCHMARK_TIME describes the number of seconds for which
  1783. ## to run Java benchmarks. A longer time yields a more accurate
  1784. ## benchmark, but consumes more otherwise useful CPU time.
  1785. ## If this time is zero or undefined, no Java benchmarks will be run.
  1786.  
  1787. JAVA_BENCHMARK_TIME = 2
  1788.  
  1789. ## If your JVM requires any special arguments not mentioned in
  1790. ## the options above, then give them here.
  1791.  
  1792. JAVA_EXTRA_ARGUMENTS =
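## For example (an illustrative value, not a default), to cap the JVM heap
## used for Java universe jobs:
#JAVA_EXTRA_ARGUMENTS = -Xmx512m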
  1793.  
  1794. ##
  1795. ##--------------------------------------------------------------------
  1796. ## Condor-G settings
  1797. ##--------------------------------------------------------------------
  1798. ## Where is the GridManager binary installed?
  1799.  
  1800. GRIDMANAGER = $(SBIN)/condor_gridmanager
  1801. GT2_GAHP = $(SBIN)/gahp_server
  1802. GRID_MONITOR = $(SBIN)/grid_monitor
  1803.  
  1804. ##--------------------------------------------------------------------
  1805. ## Settings that control the daemon's debugging output:
  1806. ##--------------------------------------------------------------------
  1807. ##
  1808. ## Note that the Gridmanager runs as the User, not a Condor daemon, so
  1809. ## all users must have write permission to the directory that the
  1810. ## Gridmanager will use for its logfile. Our suggestion is to create a
  1811. ## directory called GridLogs in $(LOG) with UNIX permissions 1777
  1812. ## (just like /tmp )
  1813. ## Another option is to use /tmp as the location of the GridManager log.
  1814. ##
  1815.  
  1816. MAX_GRIDMANAGER_LOG = 1000000
  1817. GRIDMANAGER_DEBUG =
  1818.  
  1819. GRIDMANAGER_LOG = $(LOG)/GridmanagerLog.$(USERNAME)
  1820. GRIDMANAGER_LOCK = $(LOCK)/GridmanagerLock.$(USERNAME)
  1821.  
  1822. ##--------------------------------------------------------------------
  1823. ## Various other settings that the Condor-G can use.
  1824. ##--------------------------------------------------------------------
  1825.  
  1826. ## The number of seconds between status update requests. You can make
  1827. ## this short (5 seconds) if you want Condor to respond quickly to
  1828. ## instances as they terminate, or you can make it long (300 seconds = 5
  1829. ## minutes) if you know your instances will run for awhile and don't
  1830. ## mind delay between when they stop and when Condor responds to them
  1831. ## stopping.
  1832. GRIDMANAGER_JOB_PROBE_INTERVAL = 300
  1833.  
  1834. ## For grid-type gt2 jobs (pre-WS GRAM), limit the number of jobmanager
  1835. ## processes the gridmanager will let run on the headnode. Letting too
  1836. ## many jobmanagers run causes severe load on the headnode.
  1837. GRIDMANAGER_MAX_JOBMANAGERS_PER_RESOURCE = 10
  1838.  
  1839. ## If we're talking to a Globus 2.0 resource, Condor-G will use the new
  1840. ## version of the GRAM protocol. The first option is how often to check the
  1841. ## proxy on the submit side of things. If the GridManager discovers a new
  1842. ## proxy, it will restart itself and use the new proxy for all future
  1843. ## jobs launched. In seconds, and defaults to 10 minutes
  1844. #GRIDMANAGER_CHECKPROXY_INTERVAL = 600
  1845.  
  1846. ## The GridManager will shut things down 3 minutes before losing contact
  1847. ## because of an expired proxy.
  1848. ## In seconds, and defaults to 3 minutes
  1849. #GRIDMANAGER_MINIMUM_PROXY_TIME = 180
  1850.  
  1851. ## Condor requires that each submitted job be designated to run under a
  1852. ## particular "universe".
  1853. ##
  1854. ## If no universe is specified in the submit file, Condor must pick one
  1855. ## for the job to use. By default, it chooses the "vanilla" universe.
  1856. ## The default can be overridden in the config file with the DEFAULT_UNIVERSE
  1857. ## setting, which is a string to insert into a job submit description if the
  1858. ## job does not try to define its own universe
  1859. ##
  1860. #DEFAULT_UNIVERSE = vanilla
  1861.  
  1862. #
  1863. # CRED_MIN_TIME_LEFT is a first pass at making sure that Condor-G
  1864. # does not submit your job without it having enough time left for the
  1865. # job to finish. For example, if you have a job that runs for 20 minutes, and
  1866. # you might spend 40 minutes in the queue, it's a bad idea to submit with less
  1867. # than an hour left before your proxy expires.
  1868. # 2 hours seemed like a reasonable default.
  1869. #
  1870. CRED_MIN_TIME_LEFT = 120
  1871.  
  1872.  
  1873. ##
  1874. ## The GridMonitor allows you to submit many more jobs to a GT2 GRAM server
  1875. ## than is normally possible.
  1876. #ENABLE_GRID_MONITOR = TRUE
  1877.  
  1878. ##
  1879. ## When an error occurs with the GridMonitor, how long should the
  1880. ## gridmanager wait before trying to submit a new GridMonitor job?
  1881. ## The default is 1 hour (3600 seconds).
  1882. #GRID_MONITOR_DISABLE_TIME = 3600
  1883.  
  1884. ##
  1885. ## The location of the wrapper for invoking
  1886. ## Condor GAHP server
  1887. ##
  1888. CONDOR_GAHP = $(SBIN)/condor_c-gahp
  1889. CONDOR_GAHP_WORKER = $(SBIN)/condor_c-gahp_worker_thread
  1890.  
  1891. ##
  1892. ## The Condor GAHP server has its own log. Like the Gridmanager, the
  1893. ## GAHP server is run as the User, not a Condor daemon, so all users must
  1894. ## have write permission to the directory used for the logfile. Our
  1895. ## suggestion is to create a directory called GridLogs in $(LOG) with
  1896. ## UNIX permissions 1777 (just like /tmp )
  1897. ## Another option is to use /tmp as the location of the CGAHP log.
  1898. ##
  1899. MAX_C_GAHP_LOG = 1000000
  1900.  
  1901. #C_GAHP_LOG = $(LOG)/GridLogs/CGAHPLog.$(USERNAME)
  1902. C_GAHP_LOG = /tmp/CGAHPLog.$(USERNAME)
  1903. C_GAHP_LOCK = /tmp/CGAHPLock.$(USERNAME)
  1904. C_GAHP_WORKER_THREAD_LOG = /tmp/CGAHPWorkerLog.$(USERNAME)
  1905. C_GAHP_WORKER_THREAD_LOCK = /tmp/CGAHPWorkerLock.$(USERNAME)
  1906.  
  1907. ##
  1908. ## Location of the PBS/LSF gahp and its associated binaries
  1909. ##
  1910. GLITE_LOCATION = $(LIBEXEC)/glite
  1911. BATCH_GAHP = $(GLITE_LOCATION)/bin/batch_gahp
  1912.  
  1913. ##
  1914. ## The location of the wrapper for invoking the Unicore GAHP server
  1915. ##
  1916. UNICORE_GAHP = $(SBIN)/unicore_gahp
  1917.  
  1918. ##
  1919. ## The location of the wrapper for invoking the NorduGrid GAHP server
  1920. ##
  1921. NORDUGRID_GAHP = $(SBIN)/nordugrid_gahp
  1922.  
  1923. ## The location of the CREAM GAHP server
  1924. CREAM_GAHP = $(SBIN)/cream_gahp
  1925.  
  1926. ## Condor-G and CredD can use MyProxy to refresh GSI proxies which are
  1927. ## about to expire.
  1928. #MYPROXY_GET_DELEGATION = /path/to/myproxy-get-delegation
  1929.  
  1930. ## The location of the Deltacloud GAHP server
  1931. DELTACLOUD_GAHP = $(SBIN)/deltacloud_gahp
  1932.  
  1933. ##
  1934. ## EC2 (REST): Universe = Grid, Grid_Resource = ec2
  1935. ##
  1936.  
  1937. ## The location of the ec2_gahp program, required
  1938. EC2_GAHP = $(SBIN)/ec2_gahp
  1939.  
  1940. ## Location of log files, useful for debugging, must be in
  1941. ## a directory writable by any user, such as /tmp
  1942. #EC2_GAHP_DEBUG = D_FULLDEBUG
  1943. EC2_GAHP_LOG = /tmp/EC2GahpLog.$(USERNAME)
  1944.  
  1945. ## As of this writing EC2 has a hard limit of 20 concurrently
  1946. ## running instances, so a limit of 20 is imposed so the GridManager
  1947. ## does not waste its time sending requests that will be rejected.
  1948. GRIDMANAGER_MAX_SUBMITTED_JOBS_PER_RESOURCE_EC2 = 20
  1949.  
  1950. ##
  1951. ##--------------------------------------------------------------------
  1952. ## condor_credd credential management daemon
  1953. ##--------------------------------------------------------------------
  1954. ## Where is the CredD binary installed?
  1955. CREDD = $(SBIN)/condor_credd
  1956.  
  1957. ## When the credd starts up, it can place its address (IP and port)
  1958. ## into a file. This way, tools running on the local machine don't
  1959. ## need an additional "-n host:port" command line option. This
  1960. ## feature can be turned off by commenting out this setting.
  1961. CREDD_ADDRESS_FILE = $(LOG)/.credd_address
  1962.  
  1963. ## Specify a remote credd server here,
  1964. #CREDD_HOST = $(CONDOR_HOST):$(CREDD_PORT)
  1965.  
  1966. ## CredD startup arguments
  1967. ## Start the CredD on a well-known port. Uncomment to simplify
  1968. ## connecting to a remote CredD. Note that this interface may change
  1969. ## in a future release.
  1970. CREDD_PORT = 9620
  1971. CREDD_ARGS = -p $(CREDD_PORT) -f
  1972.  
  1973. ## CredD daemon debugging log
  1974. CREDD_LOG = $(LOG)/CredLog
  1975. CREDD_DEBUG = D_FULLDEBUG
  1976. MAX_CREDD_LOG = 4000000
  1977.  
  1978. ## The credential owner submits the credential. This list specifies
  1979. ## other users who are also permitted to see all credentials. Defaults
  1980. ## to root on Unix systems, and Administrator on Windows systems.
  1981. #CRED_SUPER_USERS =
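## For example (illustrative only), to also let the "condor" account see
## all stored credentials:
#CRED_SUPER_USERS = condor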
  1982.  
  1983. ## Credential storage location. This directory must exist
  1984. ## prior to starting condor_credd. It is highly recommended to
  1985. ## restrict access permissions to _only_ the directory owner.
  1986. CRED_STORE_DIR = $(LOCAL_DIR)/cred_dir
  1987.  
  1988. ## Index file path of saved credentials.
  1989. ## This file will be automatically created if it does not exist.
  1990. #CRED_INDEX_FILE = $(CRED_STORE_DIR)/cred-index
  1991.  
  1992. ## condor_credd will attempt to refresh credentials when their
  1993. ## remaining lifespan is less than this value. Units = seconds.
  1994. #DEFAULT_CRED_EXPIRE_THRESHOLD = 3600
  1995.  
  1996. ## condor_credd periodically checks the remaining lifespan of stored
  1997. ## credentials, at this interval.
  1998. #CRED_CHECK_INTERVAL = 60
  1999.  
  2000. ##
  2001. ##--------------------------------------------------------------------
  2002. ## Stork data placement server
  2003. ##--------------------------------------------------------------------
  2004. ## Where is the Stork binary installed?
  2005. STORK = $(SBIN)/stork_server
  2006.  
  2007. ## When Stork starts up, it can place its address (IP and port)
  2008. ## into a file. This way, tools running on the local machine don't
  2009. ## need an additional "-n host:port" command line option. This
  2010. ## feature can be turned off by commenting out this setting.
  2011. STORK_ADDRESS_FILE = $(LOG)/.stork_address
  2012.  
  2013. ## Specify a remote Stork server here,
  2014. #STORK_HOST = $(CONDOR_HOST):$(STORK_PORT)
  2015.  
  2016. ## STORK_LOG_BASE specifies the basename for heritage Stork log files.
  2017. ## Stork uses this macro to create the following output log files:
  2018. ## $(STORK_LOG_BASE): Stork server job queue classad collection
  2019. ## journal file.
  2020. ## $(STORK_LOG_BASE).history: Used to track completed jobs.
  2021. ## $(STORK_LOG_BASE).user_log: User level log, also used by DAGMan.
  2022. STORK_LOG_BASE = $(LOG)/Stork
  2023.  
  2024. ## Modern Condor DaemonCore logging feature.
  2025. STORK_LOG = $(LOG)/StorkLog
  2026. STORK_DEBUG = D_FULLDEBUG
  2027. MAX_STORK_LOG = 4000000
  2028.  
  2029. ## Stork startup arguments
  2030. ## Start Stork on a well-known port. Uncomment to simplify
  2031. ## connecting to a remote Stork. Note that this interface may change
  2032. ## in a future release.
  2033. #STORK_PORT = 34048
  2034. STORK_PORT = 9621
  2035. STORK_ARGS = -p $(STORK_PORT) -f -Serverlog $(STORK_LOG_BASE)
  2036.  
  2037. ## Stork environment. Stork modules may require external programs and
  2038. ## shared object libraries. These are located using the PATH and
  2039. ## LD_LIBRARY_PATH environments. Further, some modules may require
  2040. ## further specific environments. By default, Stork inherits a full
  2041. ## environment when invoked from condor_master or the shell. If the
  2042. ## default environment is not adequate for all Stork modules, specify
  2043. ## a replacement environment here. This environment will be set by
  2044. ## condor_master before starting Stork, but does not apply if Stork is
  2045. ## started directly from the command line.
  2046. #STORK_ENVIRONMENT = TMP=/tmp;CONDOR_CONFIG=/special/config;PATH=/lib
  2047.  
  2048. ## Limits the number of concurrent data placements handled by Stork.
  2049. #STORK_MAX_NUM_JOBS = 5
  2050.  
  2051. ## Limits the number of retries for a failed data placement.
  2052. #STORK_MAX_RETRY = 5
  2053.  
  2054. ## Limits the run time for a data placement job, after which the
  2055. ## placement is considered failed.
  2056. #STORK_MAXDELAY_INMINUTES = 10
  2057.  
  2058. ## Temporary credential storage directory used by Stork.
  2059. #STORK_TMP_CRED_DIR = /tmp
  2060.  
  2061. ## Directory containing Stork modules.
  2062. #STORK_MODULE_DIR = $(LIBEXEC)
  2063.  
  2064. ##
  2065. ##--------------------------------------------------------------------
  2066. ## Quill Job Queue Mirroring Server
  2067. ##--------------------------------------------------------------------
  2068. ## Where is the Quill binary installed and what arguments should be passed?
  2069. QUILL = $(SBIN)/condor_quill
  2070. #QUILL_ARGS =
  2071.  
  2072. # Where is the log file for the quill daemon?
  2073. QUILL_LOG = $(LOG)/QuillLog
  2074.  
  2075. # The identification and location of the quill daemon for local clients.
  2076. QUILL_ADDRESS_FILE = $(LOG)/.quill_address
  2077.  
  2078. # If this is set to true, then the rest of the QUILL arguments must be defined
  2079. # for quill to function. If it is False or left undefined, then quill will not
  2080. # be consulted by either the scheduler or the tools, but in the case of a
  2081. # remote quill query where the local client has quill turned off, but the
  2082. # remote client has quill turned on, things will still function normally.
  2083. #QUILL_ENABLED = TRUE
  2084.  
  2085. #
  2086. # If Quill is enabled, by default it will only mirror the current job
  2087. # queue into the database. For historical jobs, and classads from other
  2088. # sources, the SQL Log must be enabled.
  2089. #QUILL_USE_SQL_LOG=FALSE
  2090.  
  2091. #
  2092. # The SQL Log can be enabled on a per-daemon basis. For example, to collect
  2093. # historical job information, but store no information about execute machines,
  2094. # uncomment these two lines
  2095. #QUILL_USE_SQL_LOG = FALSE
  2096. #SCHEDD.QUILL_USE_SQL_LOG = TRUE
  2097.  
  2098. # This will be the name of a quill daemon using this config file. This name
  2099. # should not conflict with any other quill name--or schedd name.
  2100. #QUILL_NAME = quill@postgresql-server.machine.com
  2101.  
  2102. # The PostgreSQL server requires usernames that can manipulate tables. This will
  2103. # be the username associated with this instance of the quill daemon mirroring
  2104. # a schedd's job queue. Each quill daemon must have a unique username
  2105. # associated with it otherwise multiple quill daemons will corrupt the data
  2106. # held under an identical user name.
  2107. #QUILL_DB_NAME = name_of_db
  2108.  
  2109. # The required password for the DB user which quill will use to read
  2110. # information from the database about the queue.
  2111. #QUILL_DB_QUERY_PASSWORD = foobar
  2112.  
  2113. # What kind of database server is this?
  2114. # For now, only PGSQL is supported
  2115. #QUILL_DB_TYPE = PGSQL
  2116.  
  2117. # The machine and port of the postgres server.
  2118. # Although this says IP Addr, it can be a DNS name.
  2119. # It must match whatever format you used for the .pgpass file, however
  2120. #QUILL_DB_IP_ADDR = machine.domain.com:5432
  2121.  
  2122. # The login to use to attach to the database for updating information.
  2123. # There should be an entry in file $SPOOL/.pgpass that gives the password
  2124. # for this login id.
  2125. #QUILL_DB_USER = quillwriter
  2126.  
  2127. # Polling period, in seconds, for when quill reads transactions out of the
  2128. # schedd's job queue log file and puts them into the database.
  2129. #QUILL_POLLING_PERIOD = 10
  2130.  
  2131. # Allows or disallows a remote query to the quill daemon and database
  2132. # which is reading this log file. Defaults to true.
  2133. #QUILL_IS_REMOTELY_QUERYABLE = TRUE
  2134.  
  2135. # Add debugging flags to here if you need to debug quill for some reason.
  2136. #QUILL_DEBUG = D_FULLDEBUG
  2137.  
  2138. # Number of seconds the master should wait for the Quill daemon to respond
  2139. # before killing it. This number might need to be increased for very
  2140. # large logfiles.
  2141. # The default is 3600 (one hour), but kicking it up to a few hours won't hurt
  2142. #QUILL_NOT_RESPONDING_TIMEOUT = 3600
  2143.  
  2144. # Should Quill hold open a database connection to the DBMSD?
  2145. # Each open connection consumes resources at the server, so large pools
  2146. # (100 or more machines) should set this variable to FALSE. Note the
  2147. # default is TRUE.
  2148. #QUILL_MAINTAIN_DB_CONN = TRUE
  2149.  
  2150. ##
  2151. ##--------------------------------------------------------------------
  2152. ## Database Management Daemon settings
  2153. ##--------------------------------------------------------------------
  2154. ## Where is the DBMSd binary installed and what arguments should be passed?
  2155. DBMSD = $(SBIN)/condor_dbmsd
  2156. DBMSD_ARGS = -f
  2157.  
  2158. # Where is the log file for the quill daemon?
  2159. DBMSD_LOG = $(LOG)/DbmsdLog
  2160.  
  2161. # Interval between consecutive purging calls (in seconds)
  2162. #DATABASE_PURGE_INTERVAL = 86400
  2163.  
  2164. # Interval between consecutive database reindexing operations
  2165. # This is only used when dbtype = PGSQL
  2166. #DATABASE_REINDEX_INTERVAL = 86400
  2167.  
  2168. # Number of days before purging resource classad history
  2169. # This includes things like machine ads, daemon ads, submitters
  2170. #QUILL_RESOURCE_HISTORY_DURATION = 7
  2171.  
  2172. # Number of days before purging job run information
  2173. # This includes job events, file transfers, matchmaker matches, etc
  2174. # This does NOT include the final job ad. condor_history does not need
  2175. # any of this information to work.
  2176. #QUILL_RUN_HISTORY_DURATION = 7
  2177.  
  2178. # Number of days before purging job classad history
  2179. # This is the information needed to run condor_history
  2180. #QUILL_JOB_HISTORY_DURATION = 3650
  2181.  
  2182. # DB size threshold for warning the condor administrator. This is checked
  2183. # after every purge. The size is given in gigabytes.
  2184. #QUILL_DBSIZE_LIMIT = 20
  2185.  
  2186. # Number of seconds the master should wait for the DBMSD to respond before
  2187. # killing it. This number might need to be increased for very large databases
  2188. # The default is 3600 (one hour).
  2189. #DBMSD_NOT_RESPONDING_TIMEOUT = 3600
  2190.  
  2191. ##
  2192. ##--------------------------------------------------------------------
  2193. ## VM Universe Parameters
  2194. ##--------------------------------------------------------------------
  2195. ## Where is the Condor VM-GAHP installed? (Required)
  2196. VM_GAHP_SERVER = $(SBIN)/condor_vm-gahp
  2197.  
  2198. ## If the VM-GAHP is to have its own log, define
  2199. ## the location of log file.
  2200. ##
  2201. ## Optionally, if you do NOT define VM_GAHP_LOG, logs of VM-GAHP will
  2202. ## be stored in the starter's log file.
  2203. ## However, on Windows machines you must always define VM_GAHP_LOG.
  2204. #
  2205. VM_GAHP_LOG = $(LOG)/VMGahpLog
  2206. MAX_VM_GAHP_LOG = 1000000
  2207. #VM_GAHP_DEBUG = D_FULLDEBUG
  2208.  
  2209. ## What kind of virtual machine program will be used for
  2210. ## the VM universe?
  2211. ## The three primary options are KVM, Xen and VMware. (Required: no default)
  2212. #VM_TYPE = kvm
  2213.  
  2214. ## How much memory can be used for the VM universe? (Required)
  2215. ## This value is the maximum amount of memory that can be used by the
  2216. ## virtual machine program.
  2217. #VM_MEMORY = 128
  2218.  
  2219. ## Want to support networking for VM universe?
  2220. ## Default value is FALSE
  2221. #VM_NETWORKING = FALSE
  2222.  
  2223. ## What kind of networking types are supported?
  2224. ##
  2225. ## If you set VM_NETWORKING to TRUE, you must define this parameter.
  2226. ## VM_NETWORKING_TYPE = nat
  2227. ## VM_NETWORKING_TYPE = bridge
  2228. ## VM_NETWORKING_TYPE = nat, bridge
  2229. ##
  2230. ## If multiple networking types are defined, you may define
  2231. ## VM_NETWORKING_DEFAULT_TYPE for default networking type.
  2232. ## Otherwise, nat is used for default networking type.
  2233. ## VM_NETWORKING_DEFAULT_TYPE = nat
  2234. #VM_NETWORKING_DEFAULT_TYPE = nat
  2235. #VM_NETWORKING_TYPE = nat
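## For example (a sketch for a site that supports both types and prefers
## bridged networking by default; not the shipped values):
#VM_NETWORKING = TRUE
#VM_NETWORKING_TYPE = nat, bridge
#VM_NETWORKING_DEFAULT_TYPE = bridge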
  2236.  
  2237. ## By default, the number of possible virtual machines is the same as
  2238. ## NUM_CPUS.
  2239. ## Since too many virtual machines can cause the system to be too slow
  2240. ## and lead to unexpected problems, limit the number of running
  2241. ## virtual machines on this machine with
  2242. #VM_MAX_NUMBER = 2
  2243.  
  2244. ## When a VM universe job is started, a status command is sent
  2245. ## to the VM-GAHP to see if the job is finished.
  2246. ## If the interval between checks is too short, it will consume
  2247. ## too much of the CPU. If the VM-GAHP fails to get status 5 times in a row,
  2248. ## an error will be reported to the startd, and then the startd will check
  2249. ## the availability of the VM universe.
  2250. ## Default value is 60 seconds and minimum value is 30 seconds
  2251. #VM_STATUS_INTERVAL = 60
  2252.  
  2253. ## How long will we wait for a request sent to the VM-GAHP to be completed?
  2254. ## If a request is not completed within the timeout, an error will be reported
  2255. ## to the startd, and then the startd will check
  2256. ## the availability of vm universe. Default value is 5 minutes.
  2257. #VM_GAHP_REQ_TIMEOUT = 300
  2258.  
  2259. ## When VMware or Xen causes an error, the startd will disable the
  2260. ## VM universe. However, because some errors are just transient,
  2261. ## we will test once more whether the VM universe is still
  2262. ## unavailable after some time.
  2263. ## By default, the startd will recheck the VM universe after 10 minutes.
  2264. ## If the test also fails, the VM universe will be disabled.
  2265. #VM_RECHECK_INTERVAL = 600
  2266.  
  2267. ## Usually, when we suspend a VM, the memory being used by the VM
  2268. ## will be saved into a file and then freed.
  2269. ## However, when we use soft suspend, neither saving nor memory freeing
  2270. ## will occur.
  2271. ## For VMware, we send SIGSTOP to a process for VM in order to
  2272. ## stop the VM temporarily and send SIGCONT to resume the VM.
  2273. ## For Xen, we pause the CPU. Pausing the CPU doesn't save the VM's memory
  2274. ## into a file. It only stops the execution of the VM temporarily.
  2275. #VM_SOFT_SUSPEND = TRUE
  2276.  
  2277. ## If Condor runs as root and a job comes from a different UID domain,
  2278. ## Condor generally uses "nobody", unless SLOTx_USER is defined.
  2279. ## If "VM_UNIV_NOBODY_USER" is defined, a VM universe job will run
  2280. ## as the user defined in "VM_UNIV_NOBODY_USER" instead of "nobody".
  2281. ##
  2282. ## Notice: In the VMware VM universe, "nobody" cannot create a VMware VM.
  2283. ## So we need to define "VM_UNIV_NOBODY_USER" with a regular user.
  2284. ## For VMware, the user defined in "VM_UNIV_NOBODY_USER" must have a
  2285. ## home directory. So SOFT_UID_DOMAIN doesn't work for VMware VM universe job.
  2286. ## If neither "VM_UNIV_NOBODY_USER" nor "SLOTx_VMUSER"/"SLOTx_USER" is defined,
  2287. ## VMware VM universe job will run as "condor" instead of "nobody".
  2288. ## As a result, the preference of local users for a VMware VM universe job
  2289. ## which comes from a different UID domain is
  2290. ## "VM_UNIV_NOBODY_USER" -> "SLOTx_VMUSER" -> "SLOTx_USER" -> "condor".
  2291. #VM_UNIV_NOBODY_USER = login name of a user who has a home directory
  2292.  
  2293. ## If Condor runs as root and "ALWAYS_VM_UNIV_USE_NOBODY" is set to TRUE,
  2294. ## all VM universe jobs will run as a user defined in "VM_UNIV_NOBODY_USER".
  2295. #ALWAYS_VM_UNIV_USE_NOBODY = FALSE
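##
## A minimal sketch of the two knobs used together (the account name
## "vmguest" is hypothetical; it must be a real local account with a
## home directory):
#VM_UNIV_NOBODY_USER = vmguest
#ALWAYS_VM_UNIV_USE_NOBODY = TRUE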
  2296.  
  2297. ##--------------------------------------------------------------------
  2298. ## VM Universe Parameters Specific to VMware
  2299. ##--------------------------------------------------------------------
  2300.  
  2301. ## Where is the perl program? (Required)
  2302. VMWARE_PERL = perl
  2303.  
  2304. ## Where is the Condor script program to control VMware? (Required)
  2305. VMWARE_SCRIPT = $(SBIN)/condor_vm_vmware
  2306.  
  2307. ## Networking parameters for VMware
  2308. ##
  2309. ## What kind of VMware networking is used?
  2310. ##
  2311. ## If multiple networking types are defined, you may specify different
  2312. ## parameters for each networking type.
  2313. ##
  2314. ## Examples
  2315. ## (e.g.) VMWARE_NAT_NETWORKING_TYPE = nat
  2316. ## (e.g.) VMWARE_BRIDGE_NETWORKING_TYPE = bridged
  2317. ##
  2318. ## If there is no parameter for a specific networking type, VMWARE_NETWORKING_TYPE is used.
  2319. ##
  2320. #VMWARE_NAT_NETWORKING_TYPE = nat
  2321. #VMWARE_BRIDGE_NETWORKING_TYPE = bridged
  2322. VMWARE_NETWORKING_TYPE = nat
  2323.  
  2324. ## The contents of this file will be inserted into the .vmx file of
  2325. ## the VMware virtual machine before Condor starts it.
  2326. #VMWARE_LOCAL_SETTINGS_FILE = /path/to/file
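##
## As a hedged illustration (the path below is hypothetical, not a
## default), the file is plain text whose lines are inserted into the
## generated .vmx, so it could carry site-wide VMware options such as a
## memory setting:
#VMWARE_LOCAL_SETTINGS_FILE = /etc/condor/vmware_local_settings.vmx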
  2327.  
  2328. ##--------------------------------------------------------------------
  2329. ## VM Universe Parameters common to libvirt controlled vm's (xen & kvm)
  2330. ##--------------------------------------------------------------------
  2331.  
  2332. ## Networking parameters for Xen & KVM
  2333. ##
  2334. ## This is the path to the XML helper command; the libvirt_simple_script.awk
  2335. ## script just reproduces what Condor already does for the kvm/xen VM
  2336. ## universe
  2337. LIBVIRT_XML_SCRIPT = $(LIBEXEC)/libvirt_simple_script.awk
  2338.  
  2339. ## This is the optional debugging output file for the xml helper
  2340. ## script. Scripts that need to output debugging messages should
  2341. ## write them to the file specified by this argument, which will be
  2342. ## passed as the second command line argument when the script is
  2343. ## executed.
  2344.  
  2345. #LIBVIRT_XML_SCRIPT_ARGS = /dev/stderr
  2346.  
  2347. ##--------------------------------------------------------------------
  2348. ## VM Universe Parameters Specific to Xen
  2349. ##--------------------------------------------------------------------
  2350.  
  2351. ## Where is the bootloader for Xen domainU? (Required)
  2352. ##
  2353. ## The bootloader will be used in the case that a disk image includes
  2354. ## the kernel image (i.e. no separate kernel image is supplied)
  2355. #XEN_BOOTLOADER = /usr/bin/pygrub
  2356.  
  2357. ##
  2358. ##--------------------------------------------------------------------
  2359. ## condor_lease_manager lease manager daemon
  2360. ##--------------------------------------------------------------------
  2361. ## Where is the LeaseManager binary installed?
  2362. LeaseManager = $(SBIN)/condor_lease_manager
  2363.  
  2364. # Turn on the lease manager
  2365. #DAEMON_LIST = $(DAEMON_LIST), LeaseManager
  2366.  
  2367. # The identification and location of the lease manager for local clients.
  2368. LeaseManager_ADDRESS_FILE = $(LOG)/.lease_manager_address
  2369.  
  2370. ## LeaseManager startup arguments
  2371. #LeaseManager_ARGS = -local-name generic
  2372.  
  2373. ## LeaseManager daemon debugging log
  2374. LeaseManager_LOG = $(LOG)/LeaseManagerLog
  2375. LeaseManager_DEBUG = D_FULLDEBUG
  2376. MAX_LeaseManager_LOG = 1000000
  2377.  
  2378. # Basic parameters
  2379. LeaseManager.GETADS_INTERVAL = 60
  2380. LeaseManager.UPDATE_INTERVAL = 300
  2381. LeaseManager.PRUNE_INTERVAL = 60
  2382. LeaseManager.DEBUG_ADS = False
  2383.  
  2384. LeaseManager.CLASSAD_LOG = $(SPOOL)/LeaseManagerState
  2385. #LeaseManager.QUERY_ADTYPE = Any
  2386. #LeaseManager.QUERY_CONSTRAINTS = MyType == "SomeType"
  2387. #LeaseManager.QUERY_CONSTRAINTS = TargetType == "SomeType"
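#
# A minimal sketch of turning the lease manager on from a local config
# file (the constraint value "SomeType" is just the placeholder used
# above, not a real ad type):
#DAEMON_LIST = $(DAEMON_LIST), LeaseManager
#LeaseManager.QUERY_ADTYPE = Any
#LeaseManager.QUERY_CONSTRAINTS = MyType == "SomeType"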
  2388.  
  2389. ##
  2390. ##--------------------------------------------------------------------
  2391. ## KBDD - keyboard activity detection daemon
  2392. ##--------------------------------------------------------------------
  2393. ## When the KBDD starts up, it can place its address (IP and port)
  2394. ## into a file. This way, tools running on the local machine don't
  2395. ## need an additional "-n host:port" command line option. This
  2396. ## feature can be turned off by commenting out this setting.
  2397. KBDD_ADDRESS_FILE = $(LOG)/.kbdd_address
  2398.  
  2399. ##
  2400. ##--------------------------------------------------------------------
  2401. ## condor_ssh_to_job
  2402. ##--------------------------------------------------------------------
  2403. # NOTE: condor_ssh_to_job is not supported under Windows.
  2404.  
  2405. # Tell the starter (execute side) whether to allow the job owner or
  2406. # queue super user on the schedd from which the job was submitted to
  2407. # use condor_ssh_to_job to access the job interactively (e.g. for
  2408. # debugging). TARGET is the job; MY is the machine.
  2409. #ENABLE_SSH_TO_JOB = true
  2410.  
  2411. # Tell the schedd (submit side) whether to allow the job owner or
  2412. # queue super user to use condor_ssh_to_job to access the job
  2413. # interactively (e.g. for debugging). MY is the job; TARGET is not
  2414. # defined.
  2415. #SCHEDD_ENABLE_SSH_TO_JOB = true
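#
# Both knobs accept ClassAd expressions rather than just true/false, so
# access can be narrowed. A hedged sketch (the owner name "alice" is
# hypothetical; universe 5 is the vanilla universe):
#ENABLE_SSH_TO_JOB = (TARGET.Owner =?= "alice")
#SCHEDD_ENABLE_SSH_TO_JOB = (MY.JobUniverse == 5)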
  2416.  
  2417. # Command condor_ssh_to_job should use to invoke the ssh client.
  2418. # %h --> remote host
  2419. # %i --> ssh key file
  2420. # %k --> known hosts file
  2421. # %u --> remote user
  2422. # %x --> proxy command
  2423. # %% --> %
  2424. #SSH_TO_JOB_SSH_CMD = "ssh -oUser=%u -oIdentityFile=%i -oStrictHostKeyChecking=yes -oUserKnownHostsFile=%k -oGlobalKnownHostsFile=%k -oProxyCommand=%x %h"
  2425.  
  2426. # Additional ssh clients may be configured. They all have the same
  2427. # default as ssh, except for scp, which omits the %h:
  2428. #SSH_TO_JOB_SCP_CMD = "scp -oUser=%u -oIdentityFile=%i -oStrictHostKeyChecking=yes -oUserKnownHostsFile=%k -oGlobalKnownHostsFile=%k -oProxyCommand=%x"
  2429.  
  2430. # Path to sshd
  2431. #SSH_TO_JOB_SSHD = /usr/sbin/sshd
  2432.  
  2433. # Arguments the starter should use to invoke sshd in inetd mode.
  2434. # %f --> sshd config file
  2435. # %% --> %
  2436. #SSH_TO_JOB_SSHD_ARGS = "-i -e -f %f"
  2437.  
  2438. # sshd configuration template used by condor_ssh_to_job_sshd_setup.
  2439. #SSH_TO_JOB_SSHD_CONFIG_TEMPLATE = $(LIB)/condor_ssh_to_job_sshd_config_template
  2440.  
  2441. # Path to ssh-keygen
  2442. #SSH_TO_JOB_SSH_KEYGEN = /usr/bin/ssh-keygen
  2443.  
  2444. # Arguments to ssh-keygen
  2445. # %f --> key file to generate
  2446. # %% --> %
  2447. #SSH_TO_JOB_SSH_KEYGEN_ARGS = "-N '' -C '' -q -f %f -t rsa"
  2448.  
  2449. ######################################################################
  2450. ##
  2451. ## Condor HDFS
  2452. ##
  2453. ## This is the default local configuration file for configuring Condor
  2454. ## daemon responsible for running services related to hadoop
  2455. ## distributed storage system. You should copy this file to the
  2456. ## appropriate location and customize it for your needs.
  2457. ##
  2458. ## Unless otherwise specified, settings that are commented out show
  2459. ## the defaults that are used if you don't define a value. Settings
  2460. ## that are defined here MUST BE DEFINED since they have no default
  2461. ## value.
  2462. ##
  2463. ######################################################################
  2464.  
  2465. ######################################################################
  2466. ## FOLLOWING MUST BE CHANGED
  2467. ######################################################################
  2468.  
  2469. ## The location of the hadoop installation directory. The default location
  2470. ## is under the 'libexec' directory. The directory pointed to by HDFS_HOME
  2471. ## should contain a lib folder with all the Jars required
  2472. ## to run the HDFS name and data nodes.
  2473. #HDFS_HOME = $(RELEASE_DIR)/libexec/hdfs
  2474.  
  2475. ## The host and port for hadoop's name node. If this machine is the
  2476. ## name node (see HDFS_NODETYPE below), then the specified port will be used
  2477. ## to run the name node.
  2478. HDFS_NAMENODE = hdfs://example.com:9000
  2479. HDFS_NAMENODE_WEB = example.com:8000
  2480.  
  2481. HDFS_BACKUPNODE = hdfs://example.com:50100
  2482. HDFS_BACKUPNODE_WEB = example.com:50105
  2483.  
  2484. ## You need to pick one machine as the name node by setting this parameter
  2485. ## to HDFS_NAMENODE. The remaining machines in the storage cluster will
  2486. ## act as data nodes (HDFS_DATANODE).
  2487. HDFS_NODETYPE = HDFS_DATANODE
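##
## For example, the local configuration on the single machine chosen to
## act as the name node would override this (a sketch; all other
## machines keep HDFS_DATANODE):
#HDFS_NODETYPE = HDFS_NAMENODE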
  2488.  
  2489. ## If the machine is selected to be the NameNode, then a role should be defined.
  2490. ## If it is selected to be a DataNode, then this parameter is ignored.
  2491. ## Available options:
  2492. ## ACTIVE: Active NameNode role (default value)
  2493. ## BACKUP: Always synchronized with the active NameNode state, thus
  2494. ## creating a backup of the namespace. Currently the NameNode
  2495. ## supports one Backup node at a time.
  2496. ## CHECKPOINT: Periodically creates checkpoints of the namespace.
  2497. HDFS_NAMENODE_ROLE = ACTIVE
  2498.  
  2499. ## The two sets of directories required by HDFS are for the name
  2500. ## node (HDFS_NAMENODE_DIR) and the data node (HDFS_DATANODE_DIR). The
  2501. ## directory for the name node is only required on a machine running the
  2502. ## name node service and is used to store critical metadata for
  2503. ## files. The data node needs its directory to store file blocks and
  2504. ## their replicas.
  2505. HDFS_NAMENODE_DIR = /tmp/hadoop_name
  2506. HDFS_DATANODE_DIR = /scratch/tmp/hadoop_data
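##
## Note that /tmp is often cleared on reboot; on a production node one
## would likely point these at persistent storage instead, e.g. (both
## paths below are hypothetical):
#HDFS_NAMENODE_DIR = /var/lib/condor/hadoop_name
#HDFS_DATANODE_DIR = /data/hadoop_data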
  2507.  
  2508. ## Unlike the name node address setting (HDFS_NAMENODE), which needs to be
  2509. ## well known across the storage cluster, the data node can run on any
  2510. ## arbitrary port of a given host.
  2511. #HDFS_DATANODE_ADDRESS = 0.0.0.0:0
  2512.  
  2513. ####################################################################
  2514. ## OPTIONAL
  2515. #####################################################################
  2516.  
  2517. ## Sets the log4j debug level. All the emitted debug output from HDFS
  2518. ## will go into 'hdfs.log' under the $(LOG) directory.
  2519. #HDFS_LOG4J=DEBUG
  2520.  
  2521. ## Access to the HDFS services (both name node and data node) can be
  2522. ## restricted by specifying IP/host-based filters. By default, the settings
  2523. ## from ALLOW_READ/ALLOW_WRITE and DENY_READ/DENY_WRITE
  2524. ## are used to specify the allow and deny lists. The two parameters below can
  2525. ## be used to override these settings. Read the Condor manual for the
  2526. ## specification of these filters.
  2527. ## WARN: HDFS doesn't make any distinction between read-based and write-based connections.
  2528. #HDFS_ALLOW=*
  2529. #HDFS_DENY=*
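##
## A hedged sketch of a more restrictive setup (the domain and subnet
## below are hypothetical; check the manual for the exact filter syntax):
#HDFS_ALLOW = *.example.com, 10.0.0.*
#HDFS_DENY = *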
  2530.  
  2531. ## Fully qualified names for the NameNode, DataNode, and DFSAdmin classes.
  2532. #HDFS_NAMENODE_CLASS=org.apache.hadoop.hdfs.server.namenode.NameNode
  2533. #HDFS_DATANODE_CLASS=org.apache.hadoop.hdfs.server.datanode.DataNode
  2534. #HDFS_DFSADMIN_CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
  2535.  
  2536. ## In case an old name for the hdfs configuration file is required.
  2537. #HDFS_SITE_FILE = hdfs-site.xml
  2538.  
  2539.  
  2540. ##
  2541. ##--------------------------------------------------------------------
  2542. ## file transfer plugin defaults
  2543. ##--------------------------------------------------------------------
  2544. FILETRANSFER_PLUGINS = $(LIBEXEC)/curl_plugin, $(LIBEXEC)/data_plugin
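##
## To add a site-specific plugin while keeping the ones shipped with
## Condor, the list can reference its previous value (the plugin path
## below is hypothetical):
#FILETRANSFER_PLUGINS = $(FILETRANSFER_PLUGINS), /usr/local/libexec/myproto_plugin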