[common]
#is default value x509_host_key=/etc/grid-security/hostkey.pem
#is default value x509_host_cert=/etc/grid-security/hostcert.pem
#is default value x509_cert_dir=/etc/grid-security/certificates
hostname=atlas.bluegrass.nsc.liu.se

[lrms]
lrms=slurm bluegrass
defaultmemory=2048

[authgroup:banned-users]
file=/arc/poolmaps/banned-users

[authgroup:local-static]
file=/arc/poolmaps/local-static

[authgroup:local-pool]
file=/arc/poolmaps/local-pool

[authgroup:ndgfops]
#file=/arc/poolmaps/vomsdata/ndgfops
voms=ops.ndgf.org * * *

[authgroup:cernops]
#file=/arc/poolmaps/vomsdata/cernops
voms=ops * * *

[authgroup:atlassoft]
voms=atlas * lcgadmin *

[authgroup:atlasprod]
voms=atlas * production *

[authgroup:atlasuser]
voms=atlas * * *
#file=/arc/poolmaps/vomsdata/atlas

[mapping]
#for banned-users - use denyaccess instead once implemented in ARC 6
map_with_file=banned-users /arc/poolmaps/banned-users
map_with_file=local-static /arc/poolmaps/local-static
map_with_plugin=local-pool 5 /arc/poolmaps/poolmap.pl %D cernops
map_with_plugin=atlassoft 5 /arc/poolmaps/poolmap.pl %D atlassoft
map_with_plugin=atlasprod 5 /arc/poolmaps/poolmap.pl %D atlasprod
map_with_plugin=atlasuser 5 /arc/poolmaps/poolmap.pl %D atlasuser
map_with_plugin=ndgfops 5 /arc/poolmaps/poolmap.pl %D ndgfops
map_with_plugin=cernops 5 /arc/poolmaps/poolmap.pl %D cernops
#(see the note on the poolmap.pl plugin contract after the [arex/ws/cache] block below)
#map_to_user will in this case not be needed in the future, since failure of the mappings above will imply access denied
map_to_user=nonexisting:nonexisting all

[arex]
watchdog=yes
#loglevel 3 is default value
loglevel=3
logfile=/arc/logs/atlas/grid-manager.log
joblog=/arc/logs/atlas/gm-jobs.log
#wakeupperiod 180 is default value
wakeupperiod=180
defaultttl=604800 1209600
mail=grid-admin@nsc.liu.se
## delegationdb db_name - specify which DB to use to store delegations.
#sqlite is the default
delegationdb=sqlite

# Make a copy of the controldir files on job finishing
#authplugin=FINISHED timeout=60,onfailure=log,ontimeout=log /arc/controldir-saved/save %I

#in ARC6 the worker nodes no longer need to see the runtimedir (it need not be shared to the WNs)
runtimedir=/arc/runtime
controldir=/arc/controldir
#scratchdir=${SNIC_TMP}
#scratchdir should instead be implemented via the ENV/LRMS_SCRATCH default RTE:
#arcctl rte params-set ENV/LRMS_SCRATCH SCRATCH_VAR SNIC_TMP
#arcctl rte default ENV/LRMS_SCRATCH
sessiondir=/arc/session/atlas-cache01
sessiondir=/arc/session/atlas-cache02
sessiondir=/arc/session/atlas-cache03

[arex/ws]
# Enable the Web Services interface and expose the cache
wsurl=https://atlas.bluegrass.nsc.liu.se:443/arex
logfile=/arc/logs/atlas/ws-interface.log
joblog=/arc/logs/atlas/arex-jobs.log
#Maiken added
max_data_transfer_requests=20

[arex/ws/emies]
#denyaccess to be implemented
#denyaccess=banned-users
allowaccess=local-static
allowaccess=local-pool
allowaccess=ndgfops
allowaccess=cernops
allowaccess=atlassoft
allowaccess=atlasprod
allowaccess=atlasuser

[arex/ws/cache]
cacheaccess=.* voms:vo ops.ndgf.org
cacheaccess=.* voms:vo atlas
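#Note on the map_with_plugin rules in [mapping] above (plugin contract as I read
#the ARC 6 arc.conf.reference - verify against your ARC version): the plugin is
#run with the substituted arguments (%D expands to the user certificate subject)
#and must finish within the given timeout (5 s here); the user is mapped to the
#Unix account the plugin prints on stdout when it exits 0, and a non-zero exit
#means the rule produces no mapping. What poolmap.pl does internally is
#site-specific; a hypothetical pool file for such a plugin could simply list one
#leasable account per line, e.g.:
#  atlasusr001
#  atlasusr002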
#do you know what the previous enable_cache_service does?
#you might not want the candypond (new name)
[arex/ws/candypond]

[arex/cache]
cachedir=/arc/cache/atlas-cache01
cachedir=/arc/cache/atlas-cache02
cachedir=/arc/cache/atlas-cache03

# Accounting
[arex/jura]
logfile=/arc/logs/atlas/accounting-jura.log
urdelivery_keepfailed=60

[arex/jura/archiving]
archivedir=/export/accounting

[arex/jura/sgas:bluegrass]
targeturl=https://accounting.ndgf.org:6143/sgas
urbatchsize=50

# gridftp server config
[gridftpd]
logfile=/arc/logs/atlas/gridftpd.log
globus_tcp_port_range=10001,15000
globus_udp_port_range=10001,15000

# job submission interface via gridftp
[gridftpd/jobs]
allownew=yes
#denyaccess to be implemented
#denyaccess=banned-users
allowaccess=local-static
allowaccess=local-pool
allowaccess=ndgfops
allowaccess=cernops
allowaccess=atlassoft
allowaccess=atlasprod
allowaccess=atlasuser

[arex/data-staging]
#loglevel 3 is the default, no need to specify
loglevel=3
maxtransfertries=30
# Maximum number of concurrent file transfers, i.e. active transfers
# using network bandwidth. This is the total number for the whole
# system including any remote staging hosts.
maxdelivery=90
# Maximum number of concurrent files in each pre- and post-processing
# state, e.g. cache check or replica resolution. Default is 10.
maxprocessor=20
# Maximum emergency slots which can be assigned to transfer shares
# when all slots up to the limits configured by the above two options
# are used by other shares. This ensures shares cannot be blocked by
# others.
maxemergency=20
# Maximum number of files in a prepared state, i.e. pinned on a remote
# storage such as SRM for transfer. A good value is a small multiple
# of maxdelivery. Default is 200.
maxprepared=180
#default is no
localdelivery=no
remotesizelimit=100000
sharepolicy=voms:role
sharepriority=atlas:production 50
sharepriority=atlas:pilot 50
sharepriority=atlas:null 1
deliveryservice=https://atlas-cache01.bluegrass.nsc.liu.se:60002/datadeliveryservice
deliveryservice=https://atlas-cache02.bluegrass.nsc.liu.se:60002/datadeliveryservice
deliveryservice=https://atlas-cache03.bluegrass.nsc.liu.se:60002/datadeliveryservice
#(a sketch of the matching [datadelivery-service] block for these hosts follows after [infosys/cluster] below)
# Enable remote cache stealing
#use_remote_acix=https://cacheindex.ndgf.org:6443/data/index
preferredpattern=pandaserver.cern.ch$|ndgf.org$|.se$|.dk$|.no$|.fi$|.si$

# openldap server config
[infosys]
logfile=/arc/logs/atlas/infoprovider.log
#default loglevel is 1
loglevel=3

[infosys/ldap]
slapd_loglevel=0
bdii_log_dir=/arc/logs/atlas
#port 2135 is default value
port=2135

[infosys/nordugrid]

# Glue1.2 config
[infosys/glue1]
resource_location=Linköping, Sweden
resource_latitude=58.392183
resource_longitude=15.571528
cpu_scaling_reference_si00=3893
processor_other_description=Cores=16,Benchmark=15.925-HEP-SPEC06
glue_site_web=http://www.nsc.liu.se
glue_site_unique_id=NDGF-T1

# Glue2 config
[infosys/glue2]
admindomain_name=NDGF-T1
admindomain_description=The Nordic DataGrid Facility
admindomain_www=http://www.ndgf.org/
admindomain_distributed=yes
admindomain_owner=support@ndgf.org

[infosys/glue2/ldap]

# Infosys view of the computing cluster (service)
[infosys/cluster]
cluster_alias=Bluegrass - Atlas (NSC)
comment=NDGF T1 cluster
homogeneity=True
architecture=x86_64
nodeaccess=outbound
nodecpu=Intel(R) Xeon(R) CPU E5-2660 0 @ 2.20GHz
nodememory=16384
cpudistribution=16cpu:40
#benchmark=SPECFP2000 11000
#benchmark=SPECINT2000 1905
benchmark=HEPSPEC2006 15.925
cluster_location=SE-58183
cluster_owner=NSC
cluster_owner=University of Linkoping
clustersupport=grid-admin@nsc.liu.se
advertisedvo=atlas
opsys=CentOS
opsys=7.5.1804
opsys=Final
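#The deliveryservice URLs in [arex/data-staging] above point at separate staging
#hosts; each of those runs its own ARC data delivery service, configured in its
#local arc.conf roughly as sketched below. Block and option names are as I recall
#them from the ARC 6 arc.conf.reference, and the values are assumptions for this
#site - in particular allowed_ips must be the real address of the A-REX host:
#[datadelivery-service]
#transfer_dirs=/arc/session
#transfer_dirs=/arc/cache
#allowed_ips=<IP address of atlas.bluegrass.nsc.liu.se>
#port=60002
#logfile=/arc/logs/atlas/datadelivery-service.log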
# every CE needs at least one queue
[queue:bluegrass]
comment=This is for ATLAS only
#advertisedvo=atlas
#allowaccess=atlassoft
#allowaccess=atlasprod
#allowaccess=atlasuser
##you might want another queue for the ops authgroups
#[queue:ops]
#allowaccess=ndgfops
#allowaccess=cernops

# ACIX Cache Scanner
[acix-scanner]
logfile=/arc/logs/atlas/acix-cache.log

#
# Define VOs and generate mapfiles from user lists maintained by the VO databases.
#
#[vo] is now [userlist:name] (ndgfops, cernops and atlas), but this is instead now implemented in the authgroups above
#(a commented-out [userlist:...] sketch is at the end of this file)
#Indexing is now done with ARCHERY, and registration is gone from the config
#issues with arex/ganglia - under investigation, keep commented out for now
#[arex/ganglia]
# period of gathering the info, in seconds. Default is 20.
#frequency=120
# log file of the daemon. Default is /var/log/arc/gangliarc.log
#logfile=/arc/logs/atlas/gangliarc.log
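#If mapfiles generated from the VO databases are ever needed again, ARC 6 does
#that with [userlist:name] blocks. A rough, commented-out sketch follows; the
#option names are recalled from the ARC 6 arc.conf.reference and the VOMS source
#URL is an assumption - verify both before enabling:
#[userlist:atlas]
#source=vomss://voms2.cern.ch:8443/voms/atlas
#outfile=/arc/poolmaps/vomsdata/atlas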