# File:
# /etc/arc.conf
#
# Author:
# Miguel Gila
#
# This version:
# preliminary conversion for upgrade to ARC 6
# Gianfranco Sciacca
# ARC 6 camp 20181108
# GENERAL NOTE: this version is not final (validation not run)

[common]
hostname=arc04.lcg.cscs.ch
x509_cert_dir=/cvmfs/grid.cern.ch/etc/grid-security/certificates
x509_host_key=/etc/grid-security/hostkey.pem
x509_host_cert=/etc/grid-security/hostcert.pem

[lrms]
lrms=slurm
slurm_wakeupperiod=45
slurm_bin_path=/usr/bin/
slurm_use_sacct=yes

[authgroup:atlas-lcgadmin]
voms=atlas * lcgadmin *

[authgroup:atlas-production]
voms=atlas * production *

# The examples below show how to add a SINGLE DN or a file listing DNs. A leading - can
# also be used to subtract from an authgroup.
# Watch out for conflicts: the first match wins (rules are evaluated sequentially).
# subject=DN
# file=filename with DNs
# -subject=DN   # (this one bans)

[authgroup:atlas-pilot]
voms=atlas * pilot *

[authgroup:atlas-users]
voms=atlas * * *

[authgroup:atlas-ch]
voms=atlas /atlas/ch * *

[authgroup:cms-ch]
voms=cms /cms/ch * *
voms=cms /cms/chcms * *

[authgroup:cms-lcgadmin]
voms=cms * lcgadmin *
voms=cms /cms/ALARM pilot *

[authgroup:cms-pilot]
voms=cms * pilot *

[authgroup:cms-production]
voms=cms * production *

[authgroup:cms-users]
voms=cms * * *

[authgroup:lhcb-lcgadmin]
voms=lhcb * lcgadmin *

[authgroup:lhcb-production]
voms=lhcb * production *

[authgroup:lhcb-pilot]
voms=lhcb * pilot *

[authgroup:lhcb-users]
voms=lhcb * * *

[authgroup:ops]
voms=ops * * *

[authgroup:ops-lcgadmin]
voms=ops * * *

[authgroup:dteam]
voms=dteam * * *

# The block below consolidates some of the groups defined above.
# Use it to simplify the auth in the gridftpd block. The group name is arbitrarily chosen.
[authgroup:cscs]
authgroup=atlas-users
authgroup=cms-users
authgroup=lhcb-users
authgroup=dteam
authgroup=ops

# Something like this can also be added for banning users if needed:
# [authgroup:banned-users]
# file=/path/to/banned-users

[mapping]
# authgroup name first, then unixname:unixgroup (double-check this syntax)
map_to_user=atlas-lcgadmin atlassgm:atlas
# A file-based mapping (referring to [authgroup:banned-users]) is also possible:
# map_with_file=banned-users /path/to/banned-users
map_with_pools=atlas-production /scratch/lhc/etc/grid-security/gridmapdir/atlas-production
map_with_pools=atlas-pilot /scratch/lhc/etc/grid-security/gridmapdir/atlas-pilot
map_with_pools=atlas-users /scratch/lhc/etc/grid-security/gridmapdir/atlas-users
map_with_pools=atlas-ch /scratch/lhc/etc/grid-security/gridmapdir/atlas-ch
map_with_pools=ops /scratch/lhc/etc/grid-security/gridmapdir/ops1
map_with_pools=ops-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/ops-lcgadmin
map_with_pools=ops-production /scratch/lhc/etc/grid-security/gridmapdir/ops-production
map_with_pools=dteam /scratch/lhc/etc/grid-security/gridmapdir/dteam
map_with_pools=cms-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/cms-lcgadmin
map_with_pools=cms-production /scratch/lhc/etc/grid-security/gridmapdir/cms-production
map_with_pools=cms-pilot /scratch/lhc/etc/grid-security/gridmapdir/cms-pilot
map_with_pools=cms-users /scratch/lhc/etc/grid-security/gridmapdir/cms-users
map_with_pools=cms-ch /scratch/lhc/etc/grid-security/gridmapdir/cms-ch
map_with_pools=lhcb-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/lhcb-lcgadmin
map_with_pools=lhcb-production /scratch/lhc/etc/grid-security/gridmapdir/lhcb-production
map_with_pools=lhcb-pilot /scratch/lhc/etc/grid-security/gridmapdir/lhcb-pilot
map_with_pools=lhcb-users /scratch/lhc/etc/grid-security/gridmapdir/lhcb-users
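# Illustration only (commented out, not read by ARC) of the map_with_file idea
# mentioned above: the referenced file presumably follows the usual grid-mapfile
# layout, one quoted subject DN plus a local account per line; verify the exact
# format against the ARC 6 reference. The DN and account below are made up:
#   "/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=someuser/CN=000000/CN=Some User" nobody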
[arex]
loglevel=3
# 3 days session dir lifetime, 6 days lifetime for info about finished jobs
defaultttl=259200 518400
sessiondir=/scratch/lhc/arc_session/arc04
# This is LOCAL for max performance
controldir=/var/spool/nordugrid/jobstatus
pidfile=/tmp/grid-manager.pid
mail=grid-rt@cscs.ch
tmpdir=/tmp
shared_filesystem=yes   # default
# Not used by the nodes in ARC 6, but useful to have it shared for multiple CEs
runtimedir=/scratch/lhc/arc_rte
# The 1800 at the end means that it won't cancel/submit more than 1800 jobs at the same time
maxjobs=40000 20000 8000 80000 1800
# authplugin= was renamed to statecallout= in ARC 6. Check how to convert the two authplugin= lines below:
# /usr/libexec/arc/arc-vomsac-check is deprecated: use allowaccess inside the queue block
# /usr/local/bin/default_rte_plugin.py is replaced by "arcctl rte default" (see the note at the end of this file)
# authplugin=ACCEPTED 10 /usr/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy
# authplugin=PREPARING timeout=60,onfailure=pass,onsuccess=pass /usr/local/bin/default_rte_plugin.py %S %C %I ENV/PROXY
statecallout=FINISHED timeout=60,onfailure=pass,onsuccess=pass /scratch/lhc/apps/arc_extras/copy_session_dir.v2.sh %S /var/spool/nordugrid/jobstatus %I /var/spool/nordugrid/completed_jobs %U 100 yes
# check whether the lines above belong in the cleaner block

[arex/cache]
# No link path is given after the directory, so cached files are soft-linked into the session dir.
# A link path of . would copy the files instead; "drain" can be put there to drain the cache.
cachedir=/scratch/lhc/arc_cache/

[arex/cache/cleaner]
cachesize=30 20
cachelifetime=30d
calculatesize=cachedir
# cacheloglevel=3   # check if it is now called loglevel
loglevel=3
logfile=/var/log/arc/cache-clean.log

[arex/jura]

[arex/jura/archive]
archivedir=/var/spool/nordugrid_account_archive

[arex/jura/apel:egi]
targeturl=APEL:https://mq.cro-ngi.hr:6162
topic=/queue/global.accounting.cpu.central
gocdb_name=CSCS-LCG2
benchmark_type=HEPSPEC
benchmark_value=10.63
benchmark_description=HS06
use_ssl=yes
urbatchsize=1000

[arex/data-staging]
loglevel=3
# Max number of concurrent transfers using the network; this is the total
# for the whole system, including remote staging hosts
maxdelivery=100
maxprepared=1000
maxprocessor=50
maxemergency=15
sharepolicy=voms:role
# Check if the one below is really needed; if yes, declare the globus port ranges here as well
# passivetransfer=no
deliveryservice=http://arcds1.lcg.cscs.ch:443/datadeliveryservice
remotesizelimit=1000000
# This should make CSCS and CH transfers in general faster
preferredpattern=cscs.ch$|unibe.ch$|.ch$

# gridftp server config
[gridftpd]
loglevel=5
pidfile=/tmp/gridftpd.pid
globus_tcp_port_range=9000,9300
globus_udp_port_range=9000,9300
maxconnections=1000
# This is crazy, but might work since we have tons of RAM here
# 655360 = 640kB (default)
# 6553600 = 6.4MB
# 65536000 = 64MB
maxbuffer=65536000
# This is crazy, but might work since we have tons of RAM here
# 65536 = 64kB (default)
# 655360 = 640kB
# 6553600 = 6.4MB
# 65536000 = 64MB
defaultbuffer=655360

# job submission interface via gridftp
[gridftpd/jobs]
allownew=yes
allowaccess=cscs   # this is the authgroup:cscs
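# Client-side illustration only (not read by ARC): with the standard ARC client tools,
# a submission through this gridftp job interface would look roughly like the line
# below. job.xrsl is a hypothetical job description; access is gated by allownew=yes
# and the allowaccess=cscs authgroup above.
#   arcsub -c arc04.lcg.cscs.ch -S org.nordugrid.gridftpjob job.xrsl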
# openldap server config
[infosys]
loglevel=1
# Consider removing the one below; it will default to the system-defined user.
# If doing so, MUST remember to change file/dir permissions where needed (a pain).
user=root
logfile=/var/log/arc/infoprovider.log

[infosys/ldap]
slapd_loglevel=0
port=2135
threads=128   # check whether this many threads are needed; better to omit and use the default
timelimit=360

# Add this empty block for the nordugrid schema
[infosys/nordugrid]

# Double-check that all params are valid in the following block
[infosys/glue1]
resource_location=Lugano, Switzerland
resource_latitude=46.025277
resource_longitude=8.959871
cpu_scaling_reference_si00=4640
processor_other_description=Cores=28, Benchmark=12.01-HEP-SPEC06
glue_site_web=http://www.cscs.ch/
glue_site_unique_id=CSCS-LCG2
provide_glue_site_info=true
# To turn on glue1 LDAP publishing, add the following empty block:
# [infosys/glue1/ldap]

[infosys/glue2]
admindomain_name=CSCS-LCG2
# admindomain_name= , etc
admindomain_description=Lugano, Switzerland
admindomain_www=http://www.cscs.ch/
admindomain_distributed=no
# To turn on glue2 LDAP publishing, add the following empty block:
# [infosys/glue2/ldap]

[infosys/cluster]
cluster_alias=Daint
cluster_location=CH-6900
cluster_owner=Swiss National Supercomputing Centre (CSCS)
clustersupport=grid-rt@cscs.ch
comment=This is a Cray XC supercomputer running LHC applications
architecture=x86_64
opsys=CentOS
opsys=6.7
opsys=Carbon
# MG 2017.04.05 - need to set this up, otherwise it reports 221427 CPUs
totalcpus=3876
nodecpu=Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
nodememory=126000
defaultmemory=2000
# move this to the lrms block
cpudistribution=72cpu:57
# nodeaccess=          <- nodes not reachable to/from the internet
# nodeaccess=outbound  <- nodes can only access the internet
# nodeaccess=inbound   <- nodes can be reached from the internet
# Setting inbound and outbound on two lines means full access to/from the internet.
nodeaccess=outbound
lrmsconfig=shared system
homogeneity=True
#cachetime=30
#timelimit=1800
#sizelimit=10
benchmark=SPECINT2000 4640
advertisedvo=atlas
advertisedvo=cms
advertisedvo=lhcb
advertisedvo=ops
advertisedvo=dteam

[queue:wlcg]
name=wlcg
scheduling_policy=FIFO
advertisedvo=atlas
advertisedvo=cms
advertisedvo=lhcb
advertisedvo=ops
advertisedvo=dteam
allowaccess=cscs   # this is the authgroup:cscs

# Custom blocks [custom:name] will be implemented.
# [custom:cscs]
# Minimum memory that a job will allocate per VO (atlas, cms, lhcb, 'all')
# and per job type, SC=Single Core, MC=MultiCore.
# Puppet controls these values.
# min_mem_atlas_sc = '6000'
# min_mem_atlas_mc = '6000'
# min_mem_cms_sc = '6000'
# min_mem_cms_mc = '6000'
# min_mem_lhcb_sc = '6000'
# min_mem_lhcb_mc = '6000'
# min_mem_all_sc = '6000'
# min_mem_all_mc = '6000'
# node_constraint = 'wlcg'
# reservation = ''
# gres = 'craynetwork:0'
# run_thru_shifter = true
# shifter_vo_whitelist = 'cms'   # comma separated values
# shifter_image = 'cscs/wlcg_wn:20180710'
# shifter_module = 'shifter-ng'
# shifter_job_store_path = '/scratch/lhc/arc_shifter_job_store'
# shifter_job_store_keep = false
# shifter_job_store_keep_path = '/var/spool/nordugrid/shifter_slurm_mods'
# shifter_options = '--mount=type=bind,source=/scratch/lhc/apps/cle60.up04/bin,destination=/usr/local/bin/ --mount=type=bind,source=/scratch/lhc,destination=/scratch/lhc --mount=type=bind,source=${HOME},destination=${HOME}'
# features_base_path = '/scratch/lhc/features_per_job'   # location where machinefeatures/jobfeatures will be generated
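# Note on the RTE plugin replacement mentioned in the [arex] block: in ARC 6 runtime
# environments are managed with arcctl rather than with default_rte_plugin.py. A rough
# sketch of the commands (run once on the CE; ENV/PROXY is the RTE name taken from the
# old authplugin line, assumed here to be the one wanted as default):
#   arcctl rte enable ENV/PROXY
#   arcctl rte default ENV/PROXY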