# REQUIREMENTS: valid host certificate and authentication config, a proper
# grid-mapfile (see [common] block config values).
# You can start the CE with the gridftpd, a-rex, nordugrid-arc-slapd and
# nordugrid-arc-bdii startup scripts.
#
# File:
#   /etc/arc.conf
#
# Author:
#   Miguel Gila
#
# GENERAL NOTE: REMOVED quotes EVERYWHERE

[common]
hostname=arc04.lcg.cscs.ch
x509_cert_dir=/cvmfs/grid.cern.ch/etc/grid-security/certificates
# values below are default; param names have changed (x509_user_* -> x509_host_*)
x509_host_key=/etc/grid-security/hostkey.pem
x509_host_cert=/etc/grid-security/hostcert.pem
# x509_user_key=/etc/grid-security/hostkey.pem
# x509_user_cert=/etc/grid-security/hostcert.pem
# preferably we should not use a gridmap at all; the param is no longer valid
#gridmap=/scratch/lhc/etc/grid-security/grid-mapfile

# stuff below moved from [common] to the [lrms] block
[lrms]
lrms=slurm
slurm_wakeupperiod=45
slurm_bin_path=/usr/bin/
slurm_use_sacct=yes
# moved to the [gridftpd] block
# globus_tcp_port_range=9000,9300

# Removed all [vo] blocks. These are now called [userlist:id], but I have
# removed all of them and fully implemented groups using the
# [authgroup:groupname] blocks below.
# [vo]
# id=atlas
# vo=atlas
# source=vomss://voms2.cern.ch:8443/voms/atlas?/atlas
# file=/scratch/lhc/etc/grid-security/grid-mapfile
# mapped_unixid=atlas01
# [vo]
# id=cms
# vo=cms
# source=vomss://voms2.cern.ch:8443/voms/cms?/cms
# file=/scratch/lhc/etc/grid-security/grid-mapfile
# mapped_unixid=cms01
# [vo]
# id=lhcb
# vo=lhcb
# source=vomss://voms2.cern.ch:8443/voms/lhcb?/lhcb
# file=/scratch/lhc/etc/grid-security/grid-mapfile
# mapped_unixid=lhcb01
# [vo]
# id=ops
# vo=ops
# source=vomss://voms2.cern.ch:8443/voms/ops?/ops
# file=/scratch/lhc/etc/grid-security/grid-mapfile
# mapped_unixid=ops01
# [vo]
# id=dteam
# vo=dteam
# source=vomss://voms2.hellasgrid.gr:8443/voms/dteam
# file=/scratch/lhc/etc/grid-security/grid-mapfile
# mapped_unixid=dteam02

# [group/groupname] block names changed to [authgroup:groupname]
# [group/atlas-lcgadmin]
[authgroup:atlas-lcgadmin]
voms=atlas * lcgadmin *

# [group/atlas-production]
[authgroup:atlas-production]
voms=atlas * production *

# The examples below show how to add a single DN or a file listing DNs; a
# leading - can also be used to subtract (ban) from an authgroup.
# Watch out for conflicts: the first match wins (rules are evaluated sequentially).
# A combined sketch follows after the [authgroup:dteam] block below.
# subject=DN
# file=filename with DNs
# -subject=DN bans that DN

# [group/atlas-pilot]
[authgroup:atlas-pilot]
voms=atlas * pilot *

# [group/atlas-ch] -> this value corrected to atlas-users
[authgroup:atlas-users]
voms=atlas * * *

# [group/atlas-ch]
[authgroup:atlas-ch]
voms=atlas /atlas/ch * *

# [group/cms-ch]
[authgroup:cms-ch]
voms=cms /cms/ch * *
voms=cms /cms/chcms * *

# [group/cms-lcgadmin]
[authgroup:cms-lcgadmin]
voms=cms * lcgadmin *
voms=cms /cms/ALARM pilot *

# [group/cms-pilot]
[authgroup:cms-pilot]
voms=cms * pilot *

# [group/cms-production]
[authgroup:cms-production]
voms=cms * production *

# [group/cms-users]
[authgroup:cms-users]
voms=cms * * *

# [group/lhcb-lcgadmin]
[authgroup:lhcb-lcgadmin]
voms=lhcb * lcgadmin *

# [group/lhcb-production]
[authgroup:lhcb-production]
voms=lhcb * production *

# [group/lhcb-pilot]
[authgroup:lhcb-pilot]
voms=lhcb * pilot *

# [group/lhcb-users]
[authgroup:lhcb-users]
voms=lhcb * * *

# [group/ops]
[authgroup:ops]
voms=ops * * *

# [group/ops-lcgadmin]
[authgroup:ops-lcgadmin]
voms=ops * * *

# [group/dteam]
[authgroup:dteam]
voms=dteam * * *
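# A minimal combined sketch of the rule types described above (the group name,
# DN and file path are made up for illustration only; rules are checked
# top-down and the first match wins, so the ban comes first):
# [authgroup:example-admins]
# -subject=/DC=ch/DC=example/CN=banned person
# file=/etc/grid-security/local-admin-dns
# voms=ops * lcgadmin *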
# Adding the block below, use it to simplify the auth in the gridftpd block.
# The group name is arbitrarily chosen.
[authgroup:cscs]
authgroup=atlas-users
authgroup=cms-users
authgroup=lhcb-users
authgroup=dteam
authgroup=ops

# Can also add something like this for banning users if needed
# [authgroup:banned-users]
# file=/arc/poolmaps/banned-users

# The new [mapping] block includes stuff previously in [gridftpd] and must be
# placed after the [authgroup:groupname] blocks.
# A worked example of the resulting mapping flow follows after this block.
[mapping]
# unixmap is now called map_to_user= and takes the authgroup name first:
# map_to_user=authgroupname unixname[:unixgroup]
# unixmap=atlassgm:atlas group atlas-lcgadmin
map_to_user=atlas-lcgadmin atlassgm:atlas
#unixgroup=atlas-lcgadmin simplepool /etc/grid-security/gridmapdir/atlas-lcgadmin
# can also have something like this (referring to [authgroup:banned-users])
# map_with_file=banned-users /arc/poolmaps/banned-users
# change all the lines below to this format:
# map_with_pools=atlas-production /scratch/lhc/etc/grid-security/gridmapdir/atlas-production
# unixgroup=atlas-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-production
# unixgroup=atlas-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-pilot
# unixgroup=atlas-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-users
# unixgroup=atlas-ch simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-ch
# unixgroup=ops simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops1
# unixgroup=ops-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops-lcgadmin
# unixgroup=ops-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops-production
# unixgroup=dteam simplepool /scratch/lhc/etc/grid-security/gridmapdir/dteam
# unixgroup=cms-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-lcgadmin
# unixgroup=cms-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-production
# unixgroup=cms-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-pilot
# unixgroup=cms-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-users
# unixgroup=cms-ch simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-ch
# unixgroup=lhcb-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-lcgadmin
# unixgroup=lhcb-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-production
# unixgroup=lhcb-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-pilot
# unixgroup=lhcb-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-users
map_with_pools=atlas-production /scratch/lhc/etc/grid-security/gridmapdir/atlas-production
map_with_pools=atlas-pilot /scratch/lhc/etc/grid-security/gridmapdir/atlas-pilot
map_with_pools=atlas-users /scratch/lhc/etc/grid-security/gridmapdir/atlas-users
map_with_pools=atlas-ch /scratch/lhc/etc/grid-security/gridmapdir/atlas-ch
map_with_pools=ops /scratch/lhc/etc/grid-security/gridmapdir/ops1
map_with_pools=ops-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/ops-lcgadmin
map_with_pools=ops-production /scratch/lhc/etc/grid-security/gridmapdir/ops-production
map_with_pools=dteam /scratch/lhc/etc/grid-security/gridmapdir/dteam
map_with_pools=cms-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/cms-lcgadmin
map_with_pools=cms-production /scratch/lhc/etc/grid-security/gridmapdir/cms-production
map_with_pools=cms-pilot /scratch/lhc/etc/grid-security/gridmapdir/cms-pilot
map_with_pools=cms-users /scratch/lhc/etc/grid-security/gridmapdir/cms-users
map_with_pools=cms-ch /scratch/lhc/etc/grid-security/gridmapdir/cms-ch
map_with_pools=lhcb-lcgadmin /scratch/lhc/etc/grid-security/gridmapdir/lhcb-lcgadmin
map_with_pools=lhcb-production /scratch/lhc/etc/grid-security/gridmapdir/lhcb-production
map_with_pools=lhcb-pilot /scratch/lhc/etc/grid-security/gridmapdir/lhcb-pilot
map_with_pools=lhcb-users /scratch/lhc/etc/grid-security/gridmapdir/lhcb-users
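# Worked example of the mapping flow above (assumed behaviour, for
# illustration): a proxy carrying the VOMS attribute /atlas/Role=production
# first matches [authgroup:atlas-production]; the corresponding
# map_with_pools= rule then leases one of the local accounts configured in
# /scratch/lhc/etc/grid-security/gridmapdir/atlas-production and records the
# DN -> account lease in that directory, so the same DN keeps the same account.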
# Now called [arex]
# [grid-manager]
[arex]
# the following ones are default, might as well omit
# delegationdb=sqlite
# user=root
# all debug= changed to loglevel=
# debug=3
loglevel=3
# 3 days session dir lifetime, 6 days info-about-jobs lifetime
defaultttl=259200 518400
# remove the following
# enable_cache_service=no
sessiondir=/scratch/lhc/arc_session/arc04
# This is LOCAL for max performance
controldir=/var/spool/nordugrid/jobstatus
# The following is default, might as well remove
# logfile=/var/log/arc/grid-manager.log
pidfile=/tmp/grid-manager.pid
# The following moved to the new [arex/cache/cleaner] block and the param is now called logfile=
# cachelogfile=/var/log/arc/cache-clean.log
mail=grid-rt@cscs.ch
# The file below is now called /var/log/arc/arex-jobs.log; it is the default, no need to declare it
#joblog=/var/log/arc/gm-jobs.log
tmpdir=/tmp
shared_filesystem=yes # default
# Not used by the nodes in ARC6, but useful to have it shared for multiple CEs
runtimedir=/scratch/lhc/arc_rte
# the 1800 at the end means that it won't cancel/submit more than 1800 jobs at the same time
maxjobs=40000 20000 8000 80000 1800
# renamed authplugin= to statecallout= - check how to change the two authplugin= lines below:
#   /usr/libexec/arc/arc-vomsac-check is deprecated: use allowaccess inside the queue block
#   /usr/local/bin/default_rte_plugin.py: use arcctl rte default instead
# authplugin=ACCEPTED 10 /usr/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy
# authplugin=PREPARING timeout=60,onfailure=pass,onsuccess=pass /usr/local/bin/default_rte_plugin.py %S %C %I ENV/PROXY
# authplugin=FINISHED timeout=60,onfailure=pass,onsuccess=pass /scratch/lhc/apps/arc_extras/copy_session_dir.v2.sh %S /var/spool/nordugrid/jobstatus %I /var/spool/nordugrid/completed_jobs %U 100 yes
statecallout=FINISHED timeout=60,onfailure=pass,onsuccess=pass /scratch/lhc/apps/arc_extras/copy_session_dir.v2.sh %S /var/spool/nordugrid/jobstatus %I /var/spool/nordugrid/completed_jobs %U 100 yes
# check if the lines above are for the cleaner block

# cache stuff below moves to the new [arex/cache] block
[arex/cache]
# There is no . at the end of cachedir, which means files are soft-linked into
# the session dir; instead of . you can also put drain (see the sketch after this block).
cachedir=/scratch/lhc/arc_cache/arc04
# change to one cache only for all CEs; the params below are obsoleted
# remotecachedir=/scratch/lhc/arc_cache/arc05
# remotecachedir=/scratch/lhc/arc_cache/arc06
# remotecachedir=/scratch/lhc/arc_cache/arc07
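# Sketch of the drain form mentioned above (assumed syntax, same path as this
# setup): appending drain stops new files from being added to this cache while
# existing entries remain usable and can still be cleaned.
# cachedir=/scratch/lhc/arc_cache/arc04 drain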
# params below moved to the new block [arex/cache/cleaner]
[arex/cache/cleaner]
cachesize=30 20
cachelifetime=30d
# cacheshared=yes is now called calculatesize=cachedir
calculatesize=cachedir
# cacheloglevel=3 - check if it is now called loglevel
loglevel=3
logfile=/var/log/arc/cache-clean.log

# accounting goes to the new block [arex/jura] and its sub-blocks
[arex/jura]

[arex/jura/archive]
archivedir=/var/spool/nordugrid_account_archive

[arex/jura/apel:egi]
targeturl=APEL:https://mq.cro-ngi.hr:6162
topic=/queue/global.accounting.cpu.central
gocdb_name=CSCS-LCG2
benchmark_type=HEPSPEC
benchmark_value=10.63
benchmark_description=HS06
use_ssl=yes
urbatchsize=1000
# jobreport_publisher=jura
# jobreport_credentials=/etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates
# jobreport=APEL:https://mq.cro-ngi.hr:6162
# jobreport_options=archiving:/var/spool/nordugrid_account_archive,topic:/queue/global.accounting.cpu.central,gocdb_name:CSCS-LCG2,benchmark_type:HEPSPEC,benchmark_value:10.63,use_ssl:true

# MG 08.02.16 TO BE TESTED
#arex_mount_point=https://arc03.lcg.cscs.ch:60000/arex
#enable_arc_interface=yes
#enable_emies_interface=yes

# now called [arex/data-staging]
# [data-staging]
[arex/data-staging]
# debug=3 is now loglevel=3
loglevel=3
# max number of concurrent transfers using the network; total number
# for the whole system, including remote staging hosts
maxdelivery=100
maxprepared=1000
maxprocessor=50
maxemergency=15
# check if the one below is really needed; if yes, declare the globus port ranges here as well
# passivetransfer=no
# sharetype=voms:vo => should be voms:role ? (only ATLAS uses data staging)
# now called sharepolicy=
sharepolicy=voms:role
# definedshare= has changed name to sharepriority= (see the sketch after this block)
#definedshare=atlas 38
#definedshare=cms 38
#definedshare=lhcb 20
#definedshare=dteam 2
#definedshare=ops 2
deliveryservice=http://arcds1.lcg.cscs.ch:443/datadeliveryservice
#deliveryservice=http://arcds1.lcg.cscs.ch:443/datadeliveryservice
remotesizelimit=1000000
# The one below is now called statefile= -> move its location to the controldir, or better
# comment it out so it is created in the default location (it's not a log)
# dtrlog=/var/log/arc/dtrstate.log
# this should make CSCS and CH transfers in general faster
preferredpattern=cscs.ch$|unibe.ch$|.ch$
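# If the per-VO shares are still wanted, a sketch using the renamed option
# (values taken from the commented definedshare= lines above; the exact form
# "sharepriority=share priority" is assumed):
# sharepriority=atlas 38
# sharepriority=cms 38
# sharepriority=lhcb 20
# sharepriority=dteam 2
# sharepriority=ops 2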
# gridftp server config
[gridftpd]
# user=root is the default
# debug=5 is now called loglevel=5
loglevel=5
# The following is default, might as well remove
# logfile=/var/log/arc/gridftpd.log
pidfile=/tmp/gridftpd.pid
# The following two params were removed
# port=2811 (is the default; param removed)
# allowunknown=yes (param removed)
# port ranges OK here
globus_tcp_port_range=9000,9300
globus_udp_port_range=9000,9300
# Here we can specify mapping to some harmless local user account for
# safety reasons. If that account is not allowed to submit jobs to the
# LRMS then this will also work as authorization, effectively cutting
# off users without proper VOMS attributes.
# unixmap=nobody:nobody all
# We would have to introduce one authgroup for this, since the group "all" no
# longer exists by default. In practice this is no longer needed.
# The one below is now called allowencryption= and is recommended to be no (the
# default) -> it is heavy and can only be requested by the client anyway.
# encryption=yes
maxconnections=1000
# this is crazy, but might work since we got tons of RAM here
# 655360 = 640kB (default)
# 6553600 = 6.4MB
# 65536000 = 64MB
maxbuffer=65536000
# this is crazy, but might work since we got tons of RAM here
# 65536 = 64kB (default)
# 655360 = 640kB
# 6553600 = 6.4MB
# 65536000 = 64MB
defaultbuffer=655360

# job submission interface via gridftp
[gridftpd/jobs]
# path= and plugin= removed
# path=/jobs
# plugin=jobplugin.so
allownew=yes
# add the following, referring to the authgroup:cscs defined above
allowaccess=cscs

# openldap server config
[infosys]
# debug=1 is now called loglevel=
loglevel=1
# consider removing the one below, it will default to the system-defined user.
# If doing so, MUST remember to change file/dir permissions where needed (pain).
user=root
logfile=/var/log/arc/infoprovider.log
# provider_loglevel=1 - remove this
# Remove both
# infosys_glue12=enable
# infosys_glue2_ldap=enable
# the params below were removed
# overwrite_config=yes
# oldconfsuffix=.oldconfig
# provider_timeout=900

# the following goes under the new block [infosys/ldap]
[infosys/ldap]
slapd_loglevel=0
port=2135
threads=128 # check if so many threads are needed; better to use the default setting
timelimit=360

# Add this empty block for the nordugrid schema
[infosys/nordugrid]
# param below removed
# registrationlog=/var/log/arc/inforegistration.log

# block below changed to [infosys/glue1] - check that all params are valid
# [infosys/glue12]
[infosys/glue1]
resource_location=Lugano, Switzerland
resource_latitude=46.025277
resource_longitude=8.959871
cpu_scaling_reference_si00=4640
processor_other_description=Cores=28, Benchmark=12.01-HEP-SPEC06
glue_site_web=http://www.cscs.ch/
glue_site_unique_id=CSCS-LCG2
provide_glue_site_info=true
# if willing to turn on glue1 ldap, add the following empty block
# [infosys/glue1/ldap]

# GLUE2 AdminDomain configuration
# now called [infosys/glue2]
# [infosys/admindomain]
[infosys/glue2]
# prefix the params below with admindomain_
# name=CSCS-LCG2 # admindomain_name= , etc
# description=Lugano, Switzerland
# www=http://www.cscs.ch/
# distributed=no
admindomain_name=CSCS-LCG2
admindomain_description=Lugano, Switzerland
admindomain_www=http://www.cscs.ch/
admindomain_distributed=no
# if willing to turn on glue2 ldap, add the following empty block
# [infosys/glue2/ldap]
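# For reference, the blocks assumed to drive LDAP publishing, collected from
# the notes above: [infosys] and [infosys/ldap] enable the LDAP/BDII service,
# [infosys/nordugrid] publishes the nordugrid schema, and the empty
# [infosys/glue1/ldap] / [infosys/glue2/ldap] blocks would additionally
# publish GLUE1 / GLUE2 over LDAP.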
# Remove all registrations
# [infosys/cluster/registration/GrisToAtlas]
# targethostname=atlasgiis.nbi.dk
# targetport=2135
# targetsuffix=mds-vo-name=Atlas,o=grid
# regperiod=30
# [infosys/cluster/registration/Atlas2]
# targetsuffix=mds-vo-name=Atlas,o=grid
# targethostname=arcgiis.grid.uio.no
# targetport=2135
# regperiod=600
# [infosys/cluster/registration/ClusterToSwitzerland]
# targethostname=giis.lhep.unibe.ch
# targetport=2135
# targetsuffix=mds-vo-name=Switzerland,o=grid
# regperiod=40
# [infosys/cluster/registration/AtlasTest]
# targetsuffix=mds-vo-name=Atlas-titan-test,o=grid
# targethostname=ce03.titan.uio.no
# targetport=2135
# regperiod=600

# remove this block
# [nordugridmap]
# mapuser_processing='overwrite'

# This block is now called [infosys/cluster]
# [cluster]
[infosys/cluster]
cluster_alias=Daint
cluster_location=CH-6900
cluster_owner=Swiss National Supercomputing Centre (CSCS)
clustersupport=grid-rt@cscs.ch
comment=This is a Cray XC supercomputer running LHC applications
architecture=x86_64
opsys=CentOS
opsys=6.7
opsys=Carbon
# MG 2017.04.05 - need to set this up, otherwise it reports 221427 CPUs
totalcpus=3876
nodecpu=Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
nodememory=126000
defaultmemory=2000
# move this to the lrms block
cpudistribution=72cpu:57
# nodeaccess=          <- nodes not reachable to/from the internet
# nodeaccess=outbound  <- nodes can only access the internet
# nodeaccess=inbound   <- nodes can be reached from the internet
# it is possible to set inbound and outbound on two lines, meaning full access
# to/from the internet
nodeaccess=outbound
gm_mount_point=/jobs # remove
gm_port=2811 # remove
lrmsconfig=shared system
homogeneity=True
#cachetime=30
#timelimit=1800
#sizelimit=10
benchmark=SPECINT2000 4640
# the params below are now called advertisedvo=; add them also under the queue block
# authorizedvo=atlas
# authorizedvo=cms
# authorizedvo=lhcb
# authorizedvo=ops
# authorizedvo=dteam
advertisedvo=atlas
advertisedvo=cms
advertisedvo=lhcb
advertisedvo=ops
advertisedvo=dteam

# Needs at least one queue block: [queue:name]
# [queue/wlcg]
[queue:wlcg]
name=wlcg
scheduling_policy=FIFO
advertisedvo=atlas
advertisedvo=cms
advertisedvo=lhcb
advertisedvo=ops
advertisedvo=dteam
# the params below are now specified in the form allowaccess=authgroupname
# ac_policy=+VOMS: /VO=lhcb
# ac_policy=+VOMS: /VO=cms
# ac_policy=+VOMS: /VO=dteam
# ac_policy=+VOMS: /VO=atlas
# ac_policy=+VOMS: /VO=ops
# NOTE: check if the line below is correct (the cscs group includes all VOs);
# an alternative per-VO sketch is at the end of this file.
allowaccess=cscs

# still not clear what ganglia implementation will be used. [arex/ganglia] is an
# alternative, but it is currently broken.
# Anyway, the [gangliarc] block here is no longer valid.
# [gangliarc]
# logfile=/var/spool/nordugrid/log/gangliarc.log

# custom blocks [custom:name] will be implemented
# [custom:cscs]
# Minimum memory that a job will allocate per VO (atlas, cms, lhcb, 'all')
# and per job type, SC=Single Core, MC=MultiCore.
# Puppet controls these values.
# min_mem_atlas_sc = '6000'
# min_mem_atlas_mc = '6000'
# min_mem_cms_sc = '6000'
# min_mem_cms_mc = '6000'
# min_mem_lhcb_sc = '6000'
# min_mem_lhcb_mc = '6000'
# min_mem_all_sc = '6000'
# min_mem_all_mc = '6000'
# node_constraint = 'wlcg'
# reservation = ''
# gres = 'craynetwork:0'
# run_thru_shifter = true
# shifter_vo_whitelist = 'cms' # comma separated values
# shifter_image = 'cscs/wlcg_wn:20180710'
# shifter_module = 'shifter-ng'
# shifter_job_store_path = '/scratch/lhc/arc_shifter_job_store'
# shifter_job_store_keep = false
# shifter_job_store_keep_path = '/var/spool/nordugrid/shifter_slurm_mods'
# shifter_options = '--mount=type=bind,source=/scratch/lhc/apps/cle60.up04/bin,destination=/usr/local/bin/ --mount=type=bind,source=/scratch/lhc,destination=/scratch/lhc --mount=type=bind,source=${HOME},destination=${HOME}'
# features_base_path = '/scratch/lhc/features_per_job' # location where machinefeatures/jobfeatures will be generated
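# Alternative to the single allowaccess=cscs line in [queue:wlcg] above, if
# per-VO control is ever needed (authgroup names reused from the
# [authgroup:*] blocks defined earlier; multiple allowaccess= lines are
# assumed to be accepted):
# allowaccess=atlas-users
# allowaccess=cms-users
# allowaccess=lhcb-users
# allowaccess=ops
# allowaccess=dteam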