# REQUIREMENTS: a valid host certificate and authentication config, plus a
# proper grid-mapfile (see the [common] block config values).
# You can "start the CE" with the gridftpd, a-rex, nordugrid-arc-slapd and
# nordugrid-arc-bdii startup scripts.
#
# File:
#   /etc/arc.conf
#
# Author:
#   Miguel Gila

[common]
hostname="arc04.lcg.cscs.ch"
x509_user_key="/etc/grid-security/hostkey.pem"
x509_user_cert="/etc/grid-security/hostcert.pem"
x509_cert_dir="/cvmfs/grid.cern.ch/etc/grid-security/certificates"
gridmap="/scratch/lhc/etc/grid-security/grid-mapfile"
lrms="slurm"
slurm_wakeupperiod="45"
slurm_bin_path="/usr/bin/"
slurm_use_sacct="yes"
globus_tcp_port_range="9000,9300"

[vo]
id="atlas"
vo="atlas"
source="vomss://voms2.cern.ch:8443/voms/atlas?/atlas"
file="/scratch/lhc/etc/grid-security/grid-mapfile"
mapped_unixid="atlas01"

[vo]
id="cms"
vo="cms"
source="vomss://voms2.cern.ch:8443/voms/cms?/cms"
file="/scratch/lhc/etc/grid-security/grid-mapfile"
mapped_unixid="cms01"

[vo]
id="lhcb"
vo="lhcb"
source="vomss://voms2.cern.ch:8443/voms/lhcb?/lhcb"
file="/scratch/lhc/etc/grid-security/grid-mapfile"
mapped_unixid="lhcb01"

[vo]
id="ops"
vo="ops"
source="vomss://voms2.cern.ch:8443/voms/ops?/ops"
file="/scratch/lhc/etc/grid-security/grid-mapfile"
mapped_unixid="ops01"

[vo]
id="dteam"
vo="dteam"
source="vomss://voms2.hellasgrid.gr:8443/voms/dteam"
file="/scratch/lhc/etc/grid-security/grid-mapfile"
mapped_unixid="dteam02"

[group/atlas-lcgadmin]
voms="atlas * lcgadmin *"

[group/atlas-production]
voms="atlas * production *"

[group/atlas-pilot]
voms="atlas * pilot *"

[group/atlas-users]
voms="atlas * * *"

[group/atlas-ch]
voms="atlas /atlas/ch * *"

[group/cms-ch]
voms="cms /cms/ch * *"
voms="cms /cms/chcms * *"

[group/cms-lcgadmin]
voms="cms * lcgadmin *"
voms="cms /cms/ALARM pilot *"

[group/cms-pilot]
voms="cms * pilot *"

[group/cms-production]
voms="cms * production *"

[group/cms-users]
voms="cms * * *"

[group/lhcb-lcgadmin]
voms="lhcb * lcgadmin *"

[group/lhcb-production]
voms="lhcb * production *"

[group/lhcb-pilot]
voms="lhcb * pilot *"

[group/lhcb-users]
voms="lhcb * * *"

[group/ops]
voms="ops * * *"

[group/ops-lcgadmin]
voms="ops * * *"

[group/dteam]
voms="dteam * * *"

[grid-manager]
delegationdb="sqlite"
user="root"
cachesize="30 20"
cachelifetime="30d"
cacheshared="yes"
cacheloglevel="3"
# There is no '.' at the end of cachedir, which means cache files are
# soft-linked into the session directory; instead of '.' you can also
# put 'drain'.
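# For reference, the three cachedir forms (assuming the standard ARC
# cachedir="path [link_path]" syntax; the paths below are placeholders,
# not part of this site's setup):
#   cachedir="/path/to/cache"        -> files soft-linked into the session dir
#   cachedir="/path/to/cache ."      -> files copied into the session dir
#   cachedir="/path/to/cache drain"  -> cache takes no new files and drains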
cachedir="/scratch/lhc/arc_cache/arc04"
remotecachedir="/scratch/lhc/arc_cache/arc05"
remotecachedir="/scratch/lhc/arc_cache/arc06"
remotecachedir="/scratch/lhc/arc_cache/arc07"
sessiondir="/scratch/lhc/arc_session/arc04"
# This is LOCAL for max performance
controldir="/var/spool/nordugrid/jobstatus"
runtimedir="/scratch/lhc/arc_rte"
debug="3"
logfile="/var/log/arc/grid-manager.log"
pidfile="/tmp/grid-manager.pid"
cachelogfile="/var/log/arc/cache-clean.log"
mail="grid-rt@cscs.ch"
#joblog="/var/log/arc/gm-jobs.log"
tmpdir="/tmp"
shared_filesystem="yes"
jobreport_publisher="jura"
jobreport_credentials="/etc/grid-security/hostkey.pem /etc/grid-security/hostcert.pem /etc/grid-security/certificates"
jobreport="APEL:https://mq.cro-ngi.hr:6162"
jobreport_options="archiving:/var/spool/nordugrid_account_archive,topic:/queue/global.accounting.cpu.central,gocdb_name:CSCS-LCG2,benchmark_type:HEPSPEC,benchmark_value:10.63,use_ssl:true"
authplugin="ACCEPTED 10 /usr/libexec/arc/arc-vomsac-check -L %C/job.%I.local -P %C/job.%I.proxy"
authplugin="PREPARING timeout=60,onfailure=pass,onsuccess=pass /usr/local/bin/default_rte_plugin.py %S %C %I ENV/PROXY"
authplugin="FINISHED timeout=60,onfailure=pass,onsuccess=pass /scratch/lhc/apps/arc_extras/copy_session_dir.v2.sh %S /var/spool/nordugrid/jobstatus %I /var/spool/nordugrid/completed_jobs %U 100 yes"
# The 1800 at the end means that it won't cancel/submit more than 1800 jobs
# at the same time
maxjobs="40000 20000 8000 80000 1800"
# MG 08.02.16 TO BE TESTED
#arex_mount_point="https://arc03.lcg.cscs.ch:60000/arex"
#enable_arc_interface="yes"
#enable_emies_interface="yes"
watchdog="yes"
enable_cache_service="no"
# 3 days session dir lifetime
# 6 days info about jobs lifetime
defaultttl="259200 518400"
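# Quick arithmetic check of the defaultttl values above (both in seconds):
#   3 days = 3 * 86400 s = 259200 s (session dir lifetime)
#   6 days = 6 * 86400 s = 518400 s (job info lifetime)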
[data-staging]
debug="3"
# max number of concurrent transfers using the network; this is the total
# number for the whole system, including remote staging hosts
maxdelivery="100"
maxprepared="1000"
#sharetype="voms:vo"
maxprocessor="50"
maxemergency="15"
passivetransfer="no"
#definedshare="atlas 38"
#definedshare="cms 38"
#definedshare="lhcb 20"
#definedshare="dteam 2"
#definedshare="ops 2"
deliveryservice="http://arcds1.lcg.cscs.ch:443/datadeliveryservice"
#deliveryservice="http://arcds1.lcg.cscs.ch:443/datadeliveryservice"
remotesizelimit="1000000"
dtrlog="/var/log/arc/dtrstate.log"
# this should make CSCS and CH transfers in general faster
preferredpattern="cscs.ch$|unibe.ch$|.ch$"

# gridftp server config
[gridftpd]
user="root"
unixmap="atlassgm:atlas group atlas-lcgadmin"
#unixgroup="atlas-lcgadmin simplepool /etc/grid-security/gridmapdir/atlas-lcgadmin"
unixgroup="atlas-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-production"
unixgroup="atlas-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-pilot"
unixgroup="atlas-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-users"
unixgroup="atlas-ch simplepool /scratch/lhc/etc/grid-security/gridmapdir/atlas-ch"
unixgroup="ops simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops1"
unixgroup="ops-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops-lcgadmin"
unixgroup="ops-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/ops-production"
unixgroup="dteam simplepool /scratch/lhc/etc/grid-security/gridmapdir/dteam"
unixgroup="cms-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-lcgadmin"
unixgroup="cms-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-production"
unixgroup="cms-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-pilot"
unixgroup="cms-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-users"
unixgroup="cms-ch simplepool /scratch/lhc/etc/grid-security/gridmapdir/cms-ch"
unixgroup="lhcb-lcgadmin simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-lcgadmin"
unixgroup="lhcb-production simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-production"
unixgroup="lhcb-pilot simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-pilot"
unixgroup="lhcb-users simplepool /scratch/lhc/etc/grid-security/gridmapdir/lhcb-users"
debug="5"
logfile="/var/log/arc/gridftpd.log"
pidfile="/tmp/gridftpd.pid"
port="2811"
allowunknown="yes"
globus_tcp_port_range="9000,9300"
globus_udp_port_range="9000,9300"
# Here we can specify a mapping to some harmless local user account for
# safety reasons. If that account is not allowed to submit jobs to the
# LRMS, then this also works as authorization, effectively cutting off
# users without proper VOMS attributes.
unixmap="nobody:nobody all"
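# Illustrative mapping flow (assuming the rules above are evaluated in
# order, first match winning): a proxy carrying /atlas/Role=pilot matches
# [group/atlas-pilot] and leases an account from the atlas-pilot simplepool
# gridmapdir; a proxy with no matching VOMS attributes falls through to
# nobody:nobody above and, since nobody cannot submit to SLURM, is
# effectively rejected.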
unixmap="nobody:nobody all" encryption="yes" maxconnections="1000" # this is crazy, but might work since we got tons of RAM here # 655360 = 640kB (default) # 6655360 = 6.4MB # 665536000 = 64MB maxbuffer="65536000" # this is crazy, but might work since we got tons of RAM here # 65536 = 64kB (default) # 655360 = 640kB # 6655360 = 6.4MB # 665536000 = 64MB defaultbuffer="655360" # job submission interface via gridftp [gridftpd/jobs] path="/jobs" plugin="jobplugin.so" allownew="yes" [queue/wlcg] name="wlcg" scheduling_policy="FIFO" authorizedvo="cms" authorizedvo="lhcb" authorizedvo="dteam" authorizedvo="atlas" authorizedvo="ops" ac_policy="+VOMS: /VO=lhcb" ac_policy="+VOMS: /VO=cms" ac_policy="+VOMS: /VO=dteam" ac_policy="+VOMS: /VO=atlas" ac_policy="+VOMS: /VO=ops" # openldap server config [infosys] user="root" overwrite_config="yes" oldconfsuffix=".oldconfig" port="2135" debug="1" slapd_loglevel="0" registrationlog="/var/log/arc/inforegistration.log" providerlog="/var/log/arc/infoprovider.log" provider_loglevel="1" infosys_glue12=enable infosys_glue2_ldap="enable" threads="128" timelimit="360" provider_timeout="900" [infosys/glue12] resource_location="Lugano, Switzerland" resource_latitude="46.025277" resource_longitude="8.959871" cpu_scaling_reference_si00="4640" processor_other_description="Cores=28, Benchmark=12.01-HEP-SPEC06" glue_site_web="http://www.cscs.ch/" glue_site_unique_id="CSCS-LCG2" provide_glue_site_info="true" # GLUE2 AdminDomain configuration [infosys/admindomain] name="CSCS-LCG2" description="Lugano, Switzerland" www="http://www.cscs.ch/" distributed="no" [infosys/cluster/registration/GrisToAtlas] targethostname="atlasgiis.nbi.dk" targetport="2135" targetsuffix="mds-vo-name=Atlas,o=grid" regperiod="30" [infosys/cluster/registration/Atlas2] targetsuffix="mds-vo-name=Atlas,o=grid" targethostname="arcgiis.grid.uio.no" targetport="2135" regperiod="600" [infosys/cluster/registration/ClusterToSwitzerland] targethostname="giis.lhep.unibe.ch" targetport="2135" targetsuffix="mds-vo-name=Switzerland,o=grid" regperiod="40" [infosys/cluster/registration/AtlasTest] targetsuffix="mds-vo-name=Atlas-titan-test,o=grid" targethostname="ce03.titan.uio.no" targetport="2135" regperiod="600" [nordugridmap] mapuser_processing='overwrite' [cluster] cluster_alias="Daint" cluster_location="CH-6900" cluster_owner="Swiss National Supercomputing Centre (CSCS)" clustersupport="grid-rt@cscs.ch" comment="This is a Cray XC supercomputer running LHC applications" architecture="x86_64" opsys="CentOS" opsys="6.7" opsys="Carbon" # MG 2017.04.05 - need to set this up, otherwise it reports 221427 CPUs totalcpus="3876" nodecpu="Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz" nodememory="126000" defaultmemory="2000" cpudistribution="72cpu:57" # nodeaccess= <- nodes not reachable to/from internet # nodeaccess="outbound" <- nodes can only access internet # nodeaccess="inbound" <- nodes can be reached from internet # it is possible to set inbound and outbound on two lines, meaning full access # to/from internet nodeaccess="outbound" gm_mount_point="/jobs" gm_port="2811" lrmsconfig="shared system" homogeneity="True" #cachetime="30" #timelimit="1800" #sizelimit="10" benchmark="SPECINT2000 4640" authorizedvo="atlas" authorizedvo="cms" authorizedvo="lhcb" authorizedvo="ops" authorizedvo="dteam" [gangliarc] logfile="/var/spool/nordugrid/log/gangliarc.log" [cscs] # Minimum memory that a job will allocate per VO (atlas,cms,lhcb,'all') # and per job type, SC=Single Core, MC=MultiCore # Puppet controls these values 
[gangliarc]
logfile="/var/spool/nordugrid/log/gangliarc.log"

[cscs]
# Minimum memory that a job will allocate, per VO (atlas, cms, lhcb, 'all')
# and per job type: SC = single core, MC = multi core.
# Puppet controls these values.
min_mem_atlas_sc = '6000'
min_mem_atlas_mc = '6000'
min_mem_cms_sc = '6000'
min_mem_cms_mc = '6000'
min_mem_lhcb_sc = '6000'
min_mem_lhcb_mc = '6000'
min_mem_all_sc = '6000'
min_mem_all_mc = '6000'
node_constraint = 'wlcg'
reservation = ''
gres = 'craynetwork:0'
run_thru_shifter = true
shifter_vo_whitelist = 'cms' # comma-separated values
shifter_image = 'cscs/wlcg_wn:20180710'
shifter_module = 'shifter-ng'
shifter_job_store_path = '/scratch/lhc/arc_shifter_job_store'
shifter_job_store_keep = false
shifter_job_store_keep_path = '/var/spool/nordugrid/shifter_slurm_mods'
shifter_options = '--mount=type=bind,source=/scratch/lhc/apps/cle60.up04/bin,destination=/usr/local/bin/ --mount=type=bind,source=/scratch/lhc,destination=/scratch/lhc --mount=type=bind,source=${HOME},destination=${HOME}'
features_base_path = '/scratch/lhc/features_per_job' # location where machinefeatures/jobfeatures will be generated
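# Roughly what a whitelisted VO's payload ends up running inside, given the
# settings above (illustrative only; the actual invocation is generated by
# the shifter_module/job-store machinery, and the wrapper name below is a
# placeholder):
#   shifter --image=cscs/wlcg_wn:20180710 \
#     --mount=type=bind,source=/scratch/lhc,destination=/scratch/lhc \
#     <job-wrapper-script>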