# ARC-6.0.0

[common]
x509_host_key=/etc/grid-security/hostkey.pem
x509_host_cert=/etc/grid-security/hostcert.pem
x509_cert_dir=/etc/grid-security/certificates
x509_voms_dir=/etc/grid-security/vomsdir

[authgroup:users]
plugin=10 /usr/libexec/arc/arc-lcas %D %P liblcas.so /usr/lib64 /etc/lcas/lcas.db

[authgroup:gridmapusers]
file=/etc/grid-security/grid-mapfile

[mapping]
map_with_file=gridmapusers /etc/grid-security/grid-mapfile
map_with_plugin=users 30 /usr/libexec/arc/arc-lcmaps %D %P liblcmaps.so /usr/lib64 /etc/lcmaps/lcmaps.db voms
#do not use the fall-back mapping
#map_to_user=users 999:999

[lrms]
lrms=condor
condor_bin_path=/usr/bin
defaultmemory=2000

[arex]
#default is /var/run/arc/arex.pid
#pidfile=/var/run/arc/grid-manager.pid
#controldir should be on a local disk, ideally a dedicated partition, for performance reasons
controldir=/arccache/var/spool/arc/jobstatus
runtimedir=/etc/arc/runtime
#we are not sure what you intend here, since you have shared_filesystem=no and a scratchdir defined - please check
sessiondir=/arccache/var/spool/arc/grid
scratchdir=/tmp
shared_filesystem=no
loglevel=2
logfile=/arccache/var/log/arc/grid-manager.log
joblog=/arccache/var/log/arc/gm-jobs.log
mail=gang.qin@cern.ch
defaultttl=345600 432000
statecallout=FINISHING timeout=60,onfailure=pass,onsuccess=pass /usr/local/bin/scaling_factors_plugin.py %S %C %I

[arex/jura]
#these are the default values and do not need to be specified
x509_host_key=/etc/grid-security/hostkey.pem
x509_host_cert=/etc/grid-security/hostcert.pem
x509_cert_dir=/etc/grid-security/certificates

[arex/jura/apel:egi]
targeturl=http://mq.cro-ngi.hr:6162
topic=/queue/global.accounting.cpu.central
gocdb_name=UKI-SCOTGRID-GLASGOW
benchmark_type=Si2k
benchmark_value=2047
benchmark_description=Si2k
use_ssl=yes
#you had 50 - that might be too small
urbatchsize=250

[arex/jura/archiving]
archivedir=/arccache/var/run/arc/archive

# gridftp server config
[gridftpd]
loglevel=2
logfile=/arccache/var/log/arc/gridftpd.log
#this is the default and does not need to be specified
#pidfile=/var/run/arc/gridftpd.pid
maxconnections=300
globus_tcp_port_range=9000,9300
globus_udp_port_range=9000,9300

# job submission interface via gridftp
[gridftpd/jobs]
allownew=yes
#we think you intend to allow these groups, but we cannot verify this without access to your grid-mapfile and LCAS/LCMAPS configuration - please check
allowaccess=users
allowaccess=gridmapusers

# openldap server config
[infosys]
logfile=/arccache/var/log/arc/infoprovider.log
loglevel=3

[infosys/ldap]
#do not run as root; the user option belongs in this ldap block
#user=root
#the recommended level is WARNING
bdii_debug_level=ERROR
bdii_log_dir=/arccache/var/log/arc/bdii

[infosys/nordugrid]

[infosys/glue1]
resource_location=Glasgow,UK
resource_longitude=-3.5891122
resource_latitude=55.0481019
glue_site_web=http://www.scotgrid.ac.uk/
glue_site_unique_id=UKI-SCOTGRID-GLASGOW
cpu_scaling_reference_si00=2185
processor_other_description=Cores=8, Benchmark=8.74-HEP-SPEC06

[infosys/glue2]
admindomain_name=UKI-SCOTGRID-GLASGOW

# infosys view of the computing cluster (service)
[infosys/cluster]
cluster_alias=svr009.gla.scotgrid.ac.uk
comment=UKI-SCOTGRID-GLASGOW Condor Pool
homogeneity=True
architecture=x86_64
nodeaccess=outbound
opsys=CentOS
opsys=6.6
opsys=Carbon
nodememory=6000
advertisedvo=atlas
advertisedvo=lhcb
advertisedvo=cms
advertisedvo=ops
advertisedvo=dteam
advertisedvo=gridpp
advertisedvo=epic.vo.gridpp.ac.uk
advertisedvo=pheno
advertisedvo=ilc
advertisedvo=enmr.eu
advertisedvo=vo.scotgrid.ac.uk
advertisedvo=mice
advertisedvo=cernatschool.org
advertisedvo=na62.vo.gridpp.ac.uk
advertisedvo=lsst
benchmark=SPECINT2000 2185
#benchmark=SPECFP2000 2600
totalcpus=5032

# infosys view of the queue behind the computing service,
# every CE needs at least one queue
#[queue:fork]
#name=fork
#homogeneity=True
#scheduling_policy=FIFO
#comment=This queue is nothing more than a fork host
#nodecpu=adotf
#architecture=adotf

[queue:condor_q2d]
condor_requirements=(OpSys == LINUX)

#[queue:condor_queue]
#condor_requirements=(OpSys == LINUX)

#[queue:testq]
#condor_requirements=(OpSys == LINUX)
#name=testq
#max_user_run=10
#max_running=10
#scheduling_policy=MAUI
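
A note on the statecallout line in [arex]: A-REX runs the given executable each time a job enters the FINISHING state, substituting the job state, control directory and job ID for %S, %C and %I, and (with onfailure=pass) continues even if the plugin exits non-zero. We do not know what your scaling_factors_plugin.py does internally; purely as an illustration of the calling convention, a minimal skeleton for such a plugin could look like the sketch below (the argument handling reflects the %S %C %I order in your config, and all names are assumptions):

#!/usr/bin/env python
# Hypothetical skeleton only - the real scaling_factors_plugin.py
# logic is site-specific and not reproduced here.
import sys

def main(argv):
    # A-REX passes the substituted values as positional arguments:
    # argv[1] = job state (%S), argv[2] = control dir (%C), argv[3] = job id (%I)
    if len(argv) != 4:
        sys.stderr.write("usage: %s <state> <controldir> <jobid>\n" % argv[0])
        return 1
    state, controldir, jobid = argv[1], argv[2], argv[3]
    # site-specific work would go here, e.g. adjusting the accounting
    # records for this job with per-node CPU scaling factors
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))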