#################################
#                               #
#     A Simulation Script       #
#   (For use in ganga 5.1.6     #
#  + patch for md5sum/adler32)  #
#                               #
#################################
#
# Submits one ATLAS Geant4 simulation (EVGEN -> HITS) grid job per
# partition of the input evgen dataset, using the Ganga GPI
# (Job, AthenaMC, LCG, ... are injected by Ganga at runtime).

## Random number generator, used to produce a per-job random seed
import random

## Name of the input (evgen) dataset on DQ2
dataset = 'users.craigwiglesworth.ganga.datafiles.H130zz4mu.EVGEN.14.2.20.1'

## Partition numbers in the input dataset ('1' .. '15'); one job per partition
partitions = [str(n) for n in range(1, 16)]

## Loop over the partitions, configuring and submitting one job for each
for p in partitions:
    ## Create a new job
    j = Job()

    ## Local files the job needs, shipped via the input sandbox
    j.inputsandbox = [
        '/hepstore/store2/wiglesworth/AthenaMCgangaFiles/Simulation/misalignID.py',
        '/hepstore/store2/wiglesworth/AthenaMCgangaFiles/Simulation/misalignMS.py',
    ]

    ## Configure the job as an MC production job (template mode)
    j.application = AthenaMC()
    j.application.mode = 'template'

    ## Temporary workaround for some kind of simulation bug
    j.application.verbosity = 'INFO'

    ## Athena release to use
    j.application.atlas_release = '14.2.25.3'

    ## Job parameters from which the output files are named
    j.application.process_name = 'H130zz4mu.idmisal.msmisal'
    j.application.production_name = 'HITS'
    j.application.run_number = '000000'
    j.application.version = j.application.atlas_release

    ## Number of events for each (sub)job
    j.application.number_events_job = 100

    ## Output dataset; the HITS file name is built from the job parameters above
    j.outputdata = AthenaMCOutputDatasets()
    j.outputdata.outrootfiles['HITS'] = (
        j.application.process_name + '.' + j.application.production_name)

    ## Transform to run
    j.application.transform_script = 'csc_atlasG4_trf.py'

    ## Transform configuration ($inputfiles / $number_events_job / $skip are
    ## substituted by AthenaMC per subjob)
    j.application.extraArgs = 'inputEvgenFile=$inputfiles maxEvents=$number_events_job skipEvents=$skip geometryVersion=ATLAS-CSC-01-00-00 physicsList=QGSP_BERT jobConfig=misalignID.py,misalignMS.py ConditionsTag=OFLCOND-CSC-00-00-00'

    ## Per-job random seed; randint(0, 99999999) spans the same range as the
    ## old int(random.random()*100000000) but states the intent directly
    j.application.extraIncArgs = 'randomSeed=%s' % random.randint(0, 99999999)

    ## Split the job into subjobs
    j.splitter = AthenaMCSplitterJob()
    j.splitter.numsubjobs = 50

    ## Input dataset (DQ2) and the single partition this job processes
    j.inputdata = AthenaMCInputDatasets()
    j.inputdata.datasetType = 'DQ2'
    j.inputdata.DQ2dataset = dataset
    j.inputdata.redefine_partitions = p
    j.inputdata.number_events_file = 5000

    ## Send the job to the grid via gLite/LCG, restricted to the UK cloud
    j.backend = LCG()
    j.backend.middleware = 'GLITE'
    j.backend.requirements = AtlasLCGRequirements()
    j.backend.requirements.cloud = 'UK'

    ## Storage element for the output dataset
    j.application.se_name = 'UKI-NORTHGRID-LIV-HEP_LOCALGROUPDISK'

    ## Give each job a unique name based on process name and partition
    j.name = j.application.process_name + '_' + p

    ## Submit the job
    j.submit()