diff --git a/RecoLocalCalo/Configuration/python/customizeHcalOnlyForProfiling.py b/RecoLocalCalo/Configuration/python/customizeHcalOnlyForProfiling.py
index 93da82c580d99..1ba182960b82c 100644
--- a/RecoLocalCalo/Configuration/python/customizeHcalOnlyForProfiling.py
+++ b/RecoLocalCalo/Configuration/python/customizeHcalOnlyForProfiling.py
@@ -23,7 +23,7 @@ def customizeHcalOnlyForProfilingGPUOnly(process):
 # Currently, this means:
 #   - running the unpacker on CPU, converting the digis into SoA format and copying them to GPU
 #   - running the HBHE local reconstruction, including MAHI, on GPU.
-#   - running the HBHE PFRecHit and PFCluster producers on GPU [including copy of them to host, as we cannot untangle the copy back at a moment]
+#   - running the HBHE PFRecHit and PFCluster producers on GPU [without copying them back to the host]
 def customizeHcalPFOnlyForProfilingGPUOnly(process):
     process.consumer = cms.EDAnalyzer("GenericConsumer",
@@ -33,6 +33,8 @@ def customizeHcalPFOnlyForProfilingGPUOnly(process):
     process.consume_step = cms.EndPath(process.consumer)
     process.schedule = cms.Schedule(process.raw2digi_step, process.reconstruction_step, process.consume_step)
 
+    process.particleFlowClusterHBHEOnly.cuda.produceLegacy = cms.bool(False)
+    process.particleFlowRecHitHBHEOnly.cuda.produceLegacy = cms.bool(False)
+
     return process

diff --git a/RecoParticleFlow/PFClusterProducer/plugins/DeclsForKernels.h b/RecoParticleFlow/PFClusterProducer/plugins/DeclsForKernels.h
index 46310729d5a5e..f27e576afe3b0 100644
--- a/RecoParticleFlow/PFClusterProducer/plugins/DeclsForKernels.h
+++ b/RecoParticleFlow/PFClusterProducer/plugins/DeclsForKernels.h
@@ -161,8 +161,8 @@ namespace PFRecHit {
 namespace PFClustering {
   namespace HCAL {
     struct ConfigurationParameters {
-      uint32_t maxRH = 3000;          // previously: 2000
-      uint32_t maxPFCFracs = 600000;  // previously: 80000
+      uint32_t maxRH = 4000;          // previously: 2000
+      uint32_t maxPFCFracs = 200000;  // previously: 80000
       uint32_t maxNeighbors = 8;
     };
@@ -332,6 +332,7 @@ namespace PFClustering {
       }
     };
   }  // namespace HCAL
+
 }  // namespace PFClustering

 #endif  // RecoParticleFlow_PFClusterProducer_plugins_DeclsForKernels_h

diff --git a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterCudaHCAL.cu b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterCudaHCAL.cu
index d29e3794aa3b1..9734f46f4e7eb 100644
--- a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterCudaHCAL.cu
+++ b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterCudaHCAL.cu
@@ -73,11 +73,11 @@ namespace PFClusterCudaHCAL {
   // --- kernel summary --
   // initializeCudaConstants
   // PFRechitToPFCluster_HCAL_entryPoint
-  //   seedingTopoThreshKernel_HCAL
-  //   prepareTopoInputs
-  //   topoClusterLinking
-  //   topoClusterContraction
-  //   fillRhfIndex
+  //   seedingTopoThreshKernel_HCAL: apply the seeding/topo-clustering thresholds to RecHits and require seeds to be local maxima (outputs: pfrh_isSeed, pfrh_passTopoThresh) [OutputDataGPU]
+  //   prepareTopoInputs: prepare the "edge" data (outputs: nEdges, pfrh_edgeId, pfrh_edgeList [of dimension nEdges])
+  //   topoClusterLinking(KH): run the topo clustering (output: pfrh_topoId)
+  //   topoClusterContraction: contract each chain of parents (parent of parent of ...) to its root (outputs: pfrh_parent, topoSeedCount, topoSeedOffsets, topoSeedList, seedFracOffsets, pcrhfracind, pcrhfrac)
+  //   fillRhfIndex: fill rhfracind (PFCluster RecHitFraction constituent PFRecHit indices)
   //   hcalFastCluster_selection
   //   dev_hcalFastCluster_optimizedSimple
   //   dev_hcalFastCluster_optimizedComplex
@@ -1830,7 +1830,7 @@ namespace PFClusterCudaHCAL {
       pos4.z += rechitPos.z * norm;
      pos4.w += norm;  // position_norm
    };
-    /* 
+    /*
    auto computeClusterPosAtomic = [&] (float4& pos4, float _frac, int rhInd, bool isDebug) {
      float4 rechitPos = make_float4(pfrh_x[rhInd], pfrh_y[rhInd], pfrh_z[rhInd], 1.0);
@@ -1839,7 +1839,7 @@ namespace PFClusterCudaHCAL {
          (_frac < minFractionInCalc ? 0.0f : max(0.0f, logf(rh_energy * rhENormInv)));
      if (isDebug)
        printf("\t\t\trechit %d: norm = %f\tfrac = %f\trh_energy = %f\tpos = (%f, %f, %f)\n",
               rhInd, norm, _frac, rh_energy, rechitPos.x, rechitPos.y, rechitPos.z);
-    
+
      atomicAdd(&pos4.x, rechitPos.x * norm);
      atomicAdd(&pos4.y, rechitPos.y * norm);
      atomicAdd(&pos4.z, rechitPos.z * norm);
@@ -2168,7 +2168,7 @@ namespace PFClusterCudaHCAL {
      pos4.z += rechitPos.z * norm;
      pos4.w += norm;  // position_norm
    };
-    /* 
+    /*
    auto computeClusterPosAtomic = [&] (float4& pos4, float _frac, int rhInd, bool isDebug) {
      float4 rechitPos = make_float4(pfrh_x[rhInd], pfrh_y[rhInd], pfrh_z[rhInd], 1.0);
@@ -2177,7 +2177,7 @@ namespace PFClusterCudaHCAL {
          (_frac < minFractionInCalc ? 0.0f : max(0.0f, logf(rh_energy * rhENormInv)));
      if (isDebug)
        printf("\t\t\trechit %d: norm = %f\tfrac = %f\trh_energy = %f\tpos = (%f, %f, %f)\n",
               rhInd, norm, _frac, rh_energy, rechitPos.x, rechitPos.y, rechitPos.z);
-    
+
      atomicAdd(&pos4.x, rechitPos.x * norm);
      atomicAdd(&pos4.y, rechitPos.y * norm);
      atomicAdd(&pos4.z, rechitPos.z * norm);
@@ -3520,34 +3520,6 @@ namespace PFClusterCudaHCAL {
    }
  }

-  // Contraction in a single block
-  __global__ void topoClusterContraction(size_t size, int* pfrh_parent, int* pfrh_isSeed) {
-    __shared__ int notDone;
-    if (threadIdx.x == 0)
-      notDone = 0;
-    __syncthreads();
-
-    do {
-      volatile bool threadNotDone = false;
-      for (int i = threadIdx.x; i < size; i += blockDim.x) {
-        int parent = pfrh_parent[i];
-        if (parent >= 0 && parent != pfrh_parent[parent]) {
-          threadNotDone = true;
-          pfrh_parent[i] = pfrh_parent[parent];
-        }
-      }
-      if (threadIdx.x == 0)
-        notDone = 0;
-      __syncthreads();
-
-      atomicAdd(&notDone, (int)threadNotDone);
-      //if (threadNotDone) notDone = true;
-      //notDone |= threadNotDone;
-      __syncthreads();
-
-    } while (notDone);
-  }
-
  // Contraction in a single block
  __global__ void topoClusterContraction(size_t size,
                                         int* pfrh_parent,
@@ -3638,10 +3610,13 @@ namespace PFClusterCudaHCAL {
      }
    }
    __syncthreads();
+
    if (threadIdx.x == 0) {
      *pcrhFracSize = totalSeedFracOffset;
-      //printf("At the end of topoClusterContraction, found *pcrhFracSize = %d\n", *pcrhFracSize);
+      if (*pcrhFracSize > 200000)  // must stay in sync with maxPFCFracs in DeclsForKernels.h
+        printf("At the end of topoClusterContraction, found large *pcrhFracSize = %d\n", *pcrhFracSize);
    }
+
  }

  // Prefill the rechit index for all PFCluster fractions
@@ -3676,7 +3651,7 @@ namespace PFClusterCudaHCAL {
                              int* pcrhfracind) {
    //int debugSeedIdx = 500;
-    /* 
+    /*
    printf("rhCount = \n[");
    for (int i = 0; i < (int)nRH; i++) {
      if (i != 0)
        printf(", ");
@@ -3729,6 +3704,27 @@ namespace PFClusterCudaHCAL {
    return false;
  }

+  // Returns true when idx sits at the left edge of a block of identical entries in pfrh_edgeId
+  __device__ __forceinline__ bool isLeftEdgeKH(const int idx,
+                                               const int nEdges,
+                                               const int* __restrict__ pfrh_edgeId,
+                                               const int* __restrict__ pfrh_edgeMask) {
+    int temp = idx - 1;
+    if (idx > 0) {
+      int edgeId = pfrh_edgeId[idx];
+      int tempId = pfrh_edgeId[temp];
+      if (edgeId != tempId) {
+        // Different edge Id from the previous entry -> this is a left edge
+        return true;
+      }
+    } else if (temp < 0) {  // idx == 0
+      return true;
+    }
+
+    // Same edge Id as the previous entry (or invalid index)
+    return false;
+  }
+
  __device__ __forceinline__ bool isRightEdge(const int idx,
                                              const int nEdges,
                                              const int* __restrict__ pfrh_edgeId,
                                              const int* __restrict__ pfrh_edgeMask) {
@@ -3766,7 +3762,7 @@ namespace PFClusterCudaHCAL {
                                     int* pfrh_edgeMask,
                                     const int* pfrh_passTopoThresh,
                                     int* topoIter) {
-    __shared__ bool notDone;
+    __shared__ int notDone;  // This should be a bool, but a bool here somehow leads to an out-of-bounds access
    __shared__ int iter, gridStride, nEdges;

    int start = blockIdx.x * blockDim.x + threadIdx.x;
@@ -3787,10 +3783,13 @@ namespace PFClusterCudaHCAL {
      else
        pfrh_edgeMask[idx] = 0;
    }
+    __syncthreads();  //!!

    do {
+      __syncthreads();  //!!
+
      if (threadIdx.x == 0) {
-        notDone = false;
+        notDone = 0;
      }
      __syncthreads();
@@ -3814,26 +3813,28 @@ namespace PFClusterCudaHCAL {
          // edgeMask set to true if elements of edgeId and edgeList are different
          if (pfrh_edgeId[idx] != pfrh_edgeList[idx]) {
            pfrh_edgeMask[idx] = 1;
-            notDone = true;
+            notDone = 1;
          } else {
            pfrh_edgeMask[idx] = 0;
          }
        }
      }
+
+      __syncthreads();  //!!
+
      if (threadIdx.x == 0)
        iter++;
      __syncthreads();

-      if (!notDone)
+      if (notDone == 0)
        break;

      __syncthreads();  //!!

      if (threadIdx.x == 0) {
-        notDone = false;
+        notDone = 0;
      }
-      __syncthreads();

      // Even linking
@@ -3844,7 +3845,6 @@ namespace PFClusterCudaHCAL {
          pfrh_parent[i] = (int)max(i, pfrh_edgeList[idx]);
        }
      }
-
      __syncthreads();

      // edgeParent
@@ -3858,27 +3858,132 @@ namespace PFClusterCudaHCAL {
          // edgeMask set to true if elements of edgeId and edgeList are different
          if (pfrh_edgeId[idx] != pfrh_edgeList[idx]) {
            pfrh_edgeMask[idx] = 1;
-            notDone = true;
+            notDone = 1;
          } else {
            pfrh_edgeMask[idx] = 0;
          }
        }
      }
+      __syncthreads();  //!!
+
      if (threadIdx.x == 0)
        iter++;
      __syncthreads();

-    } while (notDone);
+    } while (notDone == 1);
+
+    __syncthreads();  //!!
+
+    if (threadIdx.x == 0)
+      *topoIter = iter;
+  }
+
+  __global__ void topoClusterLinkingKH(int nRH,
+                                       int* nEdgesIn,
+                                       int* pfrh_parent,
+                                       int* pfrh_edgeId,
+                                       int* pfrh_edgeList,
+                                       int* pfrh_edgeMask,
+                                       const int* pfrh_passTopoThresh,
+                                       int* topoIter) {
+    __shared__ int notDone;  // This should be a bool, but a bool here somehow leads to an out-of-bounds access
+    __shared__ int notDone2;
+    __shared__ int gridStride, nEdges;
+
+    // Initialization
+    int start = blockIdx.x * blockDim.x + threadIdx.x;
+
+    if (threadIdx.x == 0) {
+      *topoIter = 0;
+      nEdges = *nEdgesIn;
+      gridStride = blockDim.x * gridDim.x;  // For a single-block kernel this is the number of threads
+      notDone = 0;
+      notDone2 = 0;
+    }
+
+    __syncthreads();
+
+    // Check if pairs in edgeId,edgeList contain a rechit not passing the topo threshold
+    // If found, set the mask to 0
+    // But, for now, not using edgeMask hereafter, because the same threshold cut is already applied at the PFRecHit level
+    // for (int idx = start; idx < nEdges; idx += gridStride) {
+    //   if (pfrh_passTopoThresh[pfrh_edgeId[idx]] && pfrh_passTopoThresh[pfrh_edgeList[idx]])
+    //     pfrh_edgeMask[idx] = 1;
+    //   else
+    //     pfrh_edgeMask[idx] = 0;
+    // }
+
+    // __syncthreads();
+
+    // First attempt at topo clustering
+    // For the first edge of each edgeId block, point the parent at the smaller index
+    for (int idx = start; idx < nEdges; idx += gridStride) {
+      int i = pfrh_edgeId[idx];  // Get edge topo id
+      if (pfrh_edgeMask[idx] > 0 && isLeftEdgeKH(idx, nEdges, pfrh_edgeId, pfrh_edgeMask)) {  // isLeftEdgeKH
+        pfrh_parent[i] = (int)min(i, pfrh_edgeList[idx]);
+      } else {
+        pfrh_parent[i] = i;
+      }
+    }
+
+    __syncthreads();
+
+    //
+    // Loop until the topo clustering converges
+    for (int ii = 0; ii < 100; ii++) {
+      // Reset the convergence flag notDone2
+      if (threadIdx.x == 0) {
+        notDone2 = 0;
+      }
+
+      // Follow parents of parents ... to contract the parent structure
+      do {
+        volatile bool threadNotDone = false;
+        for (int i = threadIdx.x; i < nRH; i += blockDim.x) {
+          int parent = pfrh_parent[i];
+          if (parent >= 0 && parent != pfrh_parent[parent]) {
+            threadNotDone = true;
+            pfrh_parent[i] = pfrh_parent[parent];
+          }
+        }
+        if (threadIdx.x == 0)
+          notDone = 0;
+        __syncthreads();
+
+        atomicAdd(&notDone, (int)threadNotDone);
+        __syncthreads();
+
+      } while (notDone);
+
+      __syncthreads();
+
+      // Do all rechit pairs in edgeId/edgeList carry the same topo cluster label?
+      for (int idx = start; idx < nEdges; idx += gridStride) {
+        //for (int idx = 0; idx < nEdges; idx++) {
+        int i = pfrh_edgeId[idx];    // Get edge topo id
+        int j = pfrh_edgeList[idx];  // Get edge neighbor list
+        int parent_target = pfrh_parent[i];
+        int parent_neighbor = pfrh_parent[j];
+        if (parent_target != parent_neighbor) {
+          notDone2 = 1;
+          //printf("hmm. they should have the same parent, but they don't. why... %d %d %d\n", i, j, ii);
+          int min_parent = (int)min(parent_target, parent_neighbor);
+          int max_parent = (int)max(parent_target, parent_neighbor);
+          int idx_max = i;
+          if (parent_neighbor == max_parent)
+            idx_max = j;
+          pfrh_parent[idx_max] = min_parent;
+        }
+      }
+
+      __syncthreads();
+      if (notDone2 == 0)  // if the topo-cluster finding has converged, terminate the ii loop
+        break;
+
+    }  // for-loop ii

-    *topoIter = iter;
-#ifdef DEBUG_GPU_HCAL
-//    if (threadIdx.x == 0) {
-//      printf("*** Topo clustering converged in %d iterations ***\n", iter);
-//    }
-//    __syncthreads();
-#endif
  }

  __device__ __forceinline__ void sortSwap(int* toSort, int a, int b) {
@@ -3938,6 +4043,7 @@ namespace PFClusterCudaHCAL {
  }

  __device__ __forceinline__ int scan1Inclusive(int idata, volatile int* s_Data, int size) {
+    assert(size == 32);
    int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
    s_Data[pos] = 0;
    pos += size;
@@ -3945,7 +4051,9 @@ namespace PFClusterCudaHCAL {

    for (int offset = 1; offset < size; offset <<= 1) {
      int t = s_Data[pos] + s_Data[pos - offset];
+      __syncwarp();
      s_Data[pos] = t;
+      __syncwarp();
    }

    return s_Data[pos];
@@ -4435,18 +4543,9 @@ namespace PFClusterCudaHCAL {
                                           ::PFClustering::HCAL::ScratchDataGPU& scratchGPU,
                                           float (&timer)[8]) {
-#ifdef DEBUG_GPU_HCAL
-    cudaProfilerStart();
-    cudaEvent_t start, stop;
-    cudaEventCreate(&start);
-    cudaEventCreate(&stop);
-    cudaEventRecord(start, cudaStream);
-#endif
-
    int nRH = inputPFRecHits.size;

    // Combined seeding & topo clustering thresholds, array initialization
-
    seedingTopoThreshKernel_HCAL<<<(nRH + 31) / 32, 64, 0, cudaStream>>>(nRH,
                                                                         inputPFRecHits.pfrh_energy.get(),
                                                                         inputPFRecHits.pfrh_x.get(),
@@ -4466,72 +4565,27 @@ namespace PFClusterCudaHCAL {
                                                                         outputGPU.topoSeedList.get(),
                                                                         outputGPU.pfc_iter.get());
-    cudaCheck(cudaStreamSynchronize(cudaStream));
-
-#ifdef DEBUG_GPU_HCAL
-    cudaEventRecord(stop, cudaStream);
-    cudaEventSynchronize(stop);
-    cudaEventElapsedTime(&timer[0], start, stop);
-    cudaEventRecord(start, cudaStream);
-#endif
-
-    // prepareTopoInputsSerial<<<1, 1, 4 * (8+4) * sizeof(int), cudaStream>>>(
-    //     nRH,
-    //     outputGPU.nEdges.get(),
-    //     outputGPU.pfrh_passTopoThresh.get(),
-    //     inputPFRecHits.pfrh_neighbours.get(),
-    //     scratchGPU.pfrh_edgeId.get(),
-    //     scratchGPU.pfrh_edgeList.get());
-
    // Topo clustering
    // Fill edgeId, edgeList arrays with rechit neighbors
    // Has a bug when using more than 128 threads..
+    // prepareTopoInputsSerial<<<1, 1, 4 * (8+4) * sizeof(int), cudaStream>>>(
    prepareTopoInputs<<<1, 128, 128 * (8 + 4) * sizeof(int), cudaStream>>>(nRH,
                                                                           outputGPU.nEdges.get(),
                                                                           outputGPU.pfrh_passTopoThresh.get(),
                                                                           inputPFRecHits.pfrh_neighbours.get(),
                                                                           scratchGPU.pfrh_edgeId.get(),
                                                                           scratchGPU.pfrh_edgeList.get());
-    cudaCheck(cudaStreamSynchronize(cudaStream));
-
-    // prepareTopoInputs<<<1, 256, 256 * (8+4) * sizeof(int), cudaStream>>>(
-    //     nRH,
-    //     outputGPU.nEdges.get(),
-    //     outputGPU.pfrh_passTopoThresh.get(),
-    //     inputPFRecHits.pfrh_neighbours.get(),
-    //     scratchGPU.pfrh_edgeId.get(),
-    //     scratchGPU.pfrh_edgeList.get());
-
-#ifdef DEBUG_GPU_HCAL
-    cudaEventRecord(stop, cudaStream);
-    cudaEventSynchronize(stop);
-    cudaEventElapsedTime(&timer[4], start, stop);
-    //printf("\nprepareTopoInputs took %f ms\n", timer[4]);
-
-    compareEdgeArrays<<<1, 1, 0, cudaStream>>>(outputGPU.nEdges.get(),
-                                               scratchGPU.pfrh_edgeId.get(),
-                                               scratchGPU.pfrh_edgeList.get(),
-                                               nEdges,
-                                               inputGPU.pfrh_edgeId.get(),
-                                               inputGPU.pfrh_edgeList.get(),
-                                               nRH,
-                                               inputGPU.pfNeighFourInd.get(),
-                                               inputPFRecHits.pfrh_neighbours.get());
-
-    cudaEventRecord(start, cudaStream);
-#endif

    // Topo clustering
-    topoClusterLinking<<<1, 512, 0, cudaStream>>>(nRH,
-                                                  outputGPU.nEdges.get(),
-                                                  outputGPU.pfrh_topoId.get(),
-                                                  scratchGPU.pfrh_edgeId.get(),
-                                                  scratchGPU.pfrh_edgeList.get(),
-                                                  scratchGPU.pfrh_edgeMask.get(),
-                                                  //inputGPU.pfrh_edgeMask.get(),
-                                                  outputGPU.pfrh_passTopoThresh.get(),
-                                                  outputGPU.topoIter.get());
-    cudaCheck(cudaStreamSynchronize(cudaStream));
+    //topoClusterLinking<<<1, 512, 0, cudaStream>>>(nRH,
+    topoClusterLinkingKH<<<1, 512, 0, cudaStream>>>(nRH,
+                                                    outputGPU.nEdges.get(),
+                                                    outputGPU.pfrh_topoId.get(),
+                                                    scratchGPU.pfrh_edgeId.get(),
+                                                    scratchGPU.pfrh_edgeList.get(),
+                                                    scratchGPU.pfrh_edgeMask.get(),
+                                                    outputGPU.pfrh_passTopoThresh.get(),
+                                                    outputGPU.topoIter.get());

    topoClusterContraction<<<1, 512, 0, cudaStream>>>(nRH,
                                                      outputGPU.pfrh_topoId.get(),
@@ -4546,13 +4600,6 @@ namespace PFClusterCudaHCAL {
                                                      outputGPU.pcrh_frac.get(),
                                                      outputGPU.pcrhFracSize.get());

-#ifdef DEBUG_GPU_HCAL
-    cudaEventRecord(stop, cudaStream);
-    cudaEventSynchronize(stop);
-    cudaEventElapsedTime(&timer[1], start, stop);
-    cudaEventRecord(start, cudaStream);
-#endif
-
    dim3 grid((nRH + 31) / 32, (nRH + 31) / 32);
    dim3 block(32, 32);
@@ -4565,13 +4612,6 @@ namespace PFClusterCudaHCAL {
                                                 scratchGPU.rhcount.get(),
                                                 outputGPU.pcrh_fracInd.get());

-#ifdef DEBUG_GPU_HCAL
-    cudaEventRecord(stop, cudaStream);
-    cudaEventSynchronize(stop);
-    cudaEventElapsedTime(&timer[2], start, stop);
-    cudaEventRecord(start, cudaStream);
-#endif
-
    hcalFastCluster_selection<<<grid, block, 0, cudaStream>>>(nRH,
                                                              inputPFRecHits.pfrh_x.get(),
                                                              inputPFRecHits.pfrh_y.get(),
@@ -4595,11 +4635,5 @@ namespace PFClusterCudaHCAL {
                                                              inputGPU.pfc_prevPos4.get(),
                                                              inputGPU.pfc_energy.get(),
                                                              outputGPU.pfc_iter.get());
-#ifdef DEBUG_GPU_HCAL
-    cudaEventRecord(stop, cudaStream);
-    cudaEventSynchronize(stop);
-    cudaEventElapsedTime(&timer[3], start, stop);
-    cudaProfilerStop();
-#endif
  }
}  // namespace PFClusterCudaHCAL
diff --git a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterProducerCudaHCAL.cc b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterProducerCudaHCAL.cc
index 4cf0d9525b98e..3dd5a66718421 100644
--- a/RecoParticleFlow/PFClusterProducer/plugins/PFClusterProducerCudaHCAL.cc
+++ b/RecoParticleFlow/PFClusterProducer/plugins/PFClusterProducerCudaHCAL.cc
@@ -317,6 +317,7 @@ void PFClusterProducerCudaHCAL::acquire(edm::Event const& event,
  nRH_ = PFRecHits.size;
  if (nRH_ == 0)
    return;
+  if (nRH_ > 4000) std::cout << "nRH(PFRecHitSize)>4000: " << nRH_ << std::endl;
  const int numbytes_int = nRH_ * sizeof(int);
  int totalNeighbours = 0;  // Running count of 8 neighbour edges for edgeId, edgeList
diff --git a/RecoParticleFlow/PFClusterProducer/python/particleFlowClusterHBHE_cfi.py b/RecoParticleFlow/PFClusterProducer/python/particleFlowClusterHBHE_cfi.py
index 7dea30a6d7dc7..ab713030f821b 100644
--- a/RecoParticleFlow/PFClusterProducer/python/particleFlowClusterHBHE_cfi.py
+++ b/RecoParticleFlow/PFClusterProducer/python/particleFlowClusterHBHE_cfi.py
@@ -111,11 +111,14 @@
    positionReCalc = cms.PSet(),
    energyCorrector = cms.PSet()
)
+_module_config_cuda = _module_config.clone()

#### PF CLUSTER HCAL ####
_particleFlowClusterHBHE_cpu = cms.EDProducer("PFClusterProducer", _module_config.clone())
_particleFlowClusterHBHE_cuda = cms.EDProducer("PFClusterProducerCudaHCAL", _module_config.clone())
_particleFlowClusterHBHE_cuda.PFRecHitsLabelIn = cms.InputTag("particleFlowRecHitHBHE","")
+_particleFlowClusterHBHE_cuda.produceLegacy = cms.bool(True)
+_particleFlowClusterHBHE_cuda.produceSoA = cms.bool(True)

#####
diff --git a/RecoParticleFlow/PFClusterProducer/python/particleFlowRecHitHBHE_cfi.py b/RecoParticleFlow/PFClusterProducer/python/particleFlowRecHitHBHE_cfi.py
index 33b77bccff92a..0dcec57965c03 100644
--- a/RecoParticleFlow/PFClusterProducer/python/particleFlowRecHitHBHE_cfi.py
+++ b/RecoParticleFlow/PFClusterProducer/python/particleFlowRecHitHBHE_cfi.py
@@ -44,6 +44,10 @@
_module_pset_cuda = _module_pset.clone()
_module_pset_cuda.producers[0].src= "hbheRecHitProducerGPU" # use GPU version as input instead of legacy version

+_module_pset_cuda.produceSoA = cms.bool(True)
+_module_pset_cuda.produceLegacy = cms.bool(True)
+_module_pset_cuda.produceCleanedLegacy = cms.bool(True)
+
for idx, x in enumerate(_module_pset_cuda.producers):
    for idy, y in enumerate(x.qualityTests):
        if y.name._value == "PFRecHitQTestHCALThresholdVsDepth": # when applying phase1 depth-dependent HCAL thresholds
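
Reviewer note, not part of the patch: topoClusterLinkingKH is essentially an iterative union-find over the rechit neighbour graph. Each edge (edgeId[k], edgeList[k]) hooks the larger of the two current roots onto the smaller one, and a pointer-jumping pass contracts every parent chain to its root before convergence is re-checked. Below is a minimal, self-contained single-block CUDA sketch of that idea; the kernel name linkAndContract and the atomicMin-based hooking are illustrative simplifications, not the PR's exact masked left-edge scheme.

#include <cstdio>
#include <cuda_runtime.h>

// Single-block sketch of the linking + pointer-jumping contraction.
// Edges (edgeId[k], edgeList[k]) connect rechits; after convergence,
// parent[i] holds the smallest rechit index in i's topo cluster.
__global__ void linkAndContract(int nRH, int nEdges, const int* edgeId, const int* edgeList, int* parent) {
  __shared__ int notDone;  // int rather than bool, mirroring the kernels above
  for (int i = threadIdx.x; i < nRH; i += blockDim.x)
    parent[i] = i;  // every rechit starts as its own root
  __syncthreads();

  for (int iter = 0; iter < 100; ++iter) {  // capped like the ii loop above
    // Pointer jumping: contract parent chains until every node points at a root.
    do {
      bool threadNotDone = false;
      for (int i = threadIdx.x; i < nRH; i += blockDim.x) {
        int p = parent[i];
        if (p != parent[p]) {
          threadNotDone = true;
          parent[i] = parent[p];
        }
      }
      if (threadIdx.x == 0)
        notDone = 0;
      __syncthreads();
      atomicAdd(&notDone, (int)threadNotDone);
      __syncthreads();
    } while (notDone);

    // Link: any edge whose endpoints still disagree hooks the larger root onto the smaller.
    if (threadIdx.x == 0)
      notDone = 0;
    __syncthreads();
    for (int k = threadIdx.x; k < nEdges; k += blockDim.x) {
      int pi = parent[edgeId[k]], pj = parent[edgeList[k]];
      if (pi != pj) {
        atomicMin(&parent[max(pi, pj)], min(pi, pj));
        atomicAdd(&notDone, 1);
      }
    }
    __syncthreads();
    if (notDone == 0)  // all edges agree: clustering converged
      break;
    __syncthreads();
  }
}

int main() {
  // Toy graph: rechits {0,1,2} form one cluster via edges (0,1) and (1,2); rechit 3 is isolated.
  const int nRH = 4, nEdges = 4;
  int hId[nEdges] = {0, 1, 1, 2}, hList[nEdges] = {1, 0, 2, 1};
  int *dId, *dList, *dParent;
  cudaMalloc(&dId, nEdges * sizeof(int));
  cudaMalloc(&dList, nEdges * sizeof(int));
  cudaMalloc(&dParent, nRH * sizeof(int));
  cudaMemcpy(dId, hId, nEdges * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dList, hList, nEdges * sizeof(int), cudaMemcpyHostToDevice);
  linkAndContract<<<1, 128>>>(nRH, nEdges, dId, dList, dParent);
  int hParent[nRH];
  cudaMemcpy(hParent, dParent, nRH * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < nRH; ++i)
    printf("rechit %d -> topo cluster %d\n", i, hParent[i]);  // expect 0 0 0 3
  cudaFree(dId);
  cudaFree(dList);
  cudaFree(dParent);
  return 0;
}

Run on the toy graph, this labels rechits 0-2 with root 0 and leaves rechit 3 alone, mirroring how pfrh_topoId ends up holding the smallest rechit index of each topo cluster.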
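
A related note on the scan1Inclusive changes: the added assert(size == 32) and __syncwarp() calls guard the shared-memory read-modify-write against the independent thread scheduling introduced with Volta, where warp lanes no longer execute in lockstep. A register-based warp scan avoids the shared-memory hazard altogether; the sketch below (illustrative, not part of the PR) performs the same inclusive scan with __shfl_up_sync, which synchronizes the participating lanes by itself.

#include <cstdio>
#include <cuda_runtime.h>

// Warp-level inclusive scan using shuffles instead of shared memory;
// no __syncwarp() is needed because each shuffle synchronizes the lanes in the mask.
__device__ int warpInclusiveScan(int v) {
  const unsigned mask = 0xffffffffu;  // all 32 lanes participate
  for (int offset = 1; offset < 32; offset <<= 1) {
    int n = __shfl_up_sync(mask, v, offset);
    if ((threadIdx.x & 31) >= offset)
      v += n;  // lanes below the offset keep their own value
  }
  return v;
}

__global__ void scanDemo() {
  int v = warpInclusiveScan(1);  // scanning all-ones yields lane index + 1
  printf("lane %d: %d\n", threadIdx.x, v);
}

int main() {
  scanDemo<<<1, 32>>>();
  cudaDeviceSynchronize();
  return 0;
}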