Diffstat (limited to 'src/collectives/device/all_gather.h')
-rw-r--r-- | src/collectives/device/all_gather.h | 369
1 file changed, 191 insertions, 178 deletions
diff --git a/src/collectives/device/all_gather.h b/src/collectives/device/all_gather.h index 0ad5ba9..e057dc8 100644 --- a/src/collectives/device/all_gather.h +++ b/src/collectives/device/all_gather.h @@ -1,5 +1,5 @@ /************************************************************************* - * Copyright (c) 2015-2019, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved. * * See LICENSE.txt for license information ************************************************************************/ @@ -8,188 +8,201 @@ #include "primitives.h" #include "collectives.h" -template<int UNROLL, class FUNC, typename T> -__device__ void ncclAllGatherRingKernel(struct CollectiveArgs* args) { - const int tid = threadIdx.x; - const int nthreads = args->nThreads-WARP_SIZE; - const int bid = args->bid; - struct ncclDevComm* comm = args->comm; - struct ncclChannel* channel = comm->channels+blockIdx.x; - struct ncclRing* ring = &channel->ring; - const ssize_t size = args->N; - const int nranks = comm->nRanks; - const int stepSize = channel->buffSize / (sizeof(T)*NCCL_STEPS); - const int chunkSize = stepSize * ALLGATHER_CHUNKSTEPS; - const ssize_t loopSize = args->nChannels*(ssize_t)chunkSize; - - // Compute pointers - const T * __restrict__ thisInput = (const T*)args->ThisInput; - T * __restrict__ thisOutput = (T*)args->ThisOutput; - - ncclPrimitives<UNROLL, ALLGATHER_CHUNKSTEPS/ALLGATHER_SLICESTEPS, ALLGATHER_SLICESTEPS, T, 1, 1, FUNC> - prims(tid, args->nThreads, &ring->prev, &ring->next, thisOutput, stepSize, channel, comm, args->opCount); - - for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) { - int realChunkSize = min(chunkSize, DIVUP(size-gridOffset,args->nChannels)); - ALIGN_SIZE(realChunkSize, nthreads*sizeof(uint64_t)/sizeof(T)); - ssize_t chunkOffset = gridOffset + bid*realChunkSize; - - /////////////// begin AllGather steps /////////////// - ssize_t offset; - int nelem = min(realChunkSize, size-chunkOffset); - int rankDest; - - // step 0: push data to next GPU - rankDest = ring->devUserRanks[0]; - offset = chunkOffset + rankDest * size; - - if (thisInput + chunkOffset == thisOutput + offset) { // In place - prims.directSend(thisInput+chunkOffset, offset, nelem); - } else { - prims.directCopySend(thisInput+chunkOffset, thisOutput+offset, offset, nelem); +template<class FUNC, typename T, int UNROLL> +class ncclFunction<ncclFuncAllGather, NCCL_ALGO_RING, NCCL_PROTO_SIMPLE, FUNC, T, UNROLL> { + public: + __device__ void run(struct ncclWorkElem* args) { + const int tid = threadIdx.x; + const int nthreads = args->nThreads-WARP_SIZE; + const int bid = args->coll.bid; + const int nChannels = args->coll.nChannels; + struct ncclDevComm* comm = args->comm; + struct ncclChannel* channel = comm->channels+blockIdx.x; + struct ncclRing* ring = &channel->ring; + const int stepSize = comm->buffSizes[NCCL_PROTO_SIMPLE] / (sizeof(T)*NCCL_STEPS); + const int chunkSize = stepSize * ALLGATHER_CHUNKSTEPS; + const int nranks = comm->nRanks; + const ssize_t loopSize = nChannels*(ssize_t)chunkSize; + const ssize_t size = args->coll.count; + + // Compute pointers + const T * __restrict__ thisInput = (const T*)args->sendbuff; + T * __restrict__ thisOutput = (T*)args->recvbuff; + + ncclPrimitives<UNROLL, ALLGATHER_CHUNKSTEPS/ALLGATHER_SLICESTEPS, ALLGATHER_SLICESTEPS, T, 1, 1, 1, FUNC> + prims(tid, nthreads, &ring->prev, &ring->next, thisOutput, stepSize, channel, comm, ncclShmem->ptrs, 0); + + for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += 
loopSize) { + int realChunkSize = min(chunkSize, DIVUP(size-gridOffset,nChannels)); + ALIGN_SIZE(realChunkSize, nthreads*sizeof(uint64_t)/sizeof(T)); + ssize_t chunkOffset = gridOffset + bid*realChunkSize; + + /////////////// begin AllGather steps /////////////// + ssize_t offset; + int nelem = min(realChunkSize, size-chunkOffset); + int rankDest; + + // step 0: push data to next GPU + rankDest = ring->devUserRanks[0]; + offset = chunkOffset + rankDest * size; + + if (thisInput + chunkOffset == thisOutput + offset) { // In place + prims.directSend(thisInput+chunkOffset, offset, nelem); + } else { + prims.directCopySend(thisInput+chunkOffset, thisOutput+offset, offset, nelem); + } + + // k-2 steps: copy to next GPU + for (int j=1; j<nranks-1; ++j) { + rankDest = ring->devUserRanks[nranks-j]; + offset = chunkOffset + rankDest * size; + + prims.directRecvCopySend(thisOutput+offset, offset, nelem); + } + + // Make final copy from buffer to dest. + rankDest = ring->devUserRanks[1]; + offset = chunkOffset + rankDest * size; + + // Final wait/copy. + prims.directRecv(thisOutput+offset, offset, nelem); + } } - - // k-2 steps: copy to next GPU - for (int j=1; j<nranks-1; ++j) { - rankDest = ring->devUserRanks[nranks-j]; - offset = chunkOffset + rankDest * size; - - prims.directRecvCopySend(thisOutput+offset, offset, nelem); +}; + +template<class FUNC, typename T, int UNROLL> +class ncclFunction<ncclFuncAllGather, NCCL_ALGO_RING, NCCL_PROTO_LL, FUNC, T, UNROLL> { + public: + __device__ void run(struct ncclWorkElem* args) { + const int tid = threadIdx.x; + const int nthreads = args->nThreads; + const int bid = args->coll.bid; + const int nChannels = args->coll.nChannels; + struct ncclDevComm* comm = args->comm; + struct ncclChannel* channel = comm->channels+blockIdx.x; + struct ncclRing* ring = &channel->ring; + const int stepLines = comm->buffSizes[NCCL_PROTO_LL] / (sizeof(union ncclLLFifoLine)*NCCL_STEPS); + ssize_t chunkSize = stepLines * sizeof(uint64_t) / sizeof(T); + const int nranks = comm->nRanks; + const ssize_t loopSize = nChannels*chunkSize; + const ssize_t size = args->coll.count; + + ncclLLPrimitives<T, FUNC, 1, 1> LLprims(tid, nthreads, &ring->prev, &ring->next, stepLines, channel, comm); + + // Compute pointers + const T * __restrict__ thisInput = (const T*)args->sendbuff; + T * __restrict__ thisOutput = (T*)args->recvbuff; + + for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) { + if (size-gridOffset < loopSize) { + chunkSize = args->coll.lastChunkSize; + } + ssize_t chunkOffset = gridOffset + bid*chunkSize; + + /////////////// begin AllGather steps /////////////// + ssize_t offset; + int nelem = min(chunkSize, size-chunkOffset); + int rankDest; + + // step 0: push data to next GPU + rankDest = ring->devUserRanks[0]; + offset = chunkOffset + rankDest * size; + + if (thisInput + chunkOffset == thisOutput + offset) { // In place + LLprims.send(thisInput+chunkOffset, nelem); + } else { + LLprims.copySend(thisInput+chunkOffset, thisOutput+offset, nelem); + } + + // k-2 steps: copy to next GPU + for (int j=1; j<nranks-1; ++j) { + rankDest = ring->devUserRanks[nranks-j]; + offset = chunkOffset + rankDest * size; + + LLprims.recvCopySend(thisOutput+offset, nelem); + } + + // step k-1: final store + rankDest = ring->devUserRanks[1]; + offset = chunkOffset + rankDest * size; + + LLprims.recv(thisOutput+offset, nelem); + } } - - // Make final copy from buffer to dest. - rankDest = ring->devUserRanks[1]; - offset = chunkOffset + rankDest * size; - - // Final wait/copy. 
- prims.directRecv(thisOutput+offset, offset, nelem); - } -} - -template<int UNROLL, class FUNC, typename T> -__device__ void ncclAllGatherTreeKernel(struct CollectiveArgs* args) { } - -template<int UNUSED, class FUNC, typename T> -__device__ void ncclAllGatherRingLLKernel(struct CollectiveArgs* args) { - const int tid = threadIdx.x; - const int bid = args->bid; - const int nthreads = args->nThreads; - struct ncclDevComm* comm = args->comm; - struct ncclChannel* channel = comm->channels+blockIdx.x; - struct ncclRing* ring = &channel->ring; - - ncclLLPrimitives<T, FUNC, 1, 1> LLprims(tid, nthreads, &ring->prev, &ring->next, channel, comm, args->opCount); - - const ssize_t size = args->N; - //const int rank = comm->rank; - const int nranks = comm->nRanks; - ssize_t chunkSize = NCCL_LL_SLICE_LINES * sizeof(uint64_t) / sizeof(T); - const ssize_t loopSize = args->nChannels*chunkSize; - - // Compute pointers - const T * __restrict__ thisInput = (const T*)args->ThisInput; - T * __restrict__ thisOutput = (T*)args->ThisOutput; - - for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) { - if (size-gridOffset < loopSize) { - chunkSize = args->lastChunkSize; - } - ssize_t chunkOffset = gridOffset + bid*chunkSize; - - /////////////// begin AllGather steps /////////////// - ssize_t offset; - int nelem = min(chunkSize, size-chunkOffset); - int rankDest; - - // step 0: push data to next GPU - rankDest = ring->devUserRanks[0]; - offset = chunkOffset + rankDest * size; - - if (thisInput + chunkOffset == thisOutput + offset) { // In place - LLprims.send(thisInput+chunkOffset, nelem); - } else { - LLprims.copySend(thisInput+chunkOffset, thisOutput+offset, nelem); - } - - // k-2 steps: copy to next GPU - for (int j=1; j<nranks-1; ++j) { - rankDest = ring->devUserRanks[nranks-j]; - offset = chunkOffset + rankDest * size; - - LLprims.recvCopySend(thisOutput+offset, nelem); - } - - // step k-1: final store - rankDest = ring->devUserRanks[1]; - offset = chunkOffset + rankDest * size; - - LLprims.recv(thisOutput+offset, nelem); - } -} - -template<int UNUSED, class FUNC, typename T> -__device__ void ncclAllGatherTreeLLKernel(struct CollectiveArgs* args) { } +}; #include "prims_ll128.h" -template<int UNUSED, class FUNC, typename T> -__device__ void ncclAllGatherRingLL128Kernel(struct CollectiveArgs* args) { - const int tid = threadIdx.x; - const int bid = args->bid; - const int nthreads = args->nThreads; - struct ncclDevComm* comm = args->comm; - struct ncclChannel* channel = comm->channels+blockIdx.x; - struct ncclRing* ring = &channel->ring; - - ncclLL128Primitives<T, FUNC, 1, 1> LLprims(tid, nthreads, &ring->prev, &ring->next, channel, comm, args->opCount); - - const ssize_t size = args->N; - //const int rank = comm->rank; - const int nranks = comm->nRanks; - ssize_t chunkSize = (NCCL_LL128_ELEMS_PER_THREAD*nthreads*NCCL_LL128_DATAELEMS*sizeof(uint64_t))/(NCCL_LL128_LINEELEMS*sizeof(T)); - // We should not need the final /2 but it makes performance much, much smoother. Might be a bug somewhere. 
- const ssize_t minChunkSize = (NCCL_LL128_SHMEM_ELEMS_PER_THREAD*nthreads*NCCL_LL128_DATAELEMS*sizeof(uint64_t))/(NCCL_LL128_LINEELEMS*sizeof(T))/2; - - const ssize_t loopSize = args->nChannels*chunkSize; - - // Compute pointers - const T * __restrict__ thisInput = (const T*)args->ThisInput; - T * __restrict__ thisOutput = (T*)args->ThisOutput; - - for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) { - chunkSize = min(DIVUP(size-gridOffset, args->nChannels*minChunkSize)*minChunkSize, chunkSize); - - ssize_t chunkOffset = gridOffset + bid*chunkSize; - - /////////////// begin AllGather steps /////////////// - ssize_t offset; - int nelem = min(chunkSize, size-chunkOffset); - int rankDest; - - // step 0: push data to next GPU - rankDest = ring->devUserRanks[0]; - offset = chunkOffset + rankDest * size; - - if (thisInput + chunkOffset == thisOutput + offset) { // In place - LLprims.send(thisInput+chunkOffset, nelem); - } else { - LLprims.copySend(thisInput+chunkOffset, thisOutput+offset, nelem); - } - - // k-2 steps: copy to next GPU - for (int j=1; j<nranks-1; ++j) { - rankDest = ring->devUserRanks[nranks-j]; - offset = chunkOffset + rankDest * size; - - LLprims.recvCopySend(thisOutput+offset, nelem); +template<class FUNC, typename T, int UNROLL> +class ncclFunction<ncclFuncAllGather, NCCL_ALGO_RING, NCCL_PROTO_LL128, FUNC, T, UNROLL> { + public: + __device__ void run(struct ncclWorkElem* args) { + const int tid = threadIdx.x; + const int nthreads = args->nThreads; + const int bid = args->coll.bid; + const int nChannels = args->coll.nChannels; + struct ncclDevComm* comm = args->comm; + struct ncclChannel* channel = comm->channels+blockIdx.x; + struct ncclRing* ring = &channel->ring; + const int stepSize = comm->buffSizes[NCCL_PROTO_LL128] / (sizeof(uint64_t)*NCCL_STEPS); + ssize_t chunkSize = stepSize*NCCL_LL128_DATAELEMS*sizeof(uint64_t) / (NCCL_LL128_LINEELEMS*sizeof(T)); + // We should not need the final /2 but it makes performance much, much smoother. Might be a bug somewhere. 
+ const ssize_t minChunkSize = (NCCL_LL128_SHMEM_ELEMS_PER_THREAD*nthreads*NCCL_LL128_DATAELEMS*sizeof(uint64_t))/(NCCL_LL128_LINEELEMS*sizeof(T))/2; + const int nranks = comm->nRanks; + const ssize_t loopSize = nChannels*chunkSize; + const ssize_t size = args->coll.count; + + ncclLL128Primitives<T, FUNC, 1, 1> LLprims(tid, nthreads, &ring->prev, &ring->next, stepSize, channel, comm); + + // Compute pointers + const T * __restrict__ thisInput = (const T*)args->sendbuff; + T * __restrict__ thisOutput = (T*)args->recvbuff; + + for (ssize_t gridOffset = 0; gridOffset < size; gridOffset += loopSize) { + chunkSize = min(DIVUP(size-gridOffset, nChannels*minChunkSize)*minChunkSize, chunkSize); + + ssize_t chunkOffset = gridOffset + bid*chunkSize; + + /////////////// begin AllGather steps /////////////// + ssize_t offset; + int nelem = min(chunkSize, size-chunkOffset); + int rankDest; + + // step 0: push data to next GPU + rankDest = ring->devUserRanks[0]; + offset = chunkOffset + rankDest * size; + + if (thisInput + chunkOffset == thisOutput + offset) { // In place + LLprims.send(thisInput+chunkOffset, nelem); + } else { + LLprims.copySend(thisInput+chunkOffset, thisOutput+offset, nelem); + } + + // k-2 steps: copy to next GPU + for (int j=1; j<nranks-1; ++j) { + rankDest = ring->devUserRanks[nranks-j]; + offset = chunkOffset + rankDest * size; + + LLprims.recvCopySend(thisOutput+offset, nelem); + } + + // step k-1: final store + rankDest = ring->devUserRanks[1]; + offset = chunkOffset + rankDest * size; + + LLprims.recv(thisOutput+offset, nelem); + } } +}; - // step k-1: final store - rankDest = ring->devUserRanks[1]; - offset = chunkOffset + rankDest * size; +template<int PROTO, class FUNC, typename T, int UNROLL> +class ncclFunction<ncclFuncAllGather, NCCL_ALGO_TREE, PROTO, FUNC, T, UNROLL> { + public: + __device__ void run(struct ncclWorkElem* args) {} +}; - LLprims.recv(thisOutput+offset, nelem); - } -} +template<int PROTO, class FUNC, typename T, int UNROLL> +class ncclFunction<ncclFuncAllGather, NCCL_ALGO_COLLNET, PROTO, FUNC, T, UNROLL> { + public: + __device__ void run(struct ncclWorkElem* args) {} +}; -template<int UNUSED, class FUNC, typename T> -__device__ void ncclAllGatherTreeLL128Kernel(struct CollectiveArgs* args) { } |
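
For reference, all three protocol specializations above (SIMPLE, LL, LL128) follow the same ring all-gather step schedule for each chunk: send the local chunk, forward incoming chunks for nranks-2 steps, then receive the last one. The following is a minimal host-side C++ sketch of that schedule, not NCCL device code: Prims stands in for ncclPrimitives/ncclLLPrimitives, and the function, parameter names, and pointer arithmetic are illustrative assumptions that mirror the diff.

#include <cstddef>

// Sketch of the per-chunk ring all-gather schedule (assumed interface):
// devUserRanks[0] is this rank; devUserRanks[i] is the rank i hops upstream.
template <typename T, typename Prims>
void ringAllGatherChunk(Prims& prims, const T* input, T* output,
                        const int* devUserRanks, int nranks,
                        std::ptrdiff_t chunkOffset, std::ptrdiff_t count,
                        int nelem) {
  // Step 0: push this rank's chunk to the next GPU, copying it into the
  // local output as well unless the operation is in place.
  int rankDest = devUserRanks[0];
  std::ptrdiff_t offset = chunkOffset + rankDest * count;
  if (input + chunkOffset == output + offset) {
    prims.send(input + chunkOffset, nelem);          // in place
  } else {
    prims.copySend(input + chunkOffset, output + offset, nelem);
  }

  // Steps 1 .. k-2: receive the chunk that originated j hops upstream,
  // store it in the local output, and forward it to the next GPU.
  for (int j = 1; j < nranks - 1; ++j) {
    rankDest = devUserRanks[nranks - j];
    offset = chunkOffset + rankDest * count;
    prims.recvCopySend(output + offset, nelem);
  }

  // Step k-1: the final chunk has already visited every other rank, so it
  // only needs to be received and stored.
  rankDest = devUserRanks[1];
  offset = chunkOffset + rankDest * count;
  prims.recv(output + offset, nelem);
}

In the SIMPLE specialization the same schedule uses the direct* variants (directSend, directCopySend, directRecvCopySend, directRecv), which also carry the destination offset so data can be written straight into the peer's output buffer. The outer gridOffset loop applies this schedule once per chunk per channel, with chunkOffset = gridOffset + bid*chunkSize selecting the chunk handled by this channel.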