Skip to content

Commit

Permalink
Re-organize layout_transform_ops (#2133)
Browse files — Browse the repository at this point in the history
Summary:

- Migrate layout_transform_ops into its own directory

Reviewed By: spcyppt

Differential Revision: D51290598
  • Loading branch information
Benson Ma authored and facebook-github-bot committed Nov 15, 2023
1 parent 975cb01 commit 04a2858
Show file tree
Hide file tree
Showing 10 changed files with 13 additions and 17 deletions.
16 changes: 8 additions & 8 deletions fbgemm_gpu/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -568,8 +568,8 @@ set(fbgemm_gpu_sources_static_cpu
src/jagged_tensor_ops/jagged_tensor_ops_autograd.cpp
src/jagged_tensor_ops/jagged_tensor_ops_meta.cpp
src/jagged_tensor_ops/jagged_tensor_ops_cpu.cpp
src/input_combine_cpu.cpp
src/layout_transform_ops_cpu.cpp
src/input_combine_ops/input_combine_cpu.cpp
src/layout_transform_ops/layout_transform_ops_cpu.cpp
src/quantize_ops/quantize_ops_cpu.cpp
src/quantize_ops/quantize_ops_meta.cpp
src/sparse_ops/sparse_ops_cpu.cpp
Expand All @@ -589,16 +589,16 @@ if(NOT FBGEMM_CPU_ONLY)
codegen/embedding_bounds_check_host.cpp
src/memory_utils/memory_utils.cpp
src/memory_utils/memory_utils_ops.cpp
src/layout_transform_ops_gpu.cpp
src/layout_transform_ops/layout_transform_ops_gpu.cpp
src/permute_pooled_embedding_ops/permute_pooled_embedding_ops_gpu.cpp
src/permute_pooled_embedding_ops/permute_pooled_embedding_ops_split_gpu.cpp
src/quantize_ops/quantize_ops_gpu.cpp
src/sparse_ops/sparse_ops_gpu.cpp
src/split_embeddings_utils.cpp
src/split_embeddings_cache/split_embeddings_cache_ops.cu
src/metric_ops_host.cpp
src/metric_ops/metric_ops_host.cpp
src/embedding_inplace_ops/embedding_inplace_update_gpu.cpp
src/input_combine_gpu.cpp
src/input_combine_ops/input_combine_gpu.cpp
codegen/batch_index_select_dim0_host.cpp)

if(NVML_LIB_PATH)
Expand Down Expand Up @@ -633,7 +633,7 @@ if(NOT FBGEMM_CPU_ONLY)
src/memory_utils/memory_utils_ops.cu
src/embedding_inplace_ops/embedding_inplace_update.cu
src/histogram_binning_calibration_ops.cu
src/input_combine.cu
src/input_combine_ops/input_combine.cu
src/jagged_tensor_ops/batched_dense_vec_jagged_2d_mul_backward.cu
src/jagged_tensor_ops/batched_dense_vec_jagged_2d_mul_forward.cu
src/jagged_tensor_ops/dense_to_jagged_forward.cu
Expand All @@ -651,8 +651,8 @@ if(NOT FBGEMM_CPU_ONLY)
src/jagged_tensor_ops/jagged_to_padded_dense_forward.cu
src/jagged_tensor_ops/jagged_unique_indices.cu
src/jagged_tensor_ops/keyed_jagged_index_select_dim1.cu
src/layout_transform_ops.cu
src/metric_ops.cu
src/layout_transform_ops/layout_transform_ops.cu
src/metric_ops/metric_ops.cu
src/permute_pooled_embedding_ops/permute_pooled_embedding_ops_split.cu
src/permute_pooled_embedding_ops/permute_pooled_embedding_ops.cu
src/quantize_ops/quantize_bfloat16.cu
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -13,19 +13,16 @@
#include "fbgemm_gpu/cub_namespace_postfix.cuh"
// clang-format on

#include "fbgemm_gpu/layout_transform_ops.cuh"
#include "fbgemm_gpu/sparse_ops.h"
#include "fbgemm_gpu/sparse_ops_utils.h"

#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>

#include <torch/library.h>

#include "ATen/Parallel.h"
#include "fbgemm_gpu/layout_transform_ops.cuh"
#include "fbgemm_gpu/sparse_ops.h"
#include "fbgemm_gpu/sparse_ops_utils.h"

using Tensor = at::Tensor;

Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,11 @@
* LICENSE file in the root directory of this source tree.
*/

#include "fbgemm_gpu/sparse_ops.h"
#include "fbgemm_gpu/sparse_ops_utils.h"

#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <torch/library.h>
#include "fbgemm_gpu/sparse_ops.h"
#include "fbgemm_gpu/sparse_ops_utils.h"

TORCH_LIBRARY_IMPL(fbgemm, CUDA, m) {
DISPATCH_TO_CUDA(
Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.

0 comments on commit 04a2858

Please sign in to comment.