
Commit

add printfs
prathikr committed Dec 18, 2024
1 parent 3dc9dcf · commit 3f5deb4
Showing 1 changed file with 30 additions and 0 deletions.
30 changes: 30 additions & 0 deletions onnxruntime/core/providers/webgpu/tensor/slice.cc
@@ -43,6 +43,7 @@ ONNX_OPERATOR_KERNEL_EX(
Slice);

Status SliceProgram::GenerateShaderCode(ShaderHelper& shader) const {
std::cout << "generate shader code" << std::endl;
const ShaderVariableHelper& input = shader.AddInput("input", ShaderUsage::UseUniform | ShaderUsage::UseIndicesTypeAlias);
const ShaderVariableHelper& output = shader.AddOutput("output", ShaderUsage::UseUniform | ShaderUsage::UseIndicesTypeAlias);

@@ -73,15 +74,19 @@ Status SliceProgram::GenerateShaderCode(ShaderHelper& shader) const {

shader.MainFunctionBody() << output.SetByOffset("global_idx", input.GetByIndices("input_indices"));

std::cout << "shader code generated" << std::endl;
return Status::OK();
}

Status Slice::ComputeInternal(ComputeContext& context) const {
// READ INPUTS
std::cout << "read input" << std::endl;
const Tensor* input_tensor = context.Input(0);
const TensorShape& input_shape = input_tensor->Shape();
int64_t input_rank = static_cast<int64_t>(input_shape.NumDimensions());

std::cout << "read starts/ends from either attr or input" << std::endl;

auto starts_raw = hasStartsAttr ? gsl::make_span(attr_starts_) : context.Input(1)->DataAsSpan<int64_t>();
auto ends_raw = hasEndsAttr ? gsl::make_span(attr_ends_) : context.Input(2)->DataAsSpan<int64_t>();

@@ -92,6 +97,8 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
const Tensor* axes_tensor = nullptr;
const Tensor* steps_tensor = nullptr;

std::cout << "read axes and steps from input" << std::endl;

if (input_count >= 4) {
// axes provided as input
axes_tensor = context.Input(3);
@@ -102,6 +109,8 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
steps_tensor = context.Input(4);
}

std::cout << "inject defaults if axes or steps not provided" << std::endl;

std::vector<int64_t> axes_default;
if (axes_tensor == nullptr) {
// if axes not provided, set to [0, ..., len(starts)-1]
@@ -121,6 +130,8 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
auto steps_raw = steps_tensor == nullptr ? gsl::make_span(steps_default) : steps_tensor->DataAsSpan<int64_t>();

// PROCESS INPUTS
std::cout << "processing inputs" << std::endl;
std::cout << "process starts" << std::endl;
std::vector<uint32_t> starts;
for (unsigned int i = 0; i < starts_raw.size(); i++) {
int64_t val = starts_raw[i];
@@ -135,6 +146,8 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
starts.push_back(static_cast<uint32_t>(val));
}

std::cout << "process ends" << std::endl;

std::vector<uint32_t> ends;
for (unsigned int i = 0; i < ends_raw.size(); i++) {
int64_t val = ends_raw[i];
@@ -149,11 +162,15 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
ends.push_back(static_cast<uint32_t>(val));
}

std::cout << "process axes" << std::endl;

std::vector<uint32_t> axes;
for (unsigned int i = 0; i < axes_raw.size(); i++) {
axes.push_back(static_cast<int32_t>(axes_raw[i]));
}

std::cout << "process steps with INT_MAX" << std::endl;

// temporary steps vector to handle negative steps
std::vector<int32_t> steps_tmp;
for (unsigned int i = 0; i < steps_raw.size(); i++) {
@@ -164,6 +181,8 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
}
}

std::cout << "insert missing dimensions" << std::endl;

if (static_cast<int64_t>(axes.size()) != input_rank) {
for (uint32_t i = 0; i < input_rank; i++) {
int idx = -1;
@@ -182,12 +201,16 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
}
}

std::cout << "retain the sign of the steps" << std::endl;

// retain the sign of the steps
std::vector<int32_t> signs;
for (unsigned int i = 0; i < steps_tmp.size(); i++) {
signs.push_back(steps_tmp[i] < 0 ? -1 : (steps_tmp[i] > 0 ? 1 : 0));
}

std::cout << "convert negative steps to positive steps and reverse starts and ends" << std::endl;

// Convert negative steps to positive steps and reverse starts and ends
for (unsigned int i = 0; i < steps_tmp.size(); i++) {
if (steps_tmp[i] < 0) {
@@ -201,12 +224,16 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
}
}

std::cout << "final steps vector" << std::endl;

// final steps vector of type unsigned int
std::vector<uint32_t> steps;
for (unsigned int i = 0; i < steps_tmp.size(); i++) {
steps.push_back(static_cast<uint32_t>(steps_tmp[i]));
}

std::cout << "calculate output dims" << std::endl;

// calculate output dims
std::vector<int64_t> output_dims;

Check warning on line 238 (GitHub Actions / Optional Lint C++, cpplint via reviewdog): onnxruntime/core/providers/webgpu/tensor/slice.cc:238: Add #include <vector> for vector<> [build/include_what_you_use] [4]
for (unsigned int i = 0; i < axes.size(); i++) {
@@ -224,9 +251,12 @@ Status Slice::ComputeInternal(ComputeContext& context) const {
uint32_t output_size = static_cast<uint32_t>(output_shape.Size());

if (output_size == 0) {
std::cout << "output size is 0" << std::endl;
return Status::OK();
}

std::cout << "run program" << std::endl;

Check warning on line 258 (GitHub Actions / Optional Lint C++, cpplint via reviewdog): onnxruntime/core/providers/webgpu/tensor/slice.cc:258: Add #include <iostream> for cout [build/include_what_you_use] [4]
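
Both cpplint annotations ask for the same kind of fix: the new debug prints use std::cout / std::endl, and std::vector is flagged as well, so the corresponding headers should be stated explicitly near the top of slice.cc (today they are presumably only pulled in transitively). A minimal sketch of the includes the linter is asking for, assuming they are not already present:

#include <iostream>  // std::cout, std::endl used by the debug prints
#include <vector>    // std::vector used for the starts/ends/axes/steps buffers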

SliceProgram program{};
program
.AddInputs({{input_tensor, ProgramTensorMetadataDependency::TypeAndRank}})
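
For context on the "convert negative steps to positive steps and reverse starts and ends" step traced above: a negative-step slice visits the same elements as a positive-step slice over adjusted bounds, just in the opposite order, and the retained sign is presumably what lets the shader restore that order. The snippet below is a standalone illustration of this equivalence, not the kernel's exact arithmetic (part of that hunk is collapsed in this diff):

#include <iostream>
#include <vector>

int main() {
  const std::vector<int> data{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
  int start = 8, end = 2, step = -2;  // negative-step slice: visits 8, 6, 4

  std::vector<int> negative_order;
  for (int i = start; i > end; i += step) negative_order.push_back(data[i]);

  // Equivalent positive-step slice: begin at the smallest visited index,
  // negate the step, and keep sign = -1 to remember the original direction.
  int count = (start - end - step - 1) / -step;  // number of visited indices (ceiling division)
  int new_start = start + (count - 1) * step;    // smallest visited index
  int new_step = -step;
  int sign = -1;

  std::vector<int> positive_order;
  for (int i = new_start; i <= start; i += new_step) positive_order.push_back(data[i]);

  for (int v : negative_order) std::cout << v << ' ';  // prints: 8 6 4
  std::cout << '\n';
  for (int v : positive_order) std::cout << v << ' ';  // prints: 4 6 8
  std::cout << "\nsign: " << sign << '\n';             // reversal hint for the consumer
}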

0 comments on commit 3f5deb4
