Skip to content

Commit

Permalink
Remove unused function to fix build error.
Browse files Browse the repository at this point in the history
Fix some long lines.
  • Loading branch information
skottmckay committed Dec 23, 2024
1 parent 41fb824 commit d8ef92b
Show file tree
Hide file tree
Showing 3 changed files with 10 additions and 17 deletions.
2 changes: 2 additions & 0 deletions include/onnxruntime/core/session/onnxruntime_c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -1344,6 +1344,8 @@ struct OrtApi {
* Create a tensor with user's buffer. You can fill the buffer either before calling this function or after.
* p_data is owned by caller. ReleaseValue won't release p_data.
*
* If you wish to transfer ownership of p_data to ORT use CreateTensorWithDataAndDeleterAsOrtValue.
*
* \param[in] info Memory description of where the p_data buffer resides (CPU vs GPU etc).
* \param[in] p_data Pointer to the data buffer.
* \param[in] p_data_len The number of bytes in the data buffer.
Expand Down
14 changes: 8 additions & 6 deletions onnxruntime/core/framework/onnxruntime_typeinfo.cc
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,8 @@ ORT_API_STATUS_IMPL(OrtApis::CastTypeInfoToOptionalTypeInfo, _In_ const OrtTypeI
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::GetDenotationFromTypeInfo, _In_ const OrtTypeInfo* type_info, _Out_ const char** const out,
_Out_ size_t* len) {
ORT_API_STATUS_IMPL(OrtApis::GetDenotationFromTypeInfo, _In_ const OrtTypeInfo* type_info,
_Out_ const char** const out, _Out_ size_t* len) {
API_IMPL_BEGIN
*out = type_info->denotation.c_str();
*len = type_info->denotation.size();
Expand Down Expand Up @@ -115,8 +115,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateSparseTensorTypeInfo, _In_ const OrtTensorTyp
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_key_type, _In_ const OrtTypeInfo* map_value_type,
_Out_ OrtTypeInfo** type_info) {
ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_key_type,
_In_ const OrtTypeInfo* map_value_type, _Out_ OrtTypeInfo** type_info) {
API_IMPL_BEGIN
auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_MAP);
ti->map_type_info = std::make_unique<OrtMapTypeInfo>(map_key_type, map_value_type->Clone());
Expand All @@ -126,7 +126,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateMapTypeInfo, ONNXTensorElementDataType map_ke
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* sequence_type, _Out_ OrtTypeInfo** type_info) {
ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* sequence_type,
_Out_ OrtTypeInfo** type_info) {
API_IMPL_BEGIN
auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_SEQUENCE);
ti->sequence_type_info = std::make_unique<OrtSequenceTypeInfo>(sequence_type->Clone());
Expand All @@ -136,7 +137,8 @@ ORT_API_STATUS_IMPL(OrtApis::CreateSequenceTypeInfo, _In_ const OrtTypeInfo* seq
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::CreateOptionalTypeInfo, _In_ const OrtTypeInfo* contained_type, _Out_ OrtTypeInfo** type_info) {
ORT_API_STATUS_IMPL(OrtApis::CreateOptionalTypeInfo, _In_ const OrtTypeInfo* contained_type,
_Out_ OrtTypeInfo** type_info) {
API_IMPL_BEGIN
auto ti = std::make_unique<OrtTypeInfo>(ONNXType::ONNX_TYPE_OPTIONAL);
ti->optional_type_info = std::make_unique<OrtOptionalTypeInfo>(contained_type->Clone());
Expand Down
11 changes: 0 additions & 11 deletions onnxruntime/core/session/onnxruntime_c_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -126,17 +126,6 @@ ORT_STATUS_PTR CreateTensorImpl(MLDataType ml_type, const int64_t* shape, size_t
return nullptr;
}

// Creates a Tensor that owns its own memory, for use as an element of a sequence (ONNX_TYPE_SEQUENCE).
// Allocates via the process-wide default OrtAllocator (wrapped as an onnxruntime::IAllocatorImplWrappingOrtAllocator)
// rather than a caller-provided allocator — see the TODO below.
// NOTE(review): this commit deletes this function as unused ("Remove unused function to fix build error").
//
// elem_type:  element data type of the tensor to create.
// shape:      pointer to shape_len dimension values; presumably non-null when shape_len > 0 — not validated here.
// shape_len:  number of dimensions in shape.
// out:        [out] receives the newly constructed Tensor (previous contents replaced by move-assignment).
// Returns nullptr on success; on allocator-lookup failure ORT_API_RETURN_IF_ERROR propagates the OrtStatus*.
ORT_STATUS_PTR CreateTensorImplForSeq(MLDataType elem_type, const int64_t* shape, size_t shape_len, Tensor& out) {
  OrtAllocator* allocator;
  // TODO(pranav): what allocator should be used to create the tensor here?
  // for the sake of simplicity of the API using the default one here
  ORT_API_RETURN_IF_ERROR(OrtApis::GetAllocatorWithDefaultOptions(&allocator));
  AllocatorPtr alloc_ptr = std::make_shared<onnxruntime::IAllocatorImplWrappingOrtAllocator>(allocator);
  TensorShape tensor_shape(shape, shape_len);
  // Tensor allocates its own buffer from alloc_ptr; the moved-in shared_ptr keeps the wrapper alive.
  out = Tensor(elem_type, tensor_shape, std::move(alloc_ptr));
  return nullptr;  // nullptr OrtStatus* == success
}

// Create Tensor with existing data. Tensor does not own memory.
ORT_STATUS_PTR CreateTensorImpl(MLDataType ml_type,
const int64_t* shape, size_t shape_len,
Expand Down

0 comments on commit d8ef92b

Please sign in to comment.