Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enable TRT provider option configuration for C# (updated version) #7808

Merged
merged 44 commits into from
Jun 25, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
44 commits
Select commit Hold shift + click to select a range
7c2d4c6
prepare for C# to configure provider options
chilo-ms May 17, 2021
1921ec4
add c# code
chilo-ms May 17, 2021
9ec0f35
revert modification
chilo-ms May 18, 2021
6ac01ca
Add update provider info configuration in trt ep side
chilo-ms May 18, 2021
de6cb01
fix bugs
chilo-ms May 18, 2021
c80a02a
fix bug for compiler error C2259
chilo-ms May 18, 2021
a2b8984
Add c# test
chilo-ms May 19, 2021
681c319
fix bug
chilo-ms May 19, 2021
a67fcf3
fix bug
chilo-ms May 19, 2021
ace27d2
Properly deal with string
chilo-ms May 20, 2021
12b7cdc
Add c# api for accepting trt provider options
chilo-ms May 20, 2021
a763a64
fix bug
chilo-ms May 21, 2021
557724b
Merge branch 'c_sharp_trt_provider_options' of https://github.com/mic…
chilo-ms May 21, 2021
b628bf5
Modify C# test
chilo-ms May 21, 2021
9677dfc
add shared lib test
chilo-ms May 21, 2021
3e9d013
Add get provider options functionality
chilo-ms May 24, 2021
d8c18aa
clean up
chilo-ms May 24, 2021
69d37e8
clean up
chilo-ms May 24, 2021
d40122b
fix bug
chilo-ms May 24, 2021
b456e6c
Merge branch 'master' into c_sharp_trt_provider_options
chilo-ms May 24, 2021
2a87b40
fix bugs for CI
chilo-ms May 25, 2021
99774ae
Fix bugs for CI and documentation
chilo-ms May 25, 2021
9a0b07c
Move TRT EP provider options related functions out of C API
chilo-ms May 25, 2021
96851de
revert
chilo-ms May 25, 2021
30cc55c
fix bug
chilo-ms May 25, 2021
2576645
refactor
chilo-ms May 26, 2021
497550d
add check for provider options string
chilo-ms May 26, 2021
748cb95
Merge branch 'master' into c_sharp_trt_provider_options
chilo-ms Jun 4, 2021
1f815e4
code refactor
chilo-ms Jun 8, 2021
0a173c4
fix CI bug
chilo-ms Jun 8, 2021
ae45fe8
Fix CI bugs
chilo-ms Jun 8, 2021
1126559
clean up
chilo-ms Jun 8, 2021
e6953e8
fix bug
chilo-ms Jun 8, 2021
5fff868
Fix bug for Post Analysis
chilo-ms Jun 8, 2021
7a5f903
fix accidental bug
chilo-ms Jun 8, 2021
5e3f600
Add API_IMPL_BEGIN/API_IMPL_END
chilo-ms Jun 9, 2021
3ee5b20
clean up
chilo-ms Jun 9, 2021
b312090
code refactor
chilo-ms Jun 15, 2021
1f6280e
code refactor
chilo-ms Jun 18, 2021
c48c9ba
Merge branch 'master' into c_sharp_trt_provider_options
chilo-ms Jun 18, 2021
41a8dee
fix CI fail
chilo-ms Jun 18, 2021
3649fa9
fix bug
chilo-ms Jun 18, 2021
2ca2a19
use string append
chilo-ms Jun 22, 2021
a2456af
Change the code to better handle strncpy and string append
chilo-ms Jun 23, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 19 additions & 5 deletions csharp/src/Microsoft.ML.OnnxRuntime/NativeMethods.cs
Original file line number Diff line number Diff line change
Expand Up @@ -201,9 +201,10 @@ public struct OrtApi
public IntPtr ReleasePrepackedWeightsContainer;
public IntPtr CreateSessionWithPrepackedWeightsContainer;
public IntPtr CreateSessionFromArrayWithPrepackedWeightsContainer;
public IntPtr SessionOptionsAppendExecutionProvider_TensorRT_V2;
public IntPtr CreateTensorRTProviderOptions;
public IntPtr UpdateTensorRTProviderOptions;
public IntPtr GetTensorRTProviderOptions;
public IntPtr GetTensorRTProviderOptionsAsString;
public IntPtr ReleaseTensorRTProviderOptions;
}

Expand Down Expand Up @@ -360,9 +361,11 @@ static NativeMethods()
OrtCreatePrepackedWeightsContainer = (DOrtCreatePrepackedWeightsContainer)Marshal.GetDelegateForFunctionPointer(api_.CreatePrepackedWeightsContainer, typeof(DOrtCreatePrepackedWeightsContainer));
OrtReleasePrepackedWeightsContainer = (DOrtReleasePrepackedWeightsContainer)Marshal.GetDelegateForFunctionPointer(api_.ReleasePrepackedWeightsContainer, typeof(DOrtReleasePrepackedWeightsContainer));

SessionOptionsAppendExecutionProvider_TensorRT_V2 = (DSessionOptionsAppendExecutionProvider_TensorRT_V2)Marshal.GetDelegateForFunctionPointer(
api_.SessionOptionsAppendExecutionProvider_TensorRT_V2, typeof(DSessionOptionsAppendExecutionProvider_TensorRT_V2));
OrtCreateTensorRTProviderOptions = (DOrtCreateTensorRTProviderOptions)Marshal.GetDelegateForFunctionPointer(api_.CreateTensorRTProviderOptions, typeof(DOrtCreateTensorRTProviderOptions));
OrtUpdateTensorRTProviderOptions = (DOrtUpdateTensorRTProviderOptions)Marshal.GetDelegateForFunctionPointer(api_.UpdateTensorRTProviderOptions, typeof(DOrtUpdateTensorRTProviderOptions));
OrtGetTensorRTProviderOptions = (DOrtGetTensorRTProviderOptions)Marshal.GetDelegateForFunctionPointer(api_.GetTensorRTProviderOptions, typeof(DOrtGetTensorRTProviderOptions));
OrtGetTensorRTProviderOptionsAsString = (DOrtGetTensorRTProviderOptionsAsString)Marshal.GetDelegateForFunctionPointer(api_.GetTensorRTProviderOptionsAsString, typeof(DOrtGetTensorRTProviderOptionsAsString));
OrtReleaseTensorRTProviderOptions = (DOrtReleaseTensorRTProviderOptions)Marshal.GetDelegateForFunctionPointer(api_.ReleaseTensorRTProviderOptions, typeof(DOrtReleaseTensorRTProviderOptions));
}

Expand Down Expand Up @@ -411,14 +414,15 @@ static NativeMethods()
public static DOrtUpdateTensorRTProviderOptions OrtUpdateTensorRTProviderOptions;

/// <summary>
/// Updates native OrtTensorRTProviderOptions instance using given key/value pairs
/// Get the native OrtTensorRTProviderOptionsV2 instance as a serialized string
/// </summary>
/// <param name="allocator">instance of OrtAllocator</param>
/// <param name="ptr">is a UTF-8 null terminated string allocated using 'allocator'</param>
public delegate IntPtr /* OrtStatus* */DOrtGetTensorRTProviderOptions(
public delegate IntPtr /* OrtStatus* */DOrtGetTensorRTProviderOptionsAsString(
IntPtr /*(OrtTensorRTProviderOptionsV2**)*/ trtProviderOptionsInstance,
IntPtr /*(OrtAllocator*)*/ allocator,
out IntPtr /*(char**)*/ptr);
public static DOrtGetTensorRTProviderOptions OrtGetTensorRTProviderOptions;
public static DOrtGetTensorRTProviderOptionsAsString OrtGetTensorRTProviderOptionsAsString;

/// <summary>
/// Releases native OrtTensorRTProviderOptions instance
Expand Down Expand Up @@ -704,6 +708,16 @@ IntPtr[] outputValues /* An array of output value pointers. Array must be alloca
IntPtr /*(const OrtTensorRTProviderOptions*)*/ trtProviderOptions);
public static DSessionOptionsAppendExecutionProvider_TensorRT SessionOptionsAppendExecutionProvider_TensorRT;

/// <summary>
/// Append a TensorRT EP instance (configured based on given provider options) to the native OrtSessionOptions instance
/// </summary>
/// <param name="options">Native OrtSessionOptions instance</param>
/// <param name="trtProviderOptions">Native OrtTensorRTProviderOptionsV2 instance</param>
public delegate IntPtr /*(OrtStatus*)*/DSessionOptionsAppendExecutionProvider_TensorRT_V2(
IntPtr /*(OrtSessionOptions*)*/ options,
IntPtr /*(const OrtTensorRTProviderOptionsV2*)*/ trtProviderOptions);
public static DSessionOptionsAppendExecutionProvider_TensorRT_V2 SessionOptionsAppendExecutionProvider_TensorRT_V2;

/// <summary>
/// Free Dimension override (by denotation)
/// </summary>
Expand Down
2 changes: 1 addition & 1 deletion csharp/src/Microsoft.ML.OnnxRuntime/ProviderOptions.cs
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ public string GetOptions()

// Process provider options string
IntPtr providerOptions = IntPtr.Zero;
NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorRTProviderOptions(allocator.Pointer, out providerOptions));
NativeApiStatus.VerifySuccess(NativeMethods.OrtGetTensorRTProviderOptionsAsString(handle, allocator.Pointer, out providerOptions));
using (var ortAllocation = new OrtMemoryAllocation(allocator, providerOptions, 0))
{
return NativeOnnxValueHelper.StringFromNativeUtf8(providerOptions);
Expand Down
30 changes: 30 additions & 0 deletions include/onnxruntime/core/platform/tensorrt_provider_options.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

/// <summary>
/// Options for the TensorRT provider that are passed to SessionOptionsAppendExecutionProvider_TensorRT_V2.
/// Please note that this struct is identical to OrtTensorRTProviderOptions but only to be used internally.
/// User can only get the instance of OrtTensorRTProviderOptionsV2 via CreateTensorRTProviderOptions.
/// NOTE(review): the field order/layout appears intended to mirror OrtTensorRTProviderOptions exactly
/// (the bridge code reinterpret_casts between the two) — do not reorder or insert members.
/// </summary>
struct OrtTensorRTProviderOptionsV2 {
  int device_id;                                // cuda device id.
  int has_user_compute_stream;                  // indicator of user specified CUDA compute stream.
  void* user_compute_stream;                    // user specified CUDA compute stream.
  int trt_max_partition_iterations;             // maximum iterations for TensorRT parser to get capability
  int trt_min_subgraph_size;                    // minimum size of TensorRT subgraphs
  size_t trt_max_workspace_size;                // maximum workspace size for TensorRT.
  int trt_fp16_enable;                          // enable TensorRT FP16 precision. Default 0 = false, nonzero = true
  int trt_int8_enable;                          // enable TensorRT INT8 precision. Default 0 = false, nonzero = true
  const char* trt_int8_calibration_table_name;  // TensorRT INT8 calibration table name.
  int trt_int8_use_native_calibration_table;    // use native TensorRT generated calibration table. Default 0 = false, nonzero = true
  int trt_dla_enable;                           // enable DLA. Default 0 = false, nonzero = true
  int trt_dla_core;                             // DLA core number. Default 0
  int trt_dump_subgraphs;                       // dump TRT subgraph. Default 0 = false, nonzero = true
  int trt_engine_cache_enable;                  // enable engine caching. Default 0 = false, nonzero = true
  const char* trt_engine_cache_path;            // specify engine cache path
  int trt_engine_decryption_enable;             // enable engine decryption. Default 0 = false, nonzero = true
  const char* trt_engine_decryption_lib_path;   // specify engine decryption library path
  int trt_force_sequential_engine_build;        // force building TensorRT engine sequentially. Default 0 = false, nonzero = true
};
54 changes: 39 additions & 15 deletions include/onnxruntime/core/session/onnxruntime_c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,7 @@ ORT_RUNTIME_CLASS(ThreadPoolParams);
ORT_RUNTIME_CLASS(ThreadingOptions);
ORT_RUNTIME_CLASS(ArenaCfg);
ORT_RUNTIME_CLASS(PrepackedWeightsContainer);
ORT_RUNTIME_CLASS(TensorRTProviderOptionsV2);

#ifdef _WIN32
typedef _Return_type_success_(return == 0) OrtStatus* OrtStatusPtr;
Expand All @@ -193,6 +194,7 @@ typedef OrtStatus* OrtStatusPtr;
_Success_(return == 0) _Check_return_ _Ret_maybenull_ OrtStatusPtr ORT_API_CALL NAME(__VA_ARGS__) NO_EXCEPTION

#define ORT_CLASS_RELEASE(X) void(ORT_API_CALL * Release##X)(_Frees_ptr_opt_ Ort##X * input)
#define ORT_CLASS_RELEASE2(X) void(ORT_API_CALL * Release##X)(_Frees_ptr_opt_ Ort##X##V2 * input)
pranavsharma marked this conversation as resolved.
Show resolved Hide resolved

// When passing in an allocator to any ORT function, be sure that the allocator object
// is not destroyed until the last allocated object using it is freed.
Expand Down Expand Up @@ -1236,7 +1238,7 @@ struct OrtApi {
ORT_API2_STATUS(ModelMetadataGetGraphDescription, _In_ const OrtModelMetadata* model_metadata,
_Inout_ OrtAllocator* allocator, _Outptr_ char** value);
/**
* Append TensorRT execution provider to the session options
* Append TensorRT execution provider to the session options with TensorRT provider options.
* If TensorRT is not available (due to a non TensorRT enabled build), this function will return failure.
*/
ORT_API2_STATUS(SessionOptionsAppendExecutionProvider_TensorRT,
Expand Down Expand Up @@ -1384,38 +1386,60 @@ struct OrtApi {
_Outptr_ OrtSession** out);

/**
* Use this API to create the configuration of a TensorRT Execution Provider.
* Append TensorRT execution provider to the session options with TensorRT provider options.
* If TensorRT is not available (due to a non TensorRT enabled build), this function will return failure.
* Note: this API is slightly different than SessionOptionsAppendExecutionProvider_TensorRT.
* SessionOptionsAppendExecutionProvider_TensorRT takes struct OrtTensorRTProviderOptions which is open to user as argument,
* but this API takes opaque struct OrtTensorRTProviderOptionsV2 which must be created by CreateTensorRTProviderOptions.
* User needs to instantiate OrtTensorRTProviderOptions as well as allocate/release buffers for some members of OrtTensorRTProviderOptions.
* However, for using OrtTensorRTProviderOptionsV2, CreateTensorRTProviderOptions and ReleaseTensorRTProviderOptions will do the memory allocation and release for you.
*
* \param options - OrtSessionOptions instance
* \param tensorrt_options - OrtTensorRTProviderOptionsV2 instance
*/
ORT_API2_STATUS(SessionOptionsAppendExecutionProvider_TensorRT_V2,
_In_ OrtSessionOptions* options, _In_ const OrtTensorRTProviderOptionsV2* tensorrt_options);

/**
* Use this API to create the configuration of a TensorRT Execution Provider which is an instance of OrtTensorRTProviderOptionsV2.
*
* \param out - pointer to the pointer of TensorRT EP provider options instance.
*/
ORT_API2_STATUS(CreateTensorRTProviderOptions, _Outptr_ OrtTensorRTProviderOptions** out);
ORT_API2_STATUS(CreateTensorRTProviderOptions, _Outptr_ OrtTensorRTProviderOptionsV2** out);

/**
* Use this API to set appropriate configuration knobs of a TensorRT Execution Provider.
* \tensorrt_provider_options - OrtTensorRTProviderOptions instance
* \provider_options_keys - array of UTF-8 null-terminated string for provider options keys
* \provider_options_values - array of UTF-8 null-terminated string for provider options values
* \num_keys - number of keys
*
* Please reference to https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example
* to know the available keys and values. key should be in string format of the member of OrtTensorRTProviderOptions and value should be it's related range.
* For example, key="trt_max_workspace_size" and value="2147483648"
*
* \param tensorrt_options - OrtTensorRTProviderOptionsV2 instance
* \param provider_options_keys - array of UTF-8 null-terminated string for provider options keys
* \param provider_options_values - array of UTF-8 null-terminated string for provider options values
* \param num_keys - number of keys
*/
ORT_API2_STATUS(UpdateTensorRTProviderOptions, _Inout_ OrtTensorRTProviderOptions* tensorrt_provider_options,
ORT_API2_STATUS(UpdateTensorRTProviderOptions, _Inout_ OrtTensorRTProviderOptionsV2* tensorrt_options,
_In_reads_(num_keys) const char* const* provider_options_keys,
_In_reads_(num_keys) const char* const* provider_options_values,
_In_ size_t num_keys);

/**
* Get serialized provider options string of a TensorRT Execution Provider.
* First time calling this API returns the default TensorRT EP provider options in serialized string.
* If you want to change some provider options, you can call UpdateTensorRTProviderOptions
* and then call this API to get returned string to check whether the options have been configured.
* Get serialized TensorRT provider options string.
*
* For example, "trt_max_workspace_size=2147483648;trt_max_partition_iterations=10;trt_int8_enable=1;......"
*
 * \param tensorrt_options - OrtTensorRTProviderOptionsV2 instance
* \param allocator - a ptr to an instance of OrtAllocator obtained with CreateAllocator() or GetAllocatorWithDefaultOptions()
* the specified allocator will be used to allocate continuous buffers for output strings and lengths.
* \param ptr - is a UTF-8 null terminated string allocated using 'allocator'. The caller is responsible for using the same allocator to free it.
*/
ORT_API2_STATUS(GetTensorRTProviderOptions, _Inout_ OrtAllocator* allocator, _Outptr_ char** ptr);
ORT_API2_STATUS(GetTensorRTProviderOptionsAsString, _In_ const OrtTensorRTProviderOptionsV2* tensorrt_options, _Inout_ OrtAllocator* allocator, _Outptr_ char** ptr);

/**
* Use this API to release the configuration of a TensorRT Execution Provider.
 * Use this API to release the instance of OrtTensorRTProviderOptionsV2.
*/
ORT_CLASS_RELEASE(TensorRTProviderOptions);
ORT_CLASS_RELEASE2(TensorRTProviderOptions);
pranavsharma marked this conversation as resolved.
Show resolved Hide resolved
};

/*
Expand Down
43 changes: 32 additions & 11 deletions onnxruntime/core/framework/provider_bridge_ort.cc
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,7 @@ using IndexedSubGraph_MetaDef = IndexedSubGraph::MetaDef;
#include "core/providers/dnnl/dnnl_provider_factory.h"
#include "core/providers/tensorrt/tensorrt_provider_factory.h"
#include "core/providers/openvino/openvino_provider_factory.h"
#include "core/platform/tensorrt_provider_options.h"

// The filename extension for a shared library is different per platform
#ifdef _WIN32
Expand Down Expand Up @@ -1103,9 +1104,9 @@ void UpdateProviderInfo_Tensorrt(OrtTensorRTProviderOptions* provider_options, c
}
}

ProviderOptions GetProviderInfo_Tensorrt() {
ProviderOptions GetProviderInfo_Tensorrt(const OrtTensorRTProviderOptions* provider_options) {
if (auto provider = s_library_tensorrt.Get()) {
return provider->GetProviderOptions();
return provider->GetProviderOptions(reinterpret_cast<const void*>(provider_options));
}

return {};
Expand Down Expand Up @@ -1202,13 +1203,32 @@ ORT_API_STATUS_IMPL(OrtApis::SessionOptionsAppendExecutionProvider_CUDA, _In_ Or
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::CreateTensorRTProviderOptions, _Outptr_ OrtTensorRTProviderOptions** out) {
ORT_API_STATUS_IMPL(OrtApis::SessionOptionsAppendExecutionProvider_TensorRT_V2, _In_ OrtSessionOptions* options, _In_ const OrtTensorRTProviderOptionsV2* tensorrt_options) {
  // OrtTensorRTProviderOptionsV2 is declared layout-identical to OrtTensorRTProviderOptions
  // (see tensorrt_provider_options.h), so the V2 entry point simply reinterprets the opaque
  // options struct and delegates to the existing V1 implementation.
  const auto* v1_options = reinterpret_cast<const OrtTensorRTProviderOptions*>(tensorrt_options);
  return OrtApis::SessionOptionsAppendExecutionProvider_TensorRT(options, v1_options);
}

ORT_API_STATUS_IMPL(OrtApis::CreateTensorRTProviderOptions, _Outptr_ OrtTensorRTProviderOptionsV2** out) {
API_IMPL_BEGIN
#ifdef USE_TENSORRT
*out = new OrtTensorRTProviderOptions();
*out = new OrtTensorRTProviderOptionsV2();
(*out)->device_id = 0;
(*out)->has_user_compute_stream = 0;
(*out)->user_compute_stream = nullptr;
(*out)->trt_max_partition_iterations = 1000;
(*out)->trt_min_subgraph_size = 1;
(*out)->trt_max_workspace_size = 1 << 30;
(*out)->trt_fp16_enable = false;
(*out)->trt_int8_enable = false;
(*out)->trt_int8_calibration_table_name = nullptr;
(*out)->trt_int8_use_native_calibration_table = false;
(*out)->trt_dla_enable = false;
(*out)->trt_dla_core = false;
(*out)->trt_dump_subgraphs = false;
(*out)->trt_engine_cache_enable= false;
(*out)->trt_engine_cache_path = nullptr;
(*out)->trt_engine_decryption_enable = false;
(*out)->trt_engine_decryption_lib_path = nullptr;
(*out)->trt_force_sequential_engine_build = false;
return nullptr;
#else
ORT_UNUSED_PARAMETER(out);
Expand All @@ -1218,7 +1238,7 @@ ORT_API_STATUS_IMPL(OrtApis::CreateTensorRTProviderOptions, _Outptr_ OrtTensorRT
}

ORT_API_STATUS_IMPL(OrtApis::UpdateTensorRTProviderOptions,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

are these the implementations of the C API functions?
if so, should we ensure that exceptions do not propagate outside of these calls (e.g., with API_IMPL_BEGIN/API_IMPL_END)? and why are these C API functions defined in provider_bridge_ort.cc?

Copy link
Contributor Author

@chilo-ms chilo-ms May 25, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was wondering what kind of functions should be in the C API?
For these four functions, it's related to TRT EP functionality. So I think I will move the definition of them out of C API and define/implemented them in provider_bridge_ort.cc and tensorrt_provider_factor.h

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, looks like you had to go back & forth with these functions & the C API, but this is good. The only issue I see is if they need API_IMPL_BEGIN/API_IMPL_END like Edward mentions above, just to be sure we don't throw any exceptions through them.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, API_IMPL_BEGIN/API_IMPL_END is needed. Just added them.

_Inout_ OrtTensorRTProviderOptions* tensorrt_provider_options,
_Inout_ OrtTensorRTProviderOptionsV2* tensorrt_options,
_In_reads_(num_keys) const char* const* provider_options_keys,
_In_reads_(num_keys) const char* const* provider_options_values,
size_t num_keys) {
Expand All @@ -1234,10 +1254,11 @@ ORT_API_STATUS_IMPL(OrtApis::UpdateTensorRTProviderOptions,
provider_options_map[provider_options_keys[i]] = provider_options_values[i];
}

onnxruntime::UpdateProviderInfo_Tensorrt(tensorrt_provider_options, reinterpret_cast<const onnxruntime::ProviderOptions&>(provider_options_map));
onnxruntime::UpdateProviderInfo_Tensorrt(reinterpret_cast<OrtTensorRTProviderOptions*>(tensorrt_options),
reinterpret_cast<const onnxruntime::ProviderOptions&>(provider_options_map));
return nullptr;
#else
ORT_UNUSED_PARAMETER(tensorrt_provider_options);
ORT_UNUSED_PARAMETER(tensorrt_options);
ORT_UNUSED_PARAMETER(provider_options_keys);
ORT_UNUSED_PARAMETER(provider_options_values);
ORT_UNUSED_PARAMETER(num_keys);
Expand All @@ -1246,11 +1267,11 @@ ORT_API_STATUS_IMPL(OrtApis::UpdateTensorRTProviderOptions,
API_IMPL_END
}

ORT_API_STATUS_IMPL(OrtApis::GetTensorRTProviderOptions, _Inout_ OrtAllocator* allocator,
ORT_API_STATUS_IMPL(OrtApis::GetTensorRTProviderOptionsAsString, _In_ const OrtTensorRTProviderOptionsV2* tensorrt_options, _Inout_ OrtAllocator* allocator,
_Outptr_ char** ptr) {
API_IMPL_BEGIN
#ifdef USE_TENSORRT
onnxruntime::ProviderOptions options = onnxruntime::GetProviderInfo_Tensorrt();
onnxruntime::ProviderOptions options = onnxruntime::GetProviderInfo_Tensorrt(reinterpret_cast<const OrtTensorRTProviderOptions*>(tensorrt_options));
onnxruntime::ProviderOptions::iterator it = options.begin();
std::string options_str = "";

Expand All @@ -1273,7 +1294,7 @@ ORT_API_STATUS_IMPL(OrtApis::GetTensorRTProviderOptions, _Inout_ OrtAllocator* a
API_IMPL_END
}

ORT_API(void, OrtApis::ReleaseTensorRTProviderOptions, _Frees_ptr_opt_ OrtTensorRTProviderOptions* ptr) {
ORT_API(void, OrtApis::ReleaseTensorRTProviderOptions, _Frees_ptr_opt_ OrtTensorRTProviderOptionsV2* ptr) {
#ifdef USE_TENSORRT
if (ptr != nullptr) {
if (ptr->trt_int8_calibration_table_name != nullptr) {
Expand All @@ -1293,4 +1314,4 @@ ORT_API(void, OrtApis::ReleaseTensorRTProviderOptions, _Frees_ptr_opt_ OrtTensor
#else
ORT_UNUSED_PARAMETER(ptr);
#endif
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,8 @@ struct Provider {

virtual void* GetInfo() { return nullptr; } // Returns a provider specific information interface if it exists

virtual ProviderOptions GetProviderOptions() { return {}; } // Returns a provider options interface if it exists
// Convert provider options struct to ProviderOptions which is a map
virtual ProviderOptions GetProviderOptions(const void* /*provider options struct*/) { return {}; }

// Update provider options from key-value string configuration
virtual void UpdateProviderOptions(void* /*provider options to be configured*/, const ProviderOptions& /*key-value string provider options*/){};
Expand Down
Loading