Skip to content

Commit

Permalink
Fix build for --enable_language_interop_ops + DISABLE_ABSEIL=ON (#14469)
Browse files Browse the repository at this point in the history
### Fix build error on Windows when building with "
--enable_language_interop_ops -cmake_extra_defines
onnxruntime_DISABLE_ABSEIL=ON"

This is a subsequent fix after
#14309, which fixed the build
for onnxruntime_DISABLE_ABSEIL=ON.

Going further, if we enable --enable_language_interop_ops, there are
the following two errors:

```
 test_symm_qgemm.cpp
  test_transpose.cpp
onnxruntime_session.lib(inference_session.obj) : error LNK2019: unresolved external symbol "void __cdecl onnxruntime::L
oadInterOp(class std::basic_string<wchar_t,struct std::char_traits<wchar_t>,class std::allocator<wchar_t> > const &,cla
ss std::vector<struct Ort::CustomOpDomain,class std::allocator<struct Ort::CustomOpDomain> > &,class std::function<void
 __cdecl(char const *)> const &)" (?LoadInterOp@onnxruntime@@YAXAEBV?$basic_string@_WU?$char_traits@_W@std@@v?$allocato
r@_W@2@@std@@aeav?$vector@UCustomOpDomain@Ort@@v?$allocator@UCustomOpDomain@Ort@@@std@@@3@AEBV?$function@$$A6AXPEBD@Z@3
@@z) referenced in function "public: __cdecl <lambda_f3a907e0b0a0e11d80d305605215cce8>::operator()(class std::shared_pt
r<class onnxruntime::Model> &)const " (??R<lambda_f3a907e0b0a0e11d80d305605215cce8>@@qeba@AEAV?$shared_ptr@VModel@onnxr
untime@@@std@@@z) [C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\onnxruntime_test_trainer.vcxproj]
onnxruntime_session.lib(inference_session.obj) : error LNK2019: unresolved external symbol "void __cdecl onnxruntime::L
oadInterOp(class onnx::ModelProto const &,class std::vector<struct Ort::CustomOpDomain,class std::allocator<struct Ort:
:CustomOpDomain> > &,class std::function<void __cdecl(char const *)> const &)" (?LoadInterOp@onnxruntime@@YAXAEBVModelP
roto@onnx@@aeav?$vector@UCustomOpDomain@Ort@@v?$allocator@UCustomOpDomain@Ort@@@std@@@std@@aebv?$function@$$A6AXPEBD@Z@
5@@z) referenced in function "public: __cdecl <lambda_340b7b787b9c0f81848d348e60fe6c91>::operator()(class std::shared_p
tr<class onnxruntime::Model> &)const " (??R<lambda_340b7b787b9c0f81848d348e60fe6c91>@@qeba@AEAV?$shared_ptr@VModel@onnx
runtime@@@std@@@z) [C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\onnxruntime_test_trainer.vcxproj]
C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\RelWithDebInfo\onnxruntime_test_trainer.exe : fatal error
LNK1120: 2 unresolved externals [C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\onnxruntime_test_trainer.
vcxproj]
  onnxruntime.vcxproj -> C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\RelWithDebInfo\onnxruntime.dll
  onnxruntime_test_utils.vcxproj -> C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\RelWithDebInfo\onnxrun
  time_test_utils.lib
CUDACOMPILE : nvcc warning : The 'compute_35', 'compute_37', 'sm_35', and 'sm_37' architectures are deprecated, and may
 be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). [C:\Users\pengwa\dev\onnxruntime
\build\Windows\RelWithDebInfo\custom_op_library.vcxproj]
  cuda_ops.cu
CUDACOMPILE : nvcc warning : The 'compute_35', 'compute_37', 'sm_35', and 'sm_37' architectures are deprecated, and may
 be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning). [C:\Users\pengwa\dev\onnxruntime
\build\Windows\RelWithDebInfo\onnxruntime_test_cuda_ops_lib.vcxproj]
```



```
  kernel_type_str_resolver_utils_test.cc
  local_kernel_registry_test.cc
C:\Users\pengwa\dev\onnxruntime\onnxruntime\test\framework\allocation_planner_test.cc(1388,9): error C2220: the followin
g warning is treated as an error [C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebInfo\onnxruntime_test_all.vcxp
roj]
C:\Users\pengwa\dev\onnxruntime\onnxruntime\test\framework\allocation_planner_test.cc(1388,9): warning C4067: unexpected
 tokens following preprocessor directive - expected a newline [C:\Users\pengwa\dev\onnxruntime\build\Windows\RelWithDebI
nfo\onnxruntime_test_all.vcxproj]
```


### Motivation and Context
<!-- - Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here. -->
  • Loading branch information
pengwa authored Jan 31, 2023
1 parent a5b620e commit e2dd131
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 8 deletions.
5 changes: 5 additions & 0 deletions cmake/onnxruntime_unittests.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -1342,6 +1342,11 @@ if (NOT onnxruntime_ENABLE_TRAINING_TORCH_INTEROP)
onnxruntime_common
onnxruntime_flatbuffers
)

if (onnxruntime_ENABLE_LANGUAGE_INTEROP_OPS)
list(APPEND ONNXRUNTIME_TEST_LIBS onnxruntime_language_interop onnxruntime_pyop)
endif()

target_link_libraries(onnxruntime_test_trainer PRIVATE
${ONNXRUNTIME_TEST_LIBS}
${onnxruntime_EXTERNAL_LIBRARIES}
Expand Down
15 changes: 7 additions & 8 deletions onnxruntime/test/framework/allocation_planner_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ struct UnaryNode {
: UnaryNode(graph, "Transpose", p_input_arg, p_output_arg) {}

UnaryNode(onnxruntime::Graph& graph, std::string& node_name, const std::string& op, std::vector<onnxruntime::NodeArg*>& inputs,
std::vector<onnxruntime::NodeArg*>& outputs) : input_args(inputs), output_args(outputs) {
std::vector<onnxruntime::NodeArg*>& outputs) : input_args(inputs), output_args(outputs) {
p_node = &graph.AddNode(node_name, op, "test op", input_args, output_args);
}
};
Expand Down Expand Up @@ -1314,9 +1314,9 @@ TEST_F(PlannerTest, MultiStreamMultiOutput) {
std::vector<onnxruntime::NodeArg*> input1{Arg(Graph_input1), Arg(Graph_input2), Arg(Graph_input3)}, output1{Arg(Arg1), Arg(Arg2)}, input2{Arg(Arg1), Arg(Arg2)}, output2{Arg(Arg3)};
AddNode(*cudaKernel, node1, input1, output1);

std::unique_ptr<::onnxruntime::KernelDef> cpuKernel = KernelDefBuilder().SetName("Add").Provider(kCpuExecutionProvider).SinceVersion(7,12).Build();
std::unique_ptr<::onnxruntime::KernelDef> cpuKernel = KernelDefBuilder().SetName("Add").Provider(kCpuExecutionProvider).SinceVersion(7, 12).Build();
AddNode(*cpuKernel, node2, input2, output2);

CUDAExecutionProviderInfo epi;
onnxruntime::ProviderInfo_CUDA& ep = onnxruntime::GetProviderInfo_CUDA();
auto epFactory = ep.CreateExecutionProviderFactory(epi);
Expand Down Expand Up @@ -1345,7 +1345,7 @@ TEST_F(PlannerTest, MultiStreamMultiOutput) {
// \ /
// node3
// node1 and node2 are in the same stream, both has an output which will be consumed by node3 in a different stream
// TODO(leca): the ideal case is there is only 1 wait step before launching node3,
// TODO(leca): the ideal case is there is only 1 wait step before launching node3,
// as there is a specific order between node1 and node2 if they are in the same stream, thus node3 will only need to wait the latter one
TEST_F(PlannerTest, MultiStream2NodesSameStreamConsumedBy1NodeInDifferentStream) {
std::unique_ptr<::onnxruntime::KernelDef> cudaKernel = KernelDefBuilder().SetName("Transpose").Provider(kCudaExecutionProvider).SinceVersion(1, 10).Build();
Expand Down Expand Up @@ -1385,7 +1385,7 @@ TEST_F(PlannerTest, MultiStream2NodesSameStreamConsumedBy1NodeInDifferentStream)
}
#endif

#if not defined(__wasm__) and defined(ORT_ENABLE_STREAM)
#if !defined(__wasm__) && defined(ORT_ENABLE_STREAM)

TEST_F(PlannerTest, ParaPlanCreation) {
TypeProto graph_in_type;
Expand Down Expand Up @@ -1823,13 +1823,12 @@ TEST_F(PlannerTest, ParaPlanCreation) {
std::string reused;
ORT_ENFORCE(main_graph_ort_value_index_map.GetName(per_value_plan.reused_buffer, reused).IsOK());
reuse_pairs.erase(reused);
} //if
} //for
} // if
} // for
ASSERT_TRUE(reuse_pairs.empty());
}

TEST_F(PlannerTest, TestMultiStreamConfig) {

const char* type = "DeviceBasedPartitioner";
constexpr size_t type_len = 22;

Expand Down

0 comments on commit e2dd131

Please sign in to comment.