
I am trying to create a TensorRT engine from an ONNX model using the TensorRT C++ API. I have written code to parse the model and to build, serialize, and write a TensorRT engine to disk, as per the documentation. I have installed TensorRT 7 on Colab using the Debian installation instructions.

This is my C++ code, which I am compiling with g++ rnxt.cpp -o rnxt:

#include <cuda_runtime_api.h>
#include <NvOnnxParser.h>
#include <NvInfer.h>

#include <cstdlib>
#include <fstream>
#include <iostream>
#include <sstream>
#include <iterator>
#include <algorithm>

class Logger : public nvinfer1::ILogger           
 {
     void log(Severity severity, const char* msg) override
     {
         // suppress info-level messages
         if (severity != Severity::kINFO)
             std::cout << msg << std::endl;
     }
 } gLogger;


int main(){

    int maxBatchSize = 32;

    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);  
    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(explicitBatch);

    nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger);
    
    
    parser->parseFromFile("saved_resnext.onnx", 1);
    for (int i = 0; i < parser->getNbErrors(); ++i)
    {
        std::cout << parser->getError(i)->desc() << std::endl;
    }

    builder->setMaxBatchSize(maxBatchSize);
    nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
    config->setMaxWorkspaceSize(1 << 20);
    nvinfer1::ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);

    // The engine holds its own copy of the weights, so the parser,
    // network, config and builder can be released at this point.
    parser->destroy();
    network->destroy();
    config->destroy();
    builder->destroy();

    nvinfer1::IHostMemory* serializedModel = engine->serialize();

    // Write the serialized engine to disk in binary mode.
    std::ofstream engine_file("saved_resnext.engine", std::ios::binary);
    engine_file.write((const char*)serializedModel->data(), serializedModel->size());

    serializedModel->destroy();
    engine->destroy();
    return 0;
 }

While compiling, I get the following error:

/tmp/ccJaGxCX.o: In function `nvinfer1::(anonymous namespace)::createInferBuilder(nvinfer1::ILogger&)':
rnxt.cpp:(.text+0x19): undefined reference to `createInferBuilder_INTERNAL'
/tmp/ccJaGxCX.o: In function `nvonnxparser::(anonymous namespace)::createParser(nvinfer1::INetworkDefinition&, nvinfer1::ILogger&)':
rnxt.cpp:(.text+0x43): undefined reference to `createNvOnnxParser_INTERNAL'
collect2: error: ld returned 1 exit status

I also got errors related to <cuda_runtime_api.h>, so I copied those headers from CUDA's include directory (/usr/local/cuda-11.0/targets/x86_64-linux/include) into /usr/include, after which I get the error above. I don't have much experience with C++, and any help would be appreciated.

Edit: I have also installed libnvinfer using

!apt-get install -y libnvinfer7=7.1.3-1+cuda11.0
!apt-get install -y libnvinfer-dev=7.1.3-1+cuda11.0

2 Answers


  1. This problem occurs because nvonnxparser.so is not being linked. Just add

    target_link_libraries(${TARGET_NAME} nvonnxparser)
    

    to your CMakeLists.txt.
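
    Note that the first undefined reference (createInferBuilder_INTERNAL) lives in libnvinfer, so that library has to be linked as well. If you compile directly with g++ as in the question, a rough equivalent of the command line (assuming the Debian packages put the TensorRT libraries on the default linker path and CUDA 11.0 is installed under /usr/local/cuda-11.0) would be:

    g++ rnxt.cpp -o rnxt \
        -I/usr/local/cuda-11.0/targets/x86_64-linux/include \
        -L/usr/local/cuda-11.0/targets/x86_64-linux/lib \
        -lnvinfer -lnvonnxparser -lcudart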

  2. In my case it was the same problem; I fixed it by adding nvonnxparser.lib to the Additional Dependencies (the Visual Studio linker setting).
