TFLite C++ invocation causes a segfault on Android

plicqrtu · posted 2023-02-17 · Android

I am trying to build an Android C++ application with TFLite. I failed to compile the .so files myself, so I used prebuilt binaries I found online. I created a dummy project to test them and it worked fine, but when I integrate the same code into my real project, everything crashes. I have checked that the inputs are correct, the sizes are correct, and the initialization is correct; it is almost identical to my dummy project, so what could be wrong?
Here is my code:
tflite.cpp:

#include "tflite.hpp"

    tflite::tflite(uint8_t *data, size_t size)
    {
        try
        {
            // Note: BuildFromBuffer does not copy the buffer, so `data` must stay
            // alive for the lifetime of this object. The default error reporter is
            // used instead of passing an uninitialized ErrorReporter pointer.
            this->m_env = lib_tflite::FlatBufferModel::BuildFromBuffer((const char *)data, size);

            if (!this->m_env)
            {
                throw std::runtime_error("Failed to build model from buffer");
            }

            lib_tflite::ops::builtin::BuiltinOpResolver resolver;

            lib_tflite::InterpreterBuilder(*this->m_env, resolver)(&m_interpreter);

            if (!m_interpreter)
            {
                throw std::runtime_error("Failed to build interpreter");
            }

            if (m_interpreter->AllocateTensors() != kTfLiteOk)
            {
                throw std::runtime_error("Failed to allocate tensor");
            }
            m_interpreter->SetNumThreads(2);

            this->m_input_node_count = m_interpreter->inputs().size();
            this->m_output_node_count = m_interpreter->outputs().size();

            for (size_t idx = 0; idx < this->m_input_node_count; ++idx)
            {
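                // Record the NHWC dimensions and the total element count of this input tensor.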
                int input = m_interpreter->inputs()[idx];
                auto height =   m_interpreter->tensor(input)->dims->data[1];
                auto width =    m_interpreter->tensor(input)->dims->data[2];
                auto channels = m_interpreter->tensor(input)->dims->data[3];

                std::vector<int> res = {(int)this->m_input_node_count, channels, width, height};
                this->m_inputDims.push_back(res);

                const TfLiteTensor* input_tensor = m_interpreter->input_tensor(idx);

                size_t element_count = 1;
                for (int i = 0; i < input_tensor->dims->size; i++)
                {
                    element_count *= input_tensor->dims->data[i];
                }

                this->m_input_elem_size.push_back(element_count);
            }

            for (size_t idx = 0; idx < this->m_output_node_count; ++idx)
            {
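                // Record the NHWC dimensions and the total element count of this output tensor.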
                int output = m_interpreter->outputs()[idx];
                auto height =   m_interpreter->tensor(output)->dims->data[1];
                auto width =    m_interpreter->tensor(output)->dims->data[2];
                auto channels = m_interpreter->tensor(output)->dims->data[3];

                std::vector<int> res = {(int)this->m_output_node_count, channels, width, height};
                this->m_outputDims.push_back(res);

                const TfLiteTensor* output_tensor = m_interpreter->output_tensor(idx);

                int element_count = 1;
                for (int i = 0; i < output_tensor->dims->size; i++) {
                    element_count *= output_tensor->dims->data[i];
                }
                this->m_output_elem_size.push_back(element_count);
            }

            for (size_t idx = 0; idx < this->m_input_node_count; ++idx)
            {
                this->m_input_buffer.emplace_back(this->m_input_elem_size[idx], 0
                );
            }

            for (size_t idx = 0; idx < this->m_output_node_count; ++idx)
            {
                this->m_output_buffer.emplace_back(this->m_output_elem_size[idx], 0
                );
            }
        }
        catch (const std::exception &)
        {
            log_error("Failed to construct tflite\n");
            throw;
        }
    }

    bool tflite::run(std::vector<float> &t_out_buffer,
                     std::vector<float> &t_cls_buffer,
                     std::vector<float> &t_buffer,
                     size_t region_size) noexcept
    {

        for(size_t idx = 0; idx < this->m_input_node_count; idx++)
        {
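            // Feed the same t_buffer into every input tensor (region_size is currently unused).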
            float* data_ptr = m_interpreter->typed_input_tensor<float>(idx);
            // m_input_elem_size holds an element count, so convert to bytes for memcpy.
            memcpy(data_ptr, t_buffer.data(), this->m_input_elem_size[idx] * sizeof(float));
        }

        // This is where it fails
        if (kTfLiteOk != this->m_interpreter->Invoke())
        {
            log_error("Failed to invoke\n");
            return false;
        }

        for(size_t idx = 0; idx < this->m_output_node_count; idx++)
        {
            float* output = this->m_interpreter->typed_output_tensor<float>(idx);

            this->m_output_buffer[idx] = std::vector<float> (output,
                                                             output + this->m_output_elem_size[idx]);
        }

        t_cls_buffer = this->m_output_buffer[0];
        t_out_buffer = this->m_output_buffer[1];

        return true;
    }

/* end_of_file */

tflite.hpp:

#ifndef TFLITE_DRIVER_HPP
#define TFLITE_DRIVER_HPP

#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/optional_debug_tools.h"

    namespace lib_tflite = ::tflite;

    class tflite
    {
    public:

        tflite() = delete;

        virtual ~tflite() noexcept = default;

        tflite(tflite &&) = delete;

        tflite & operator=(tflite &&) = delete;

        tflite(const tflite &) = delete;

        tflite & operator=(const tflite &) = delete;

        tflite(uint8_t *data, size_t size);

        bool run(   std::vector<float> &t_out_buffer,
                    std::vector<float> &t_cls_buffer,
                    std::vector<float> &t_buffer,
                    size_t region_size) noexcept;
    private:
        lib_tflite::ErrorReporter* error_reporter;
        
        lib_tflite::ops::builtin::BuiltinOpResolver resolver;

        std::unique_ptr<lib_tflite::FlatBufferModel> m_env;

        std::unique_ptr<lib_tflite::Interpreter> m_interpreter;

        std::vector<const char *> m_input_names;

        std::vector<const char *> m_output_names;

        size_t m_input_node_count;

        size_t m_output_node_count;

        std::vector<lib_tflite::Tensor> m_inputTensors;

        std::vector<lib_tflite::Tensor> m_outputTensors;

        std::vector<size_t> m_input_elem_size;

        std::vector<size_t> m_output_elem_size;

        std::vector<std::vector<int>> m_inputDims;

        std::vector<std::vector<int>> m_outputDims;

        std::vector<std::vector<float>> m_input_buffer;

        std::vector<std::vector<float>> m_output_buffer;

        std::unique_ptr<lib_tflite::MemoryAllocation> m_memory_info;

    };

#endif // TFLITE_DRIVER_HPP
/* end_of_file */

Note that my project runs on two threads: one thread fetches the input and the other one invokes TFLite. As I said, the inputs are correct. The constructor runs once, but the run function runs on every frame.
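
Since the interpreter should only be touched from the inference thread while the input is produced on the capture thread, the hand-off between the two threads has to be synchronized. Below is a minimal sketch of one way to guard that hand-off; g_frame_mutex, g_latest_frame, publish_frame and run_inference are illustrative names and not part of the project above:

#include <mutex>
#include <vector>

#include "tflite.hpp"

static std::mutex g_frame_mutex;
static std::vector<float> g_latest_frame;

// Capture thread: publish the newest frame under the lock.
void publish_frame(const std::vector<float> &frame)
{
    std::lock_guard<std::mutex> lock(g_frame_mutex);
    g_latest_frame = frame;
}

// Inference thread: take a private copy under the lock, then call the
// interpreter outside of it, so Invoke() never races with the producer.
bool run_inference(tflite &net, std::vector<float> &out, std::vector<float> &cls)
{
    std::vector<float> frame;
    {
        std::lock_guard<std::mutex> lock(g_frame_mutex);
        frame = g_latest_frame;
    }
    return net.run(out, cls, frame, frame.size());
}
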
Here is the output I get when I run it:

E/libc: Access denied finding property "ro.mediatek.platform"
E/libc: Access denied finding property "ro.chipname"
A/libc: Fatal signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr 0x13c6759bfc61 in tid 27936 (processing), pid 27893 (ample.nerveblox)

I have debugged this and found that the line where the error occurs is the call to this->m_interpreter->Invoke() in the run function.
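
For reference, FlatBufferModel::BuildFromBuffer does not copy the caller-owned buffer; it keeps a pointer into it, so the bytes handed to the constructor must stay alive for as long as the model and interpreter are used. Here is a minimal, self-contained sketch of keeping an owned copy next to the model; owned_model and load_model are illustrative names, not taken from the code above:

#include <cstdint>
#include <memory>
#include <vector>

#include "tensorflow/lite/model.h"

// Keep an owned copy of the flatbuffer next to the model, because
// BuildFromBuffer stores a pointer into the caller-owned buffer
// instead of copying it.
struct owned_model
{
    std::vector<uint8_t> bytes;                        // owned flatbuffer bytes
    std::unique_ptr<::tflite::FlatBufferModel> model;  // built on top of `bytes`
};

owned_model load_model(const uint8_t *data, size_t size)
{
    owned_model m;
    m.bytes.assign(data, data + size);                 // copy before building
    m.model = ::tflite::FlatBufferModel::BuildFromBuffer(
        reinterpret_cast<const char *>(m.bytes.data()), m.bytes.size());
    return m;
}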

izkcnapc (answer 1):

If you run "adb shell getprop", it looks like there are no "ro.mediatek.platform" or "ro.chipname" properties on your device, which is why libc reports "Access denied finding property" for them. So I think "ro.mediatek.platform" and "ro.chipname" are simply not defined as properties on your device.
