diff --git a/LibQNNHelper/CMakeLists.txt b/LibQNNHelper/CMakeLists.txt deleted file mode 100644 index 33edf16..0000000 --- a/LibQNNHelper/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -#============================================================================= -# -# Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause -# -#============================================================================= - -cmake_minimum_required(VERSION 3.4...3.18) -project(LibQNNHelper) - -add_subdirectory(src) diff --git a/LibQNNHelper/README.txt b/LibQNNHelper/README.txt deleted file mode 100644 index c295d0b..0000000 --- a/LibQNNHelper/README.txt +++ /dev/null @@ -1,2 +0,0 @@ -For information on how to build and execute qnn-sample-app, -please point your web browser to ${QNN_SDK_ROOT}/docs/QNN/general/sample_app.html. \ No newline at end of file diff --git a/LibQNNHelper/src/CMakeLists.txt b/LibQNNHelper/src/CMakeLists.txt deleted file mode 100644 index 66952ed..0000000 --- a/LibQNNHelper/src/CMakeLists.txt +++ /dev/null @@ -1,44 +0,0 @@ -#============================================================================= -# -# Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause -# -#============================================================================= - -set(APP "libqnnhelper") -set(APP_SOURCES "QnnSampleApp.cpp" - "main.cpp" - "Log/Logger.cpp" - "Log/LogUtils.cpp" - "PAL/src/windows/Common.cpp" - "PAL/src/windows/Directory.cpp" - "PAL/src/windows/DynamicLoading.cpp" - "PAL/src/windows/FileOp.cpp" - "PAL/src/windows/Path.cpp" - "PAL/src/common/GetOpt.cpp" - "PAL/src/common/StringOp.cpp" - "Utils/DataUtil.cpp" - "Utils/DynamicLoadUtil.cpp" - "Utils/IOTensor.cpp" - "Utils/QnnSampleAppUtils.cpp" - "WrapperUtils/QnnWrapperUtils.cpp" - "LibQNNHelper.cpp") - -ADD_LIBRARY(${APP} SHARED ${APP_SOURCES}) - -SET(LIBRARY_OUTPUT_PATH "${PROJECT_SOURCE_DIR}/../lib") - -target_compile_definitions(${APP} PUBLIC "-DNOMINMAX") -target_link_libraries(${APP} PRIVATE Shlwapi Shell32) -set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") -set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MD /O2 /Ob2") -target_include_directories(${APP} PUBLIC CachingUtil - Log - PAL/include - Utils - WrapperUtils - ${CMAKE_BINARY_DIR} - $ENV{QNN_SDK_ROOT}/include/QNN - ../../SvcQNNHelper/src - ./) diff --git a/LibQNNHelper/src/Log/LogUtils.cpp b/LibQNNHelper/src/Log/LogUtils.cpp deleted file mode 100644 index f823b08..0000000 --- a/LibQNNHelper/src/Log/LogUtils.cpp +++ /dev/null @@ -1,63 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include "LogUtils.hpp" -#include "windows.h" // zw. - - -void qnn::log::utils::logCreateLock() { // zw: We need share the lock between processes. 
- sg_logUtilMutex = OpenMutexA(MUTEX_ALL_ACCESS, FALSE, "logStdoutCallbackSharedMutex"); - if (!sg_logUtilMutex) { - sg_logUtilMutex = CreateMutexA(NULL, FALSE, "logStdoutCallbackSharedMutex"); - } -} - -extern std::string g_ProcName; - -void qnn::log::utils::logStdoutCallback(const char* fmt, - QnnLog_Level_t level, - uint64_t timestamp, - va_list argp) { - const char* levelStr = ""; - switch (level) { - case QNN_LOG_LEVEL_ERROR: - levelStr = " ERROR "; - break; - case QNN_LOG_LEVEL_WARN: - levelStr = "WARNING"; - break; - case QNN_LOG_LEVEL_INFO: - levelStr = " INFO "; - break; - case QNN_LOG_LEVEL_DEBUG: - levelStr = " DEBUG "; - break; - case QNN_LOG_LEVEL_VERBOSE: - levelStr = "VERBOSE"; - break; - case QNN_LOG_LEVEL_MAX: - levelStr = "UNKNOWN"; - break; - } - - double ms = (double)timestamp / 1000000.0; - // To avoid interleaved messages - { // zw: enhance the log print. - DWORD dwWaitResult = WaitForSingleObject(sg_logUtilMutex, INFINITE); - if (WAIT_OBJECT_0 == dwWaitResult) { - //std::lock_guard lock(sg_logUtilMutex); - fprintf(stdout, "%8.1fms [%s][%d][%-7s] ", ms, g_ProcName.c_str(), GetCurrentProcessId(), levelStr); - vfprintf(stdout, fmt, argp); - if (fmt[strlen(fmt) - 1] != '\n') { - fprintf(stdout, "\n"); - } - fflush(stdout); - } - ReleaseMutex(sg_logUtilMutex); - } -} diff --git a/LibQNNHelper/src/Log/LogUtils.hpp b/LibQNNHelper/src/Log/LogUtils.hpp deleted file mode 100644 index d681e0e..0000000 --- a/LibQNNHelper/src/Log/LogUtils.hpp +++ /dev/null @@ -1,30 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include -#include -#include -#include -#include // zw. 
- -#include "QnnLog.h" - -namespace qnn { -namespace log { -namespace utils { - -void logStdoutCallback(const char* fmt, QnnLog_Level_t level, uint64_t timestamp, va_list argp); -void logCreateLock(); -// static std::mutex sg_logUtilMutex; // zw. -static HANDLE sg_logUtilMutex = nullptr; // zw: We need share the lock between processes. - -} // namespace utils -} // namespace log -} // namespace qnn diff --git a/LibQNNHelper/src/Log/Logger.cpp b/LibQNNHelper/src/Log/Logger.cpp deleted file mode 100644 index c3d467f..0000000 --- a/LibQNNHelper/src/Log/Logger.cpp +++ /dev/null @@ -1,143 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include - -#include "LogUtils.hpp" -#include "Logger.hpp" - -using namespace qnn::log; - -std::shared_ptr Logger::s_logger = nullptr; - -std::mutex Logger::s_logMutex; - -std::shared_ptr Logger::createLogger(QnnLog_Callback_t callback, - QnnLog_Level_t maxLevel, - QnnLog_Error_t* status) { - std::lock_guard lock(s_logMutex); - if ((maxLevel > QNN_LOG_LEVEL_VERBOSE) || (maxLevel == 0)) { - if (status) { - *status = QNN_LOG_ERROR_INVALID_ARGUMENT; - } - return nullptr; - } - if (!s_logger) { - s_logger = std::shared_ptr(new (std::nothrow) Logger(callback, maxLevel, status)); - } - *status = QNN_LOG_NO_ERROR; - return s_logger; -} - -Logger::Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status) - : m_callback(callback), m_maxLevel(maxLevel), m_epoch(getTimestamp()) { - if (!callback) { - m_callback = utils::logStdoutCallback; - qnn::log::utils::logCreateLock(); // zw: We need share the lock between processes. - } -} - -void Logger::log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...) 
{ - if (m_callback) { - if (level > m_maxLevel.load(std::memory_order_seq_cst)) { - return; - } - va_list argp; - va_start(argp, fmt); - std::string logString(fmt); - std::ignore = file; - std::ignore = line; - (*m_callback)(logString.c_str(), level, getTimestamp() - m_epoch, argp); - va_end(argp); - } -} - -uint64_t Logger::getTimestamp() const { - return std::chrono::duration_cast( - std::chrono::system_clock::now().time_since_epoch()) - .count(); -} - -// zw: Add for sync the time between processes. -uint64_t Logger::getTimediff() { - return getTimestamp() - m_epoch; -} - -uint64_t Logger::getEpoch() { - return m_epoch; -} - -void Logger::setEpoch(uint64_t epoch) { - m_epoch = epoch; -} - -std::shared_ptr<::qnn::log::Logger> g_logger{nullptr}; - -bool qnn::log::initializeLogging() { - QnnLog_Level_t logLevel; - QnnLog_Error_t status; - - if (g_logger) // zw. - return true; - -#ifdef QNN_ENABLE_DEBUG - logLevel = QNN_LOG_LEVEL_DEBUG; -#else - logLevel = QNN_LOG_LEVEL_INFO; -#endif - // Default log stream is enabled in Core/Logger component - g_logger = ::qnn::log::Logger::createLogger(nullptr, logLevel, &status); - if (QNN_LOG_NO_ERROR != status || !g_logger) { - return false; - } - return true; -} - -QnnLog_Callback_t qnn::log::getLogCallback() { - if (g_logger == nullptr) { - fprintf(stdout, "Logger hasn't been initialized!\n"); - return nullptr; - } - - return g_logger->getLogCallback(); -} - -QnnLog_Level_t qnn::log::getLogLevel() { return g_logger->getMaxLevel(); } - -bool qnn::log::isLogInitialized() { - if (g_logger == nullptr) { - return false; - } - return true; -} - -bool qnn::log::setLogLevel(QnnLog_Level_t maxLevel) { - if (!::qnn::log::Logger::isValid() || - !(maxLevel >= QNN_LOG_LEVEL_ERROR && maxLevel <= QNN_LOG_LEVEL_DEBUG)) { - return false; - } - - g_logger->setMaxLevel(maxLevel); - return true; -} - -// zw: Add for sync the time between processes. 
-uint64_t qnn::log::getTimediff() { - return g_logger->getTimediff(); -} - -uint64_t qnn::log::getEpoch() { - return g_logger->getEpoch(); -} - -void qnn::log::setEpoch(uint64_t epoch) { - return g_logger->setEpoch(epoch); -} diff --git a/LibQNNHelper/src/Log/Logger.hpp b/LibQNNHelper/src/Log/Logger.hpp deleted file mode 100644 index f72a7e2..0000000 --- a/LibQNNHelper/src/Log/Logger.hpp +++ /dev/null @@ -1,117 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include -#include -#include -#include -#include - -#include "QnnLog.h" - -#define __FILENAME__ (strrchr(__FILE__, '/') + 1) - -/** - * @brief Log something with the current logger. Always valid to call, though - * it won't do something if no logger has been set. - */ - -#define QNN_LOG_LEVEL(level, fmt, ...) \ - do { \ - auto logger = ::qnn::log::Logger::getLogger(); \ - if (logger) { \ - logger->log(level, __FILENAME__, __LINE__, fmt, ##__VA_ARGS__); \ - } \ - } while (0) - -#define QNN_ERROR(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_ERROR, fmt, ##__VA_ARGS__) - -#define QNN_ERROR_EXIT(fmt, ...) \ - { \ - QNN_ERROR(fmt, ##__VA_ARGS__); \ - exit(EXIT_FAILURE); \ - } - -#define QNN_WARN(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_WARN, fmt, ##__VA_ARGS__) - -#define QNN_INFO(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_INFO, fmt, ##__VA_ARGS__) - -#define QNN_DEBUG(fmt, ...) QNN_LOG_LEVEL(QNN_LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) - -#define QNN_VERBOSE(fmt, ...) 
QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, fmt, ##__VA_ARGS__) - -#define QNN_FUNCTION_ENTRY_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Entering %s", __func__) - -#define QNN_FUNCTION_EXIT_LOG QNN_LOG_LEVEL(QNN_LOG_LEVEL_VERBOSE, "Returning from %s", __func__) - -namespace qnn { -namespace log { - -bool initializeLogging(); - -QnnLog_Callback_t getLogCallback(); - -QnnLog_Level_t getLogLevel(); - -bool isLogInitialized(); - -bool setLogLevel(QnnLog_Level_t maxLevel); - -// zw: Add for sync the time between processes. -uint64_t getTimediff(); -uint64_t getEpoch(); -void setEpoch(uint64_t epoch); - -class Logger final { - public: - Logger(const Logger&) = delete; - Logger& operator=(const Logger&) = delete; - Logger(Logger&&) = delete; - Logger& operator=(Logger&&) = delete; - - void setMaxLevel(QnnLog_Level_t maxLevel) { - m_maxLevel.store(maxLevel, std::memory_order_seq_cst); - } - - QnnLog_Level_t getMaxLevel() { return m_maxLevel.load(std::memory_order_seq_cst); } - - QnnLog_Callback_t getLogCallback() { return m_callback; } - - void log(QnnLog_Level_t level, const char* file, long line, const char* fmt, ...); - - static std::shared_ptr createLogger(QnnLog_Callback_t callback, - QnnLog_Level_t maxLevel, - QnnLog_Error_t* status); - - static bool isValid() { return (s_logger != nullptr); } - - static std::shared_ptr getLogger() { return s_logger; } - - static void reset() { s_logger = nullptr; } - -// zw: Add for sync the time between processes. 
- uint64_t getTimediff(); - uint64_t getEpoch(); - void setEpoch(uint64_t epoch); - - private: - Logger(QnnLog_Callback_t callback, QnnLog_Level_t maxLevel, QnnLog_Error_t* status); - - uint64_t getTimestamp() const; - - QnnLog_Callback_t m_callback = nullptr; - std::atomic m_maxLevel; - uint64_t m_epoch; - static std::shared_ptr s_logger; - static std::mutex s_logMutex; -}; - -} // namespace log -} // namespace qnn diff --git a/LibQNNHelper/src/PAL/include/PAL/Debug.hpp b/LibQNNHelper/src/PAL/include/PAL/Debug.hpp deleted file mode 100644 index 57efb93..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/Debug.hpp +++ /dev/null @@ -1,21 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#define DEBUG_ON 0 - -#if DEBUG_ON -#define DEBUG_MSG(...) \ - { \ - fprintf(stderr, __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } -#else -#define DEBUG_MSG(...) -#endif diff --git a/LibQNNHelper/src/PAL/include/PAL/Directory.hpp b/LibQNNHelper/src/PAL/include/PAL/Directory.hpp deleted file mode 100644 index ec5d135..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/Directory.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//--------------------------------------------------------------------------- -/// @file -/// This file includes APIs for directory operations on supported platforms -//--------------------------------------------------------------------------- - -#pragma once - -#include - -#include "PAL/FileOp.hpp" - -namespace pal { -class Directory; -} - -class pal::Directory { - public: - using DirMode = pal::FileOp::FileMode; - //--------------------------------------------------------------------------- - /// @brief - /// Creates a directory in the file system. - /// @param path - /// Name of directory to create. - /// @param dirmode - /// Directory mode - /// @return - /// True if - /// 1. create a directory successfully - /// 2. or directory exist already - /// False otherwise - /// - /// For example: - /// - /// - Create a directory in default. - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// pal::Directory::Create(path, pal::Directory::DirMode::S_DEFAULT_); - /// pal::Directory::Create(path); - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// - /// - Create a directory with specific permission. - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// pal::Directory::Create(path, pal::Directory::DirMode::S_IRWXU_| - /// pal::Directory::DirMode::S_IRWXG_| - /// pal::Directory::DirMode::S_IRWXO_); - /// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - /// - /// @note For windows, dirmode is not used. - /// @note For linux, dirmode is used to set the permission of the folder. 
- //--------------------------------------------------------------------------- - static bool create(const std::string &path, - pal::Directory::DirMode dirmode = pal::Directory::DirMode::S_DEFAULT_); - - //--------------------------------------------------------------------------- - /// @brief - /// Removes the entire directory whether it's empty or not. - /// @param path - /// Name of directory to delete. - /// @return - /// True if the directory was successfully deleted, false otherwise. - //--------------------------------------------------------------------------- - static bool remove(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief - /// Creates a directory and all parent directories required. - /// @param path - /// Path of directory to create. - /// @return - /// True if the directory was successfully created, false otherwise. - //--------------------------------------------------------------------------- - static bool makePath(const std::string &path); -}; diff --git a/LibQNNHelper/src/PAL/include/PAL/DynamicLoading.hpp b/LibQNNHelper/src/PAL/include/PAL/DynamicLoading.hpp deleted file mode 100644 index 020448f..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/DynamicLoading.hpp +++ /dev/null @@ -1,99 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//--------------------------------------------------------------------------- -/// @file -/// This file includes APIs for dynamic loading on supported platforms -//--------------------------------------------------------------------------- - -#pragma once - -#include - -namespace pal { -namespace dynamicloading { -// we only support subset of POSIX of dlopen/dlsym/dladdr/dlerror/dlclose -// except the following flags for dlopen, others should be done only -// when we really need them -// DL_NOW is MUST -// DL_LOCAL is enabled if not specified -enum { - DL_NOW = 0x0001, - DL_LOCAL = 0x0002, - DL_GLOBAL = 0x0004, -}; - -// specify this address to distingiush from NULL pointer -#define DL_DEFAULT (void *)(0x4) - -//--------------------------------------------------------------------------- -/// @brief -/// Loads the dynamic shared object -/// @param filename -/// If contains path separators, treat it as relative or absolute pathname -/// or search it for the rule of dynamic linker -/// @param flags -/// - DL_NOW: resolve undefined symbols before return. MUST be specified. -/// - DL_LOCAL: optional, but the default specified. Symbols defined in this -/// shared object are not made available to resolve references in subsequently -/// loaded shared objects -/// - DL_GLOBAL: optional, resolve symbol globally -/// @return -/// On success, a non-NULL handle for the loaded library. 
-/// On error, NULL -//--------------------------------------------------------------------------- -void *dlOpen(const char *filename, int flags); - -//--------------------------------------------------------------------------- -/// @brief -/// Obtain address of a symbol in a shared object or executable -/// @param handle -/// A handle of a dynamic loaded shared object returned by dlopen -/// @param symbol -/// A null-terminated symbol name -/// @return -/// On success, return the address associated with symbol -/// On error, NULL -//--------------------------------------------------------------------------- -void *dlSym(void *handle, const char *symbol); - -//--------------------------------------------------------------------------- -/// @brief -/// Translate the address of a symbol to the path of the belonging shared object -/// @param addr -/// Address of symbol in a shared object -/// @param path -/// Full name of shared object that contains address, usually it is an absolute path -/// @return -/// On success, return a non-zero value -/// On error, return 0 -//--------------------------------------------------------------------------- -int dlAddrToLibName(void *addr, std::string &name); - -//--------------------------------------------------------------------------- -/// @brief -/// Decrements the reference count on the dynamically loaded shared object -/// referred to by handle. If the reference count drops to 0, then the -/// object is unloaded. -/// @return -/// On success, 0; on error, a nonzero value -//--------------------------------------------------------------------------- -int dlClose(void *handle); - -//--------------------------------------------------------------------------- -/// @brief -/// Obtain error diagnostic for functions in the dl-family APIs. -/// @return -/// Returns a human-readable, null-terminated string describing the most -/// recent error that occurred from a call to one of the functions in the -/// dl-family APIs. 
-//--------------------------------------------------------------------------- -char *dlError(void); - -} // namespace dynamicloading -} // namespace pal diff --git a/LibQNNHelper/src/PAL/include/PAL/FileOp.hpp b/LibQNNHelper/src/PAL/include/PAL/FileOp.hpp deleted file mode 100644 index 08669d9..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/FileOp.hpp +++ /dev/null @@ -1,239 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//------------------------------------------------------------------------------ -/// @file -/// This file includes APIs for file operations on the supported platforms -//------------------------------------------------------------------------------ - -#pragma once - -#include - -#include -#include -#include - -namespace pal { -class FileOp; -} - -//------------------------------------------------------------------------------ -/// @brief -/// FileOp contains OS Specific file system functionality. -//------------------------------------------------------------------------------ -class pal::FileOp { - public: - // enum for symbolic constants mode, strictly follow linux usage - // windows or another OS user should transfer the usage - // ref : http://man7.org/linux/man-pages/man2/open.2.html - enum class FileMode : uint32_t { - S_DEFAULT_ = 0777, - S_IRWXU_ = 0700, - S_IRUSR_ = 0400, - S_IWUSR_ = 0200, - S_IXUSR_ = 0100, - S_IRWXG_ = 0070, - S_IRGRP_ = 0040, - S_IWGRP_ = 0020, - S_IXGRP_ = 0010, - S_IRWXO_ = 0007, - S_IROTH_ = 0004, - S_IWOTH_ = 0002, - S_IXOTH_ = 0001 - }; - - //--------------------------------------------------------------------------- - /// @brief - /// Copies a file from one location to another, overwrites if the - /// destination already exists. 
- /// @param source - /// File name of the source file. - /// @param target - /// File name of the target file. - /// @return - /// True on success, otherwise false. - //--------------------------------------------------------------------------- - static bool copyOverFile(const std::string &source, const std::string &target); - - //--------------------------------------------------------------------------- - /// @brief - /// Checks whether the file exists or not. - /// @param fileName - /// File name of the source file, including its complete path. - /// @return - /// True on success, otherwise false. - //--------------------------------------------------------------------------- - static bool checkFileExists(const std::string &fileName); - - //--------------------------------------------------------------------------- - /// @brief - /// Renames an existing file. If the file with target name exists, this call - /// overwrites it with the file with source name. - /// @param source - /// Current File name. - /// @param target - /// New name of the file. - /// @param overwrite - /// Flag indicating to overwrite existing file with newName - /// @return - /// True if successful, otherwise false. - /// @warning - /// Does not work if source and target are on different filesystems. - //--------------------------------------------------------------------------- - static bool move(const std::string &source, const std::string &target, bool overwrite); - - //--------------------------------------------------------------------------- - /// @brief - /// Delete an existing file - /// @param fileName - /// File name of the file to be deleted. - /// @return - /// True if successful, otherwise false. 
- //--------------------------------------------------------------------------- - static bool deleteFile(const std::string &fileName); - - //--------------------------------------------------------------------------- - /// @brief - /// Check if path is a directory or not - /// @param path - /// Path to check - /// @return - /// True if successful, otherwise false. - //--------------------------------------------------------------------------- - static bool checkIsDir(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Data type representing parts of a filename - //--------------------------------------------------------------------------- - typedef struct { - //--------------------------------------------------------------------------- - /// @brief Name of the file without the extension (i.e., basename) - //--------------------------------------------------------------------------- - std::string basename; - - //--------------------------------------------------------------------------- - /// @brief Name of the file extension (i.e., .txt or .hlnd, .html) - //--------------------------------------------------------------------------- - std::string extension; - - //--------------------------------------------------------------------------- - /// @brief - /// Location of the file (i.e., /abc/xyz/foo.bar <-- /abc/xyz/). - /// If the file name has no location then the Directory points to - /// empty string - //--------------------------------------------------------------------------- - std::string directory; - } FilenamePartsType_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Determines the components of a given filename, being the directory, - /// basename and extension. 
If the file has no location or extension, these - /// components remain empty - /// @param filename - /// Path of the file for which the components are to be determined - /// @param filenameParts - /// Will contain the file name components when this function returns - /// @return - /// True if successful, false otherwise - //--------------------------------------------------------------------------- - static bool getFileInfo(const std::string &filename, FilenamePartsType_t &filenameParts); - - //--------------------------------------------------------------------------- - /// @brief - /// Typedef for a vector of FilenamePartsType_t - //--------------------------------------------------------------------------- - typedef std::vector FilenamePartsListType_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Typedef for a vector of FilenamePartsType_t const iterator - //--------------------------------------------------------------------------- - typedef std::vector::const_iterator FilenamePartsListTypeIter_t; - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a vector of FilenamePartsType_t objects for a given directory - /// @param path - /// Path to scan for files - /// @return - /// True if successful, false otherwise - //--------------------------------------------------------------------------- - static bool getFileInfoList(const std::string &path, FilenamePartsListType_t &filenamePartsList); - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a vector of FilenamePartsType_t objects for a given directory - /// and the child directories inside. 
- /// @param path - /// Path to directory to scan for files for - /// @note if path is not a directory - the function will return false - /// @param filenamePartList - /// List to append to - /// @param ignoreDirs - /// If this flag is set to true, directories (and symbolic links to directories) - /// are not included in the list. Only actual files below the specified - /// directory path will be appended. - /// @return True if successful, false otherwise - /// @note Directories in list only populate Directory member variable of the struct. - /// That is Basename and Extension will be empty strings. - /// @note Symbolic links to directories are not followed. This is to avoid possible - /// infinite recursion. However the initial call to this method can have - /// path to be a symbolic link to a directory. If ignoreDirs is true, - /// symbolic links to directories are also ignored. - /// @note The order in which the files/directories are listed is platform - /// dependent. However files inside a directory always come before the - /// directory itself. 
- //--------------------------------------------------------------------------- - static bool getFileInfoListRecursive(const std::string &path, - FilenamePartsListType_t &filenamePartsList, - const bool ignoreDirs); - - //--------------------------------------------------------------------------- - /// @brief - /// Create an absolute path from the supplied path - /// @param path - /// Path should not contain trailing '/' or '\\' - /// @return - /// Return absolute path without trailing '/' or '\\' - //--------------------------------------------------------------------------- - static std::string getAbsolutePath(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Get the file name from a path - //--------------------------------------------------------------------------- - static std::string getFileName(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Get the directory path to a file - //--------------------------------------------------------------------------- - static std::string getDirectory(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Get the current working directory. - /// @returns The absolute CWD or empty string if the path could not be - /// retrieved (because it was too long or deleted for example). 
- //--------------------------------------------------------------------------- - static std::string getCurrentWorkingDirectory(); - - //--------------------------------------------------------------------------- - /// @brief Set the current working directory - //--------------------------------------------------------------------------- - static bool setCurrentWorkingDirectory(const std::string &workingDir); - - //--------------------------------------------------------------------------- - /// @brief Returns true if the file contains any extension or false. - //--------------------------------------------------------------------------- - static bool hasFileExtension(const std::string &file); - - //--------------------------------------------------------------------------- - /// @brief Returns full path of file, Directory/Basename(.Extension, if any) - //--------------------------------------------------------------------------- - static std::string partsToString(const FilenamePartsType_t &filenameParts); -}; diff --git a/LibQNNHelper/src/PAL/include/PAL/GetOpt.hpp b/LibQNNHelper/src/PAL/include/PAL/GetOpt.hpp deleted file mode 100644 index 9147283..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/GetOpt.hpp +++ /dev/null @@ -1,93 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//-------------------------------------------------------------------------------- -/// @file -/// This file includes APIs for the command line parsing on supported platforms -//-------------------------------------------------------------------------------- - -#pragma once - -namespace pal { -// we implement a similar API for POSIX.2 -// so that some global var are necessary - -extern const char *g_optArg; -extern int g_optInd; - -enum { - no_argument = 0, - required_argument = 1, - optional_argument = 2, -}; - -//-------------------------------------------------------------------------------------------------- -/// @brief -/// This structure describes a single long option name for the sake of getopt_long. The argument -/// longopts must be an array of these structures, one for each long option. Terminate the array -/// with an element containing all zeros. -//-------------------------------------------------------------------------------------------------- -struct Option { - //-------------------------------------------------------------------------------------------------- - /// @brief The name of the long option. - //-------------------------------------------------------------------------------------------------- - const char *name; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// If the option does not take an argument, no_argument (or 0). - /// If the option requires an argument, required_argument (or 1). - //-------------------------------------------------------------------------------------------------- - int hasArg; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// Specifies how results are returned for a long option. - /// If flag is NULL, then GetOptLongOnly() returns val. 
Otherwise, it returns 0, and flag - /// points to a variable which is set to val if the option is found, but - /// left unchanged if the option is not found. - //-------------------------------------------------------------------------------------------------- - int *flag; - - //-------------------------------------------------------------------------------------------------- - /// @brief - /// The value to return, or to load into the variable pointed to by flag. - /// The last element of the array has to be filled with zeros. - //-------------------------------------------------------------------------------------------------- - int val; -}; - -//-------------------------------------------------------------------------------------------------- -/// @brief -/// This parses command-line options as POSIX getopt_long_only() -/// but we don't support optstring and optonal_argument now -/// @param argc -/// Argument count -/// @param argv -/// Argument array -/// @param optstring -/// Legitimate option characters, short options, don't support now -/// @param longopts -/// A pointer to the first element of an array of struct option, -/// has_arg field in the struct option indicates 3 possibilities, -/// no_argument, required_argument or optional_argument. we don't -/// support optional_argument now -/// @param longindex -/// If longindex is not NULL, it points to a variable which is set -/// to the index of the long option relative to longopts -/// @return -/// -1 for parsing done, '?' 
for non-recognized arguments, 0 for -/// flag in longopts is not NULL and saved the val to it -//-------------------------------------------------------------------------------------------------- -int getOptLongOnly(int argc, - const char *const argv[], - const char *optstring, - const struct Option *longopts, - int *longindex); - -} // namespace pal diff --git a/LibQNNHelper/src/PAL/include/PAL/Path.hpp b/LibQNNHelper/src/PAL/include/PAL/Path.hpp deleted file mode 100644 index 0c9a1e5..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/Path.hpp +++ /dev/null @@ -1,51 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//------------------------------------------------------------------------------ -/// @file -/// The file includes APIs for path related operations on supported platforms -//------------------------------------------------------------------------------ - -#pragma once - -#include -#include - -namespace pal { -class Path; -} - -class pal::Path { - public: - //--------------------------------------------------------------------------- - /// @brief Returns path separator for the system - //--------------------------------------------------------------------------- - static char getSeparator(); - - //--------------------------------------------------------------------------- - /// @brief Concatenate s1 and s2 - //--------------------------------------------------------------------------- - static std::string combine(const std::string &s1, const std::string &s2); - - //--------------------------------------------------------------------------- - /// @brief Get the directory name - //--------------------------------------------------------------------------- - static std::string getDirectoryName(const 
std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Get absolute path - //--------------------------------------------------------------------------- - static std::string getAbsolute(const std::string &path); - - //--------------------------------------------------------------------------- - /// @brief Check if the input path is absolute path - //--------------------------------------------------------------------------- - static bool isAbsolute(const std::string &path); - - private: -}; diff --git a/LibQNNHelper/src/PAL/include/PAL/StringOp.hpp b/LibQNNHelper/src/PAL/include/PAL/StringOp.hpp deleted file mode 100644 index 1714517..0000000 --- a/LibQNNHelper/src/PAL/include/PAL/StringOp.hpp +++ /dev/null @@ -1,60 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -//----------------------------------------------------------------------------- -/// @file -/// The file inludes APIs for string operations on supported platforms -//----------------------------------------------------------------------------- - -#pragma once - -#include - -namespace pal { -class StringOp; -} - -//------------------------------------------------------------------------------ -/// @brief -/// FileOp contains OS Specific file system functionality. -//------------------------------------------------------------------------------ -class pal::StringOp { - public: - //--------------------------------------------------------------------------- - /// @brief - /// Copy copy_size bytes from buffer src to buffer dst. Behaviour of the - /// function is undefined if src and dst overlap. 
- /// @param dst - /// Destination buffer - /// @param dst_size - /// Size of destination buffer - /// @param src - /// Source buffer - /// @param copy_size - /// Number of bytes to copy - /// @return - /// Number of bytes copied - //--------------------------------------------------------------------------- - static size_t memscpy(void *dst, size_t dstSize, const void *src, size_t copySize); - - //--------------------------------------------------------------------------- - /// @brief - /// Returns a pointer to a null-terminated byte string, which contains copies - /// of at most size bytes from the string pointed to by str. If the null - /// terminator is not encountered in the first size bytes, it is added to the - /// duplicated string. - /// @param source - /// Source string - /// @param maxlen - /// Max number of bytes to copy from str - /// @return - /// A pointer to the newly allocated string, or a null pointer if an error - /// occurred. - //--------------------------------------------------------------------------- - static char *strndup(const char *source, size_t maxlen); -}; diff --git a/LibQNNHelper/src/PAL/src/common/GetOpt.cpp b/LibQNNHelper/src/PAL/src/common/GetOpt.cpp deleted file mode 100644 index 78e2655..0000000 --- a/LibQNNHelper/src/PAL/src/common/GetOpt.cpp +++ /dev/null @@ -1,154 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include - -#include - -#include "PAL/GetOpt.hpp" - -using namespace std; - -namespace pal { - -const char *g_optArg = nullptr; -int g_optInd = 1; - -static const struct Option *findOpt(const string str, - const struct Option *longopts, - int *longindex) { - const struct Option *opt = nullptr; - int idx = 0; - size_t searchEnd = str.find_first_of("="); - - for (opt = longopts; opt->name && strlen(opt->name) > 0; opt++, idx++) { - if (str.substr(0, searchEnd) == opt->name) { - if (longindex) { - *longindex = idx; - } - break; - } - } - // if not found, opt would point to the last element of longopts - // whose name MUST be empty - return opt->name ? opt : nullptr; -} - -int getOptLongOnly(int argc, - const char *const argv[], - const char *, - const struct Option *longopts, - int *longindex) { - const struct Option *opt; - int argLen = 0; - bool isShort = false; - const char *arg = ""; - - g_optArg = nullptr; - // no arg, means the end of command - if (g_optInd >= argc) { - return -1; - } - - arg = argv[g_optInd]; - - if (arg[0] != '-') { - g_optInd += 1; - return '?'; - } - - argLen = strlen(arg); - - if (argLen < 2) { - g_optInd += 1; - return '?'; - } - - if (!longopts) { - g_optInd += 1; - return '?'; - } - - // check short options with this form, -a arg - if (argLen == 2) { - isShort = true; - // check short options with this form, -a=arg - } else if (argLen > 3 && arg[2] == '=') { - isShort = true; - // check for long options, can be used for both forms - } else if (argLen > 2 && arg[1] != '=') { - if (arg[1] != '-') { - g_optInd += 1; - return '?'; - } - isShort = false; - } - - // start after -- to find the option - const char *const optStr = isShort ? 
&arg[1] : &arg[2]; - opt = findOpt(optStr, longopts, longindex); - if (!opt) { - g_optInd += 1; - return '?'; - } - - if (opt->hasArg == no_argument) { - g_optInd += 1; - - if (!opt->flag) { - return opt->val; - } else { - *(opt->flag) = opt->val; - return 0; - } - } - - if (opt->hasArg == required_argument) { - string optStr = argv[g_optInd]; - size_t assignIdx = optStr.find_first_of("="); - bool advance = (assignIdx == string::npos); - - // if it is --opt arg form, this will be true, - // so we need to advance one step to get arg - // otherwise, need to stop advance step & extract arg from argv[g_optInd] - if (advance) { - g_optInd += 1; - } - - if (g_optInd >= argc) { - return '?'; - } else { - // if advance, means it is the form --opt arg - // otherwise, the form, --opt=arg - if (advance) { - // since g_optInd is advanced, g_optArg can be assigned directly - g_optArg = argv[g_optInd]; - } else { - if (assignIdx == optStr.size()) { - return '?'; - } - // for not advanced form, - // g_optArg should point to the address right after "=" - g_optArg = &argv[g_optInd][assignIdx + 1]; - } - // OK, now we are ready to handle the next pair - g_optInd += 1; - - if (!opt->flag) { - return opt->val; - } else { - *(opt->flag) = opt->val; - return 0; - } - } - } - - return '?'; -} // end of getOptLongOnly - -} // namespace pal diff --git a/LibQNNHelper/src/PAL/src/common/StringOp.cpp b/LibQNNHelper/src/PAL/src/common/StringOp.cpp deleted file mode 100644 index 3306041..0000000 --- a/LibQNNHelper/src/PAL/src/common/StringOp.cpp +++ /dev/null @@ -1,45 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include - -#include "PAL/StringOp.hpp" - -//--------------------------------------------------------------------------- -// pal::StringOp::memscpy -//--------------------------------------------------------------------------- -size_t pal::StringOp::memscpy(void *dst, size_t dstSize, const void *src, size_t copySize) { - if (!dst || !src || !dstSize || !copySize) return 0; - - size_t minSize = dstSize < copySize ? dstSize : copySize; - - memcpy(dst, src, minSize); - - return minSize; -} - -//--------------------------------------------------------------------------- -// pal::StringOp::strndup -//--------------------------------------------------------------------------- -char *pal::StringOp::strndup(const char *source, size_t maxlen) { -#ifdef _WIN32 - size_t length = ::strnlen(source, maxlen); - - char *destination = (char *)malloc((length + 1) * sizeof(char)); - if (destination == nullptr) return nullptr; - - // copy length bytes to destination and leave destination[length] to be - // null terminator - strncpy_s(destination, length + 1, source, length); - - return destination; -#else - return ::strndup(source, maxlen); -#endif -} diff --git a/LibQNNHelper/src/PAL/src/windows/Common.cpp b/LibQNNHelper/src/PAL/src/windows/Common.cpp deleted file mode 100644 index 0234174..0000000 --- a/LibQNNHelper/src/PAL/src/windows/Common.cpp +++ /dev/null @@ -1,46 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "Common.hpp" -#include "PAL/Debug.hpp" - -int32_t pal::scanDir(const std::string &path, std::vector &namelist) { - // example : "C:/Users/guest" scan nothing, "C:/Users/guest/*" can scan the - // entire directory instead - std::string scanPath = path + "/*"; - WIN32_FIND_DATAA findFileData; - HANDLE hFind = FindFirstFileA(scanPath.c_str(), &findFileData); - if (hFind == INVALID_HANDLE_VALUE) { - DEBUG_MSG("scanDir fail! Error code : %d", GetLastError()); - return -1; - } - - do { - // will compare char until '\0' to allow filename with first char = '.' - if (strncmp(findFileData.cFileName, ".", 2) == 0 || - strncmp(findFileData.cFileName, "..", 3) == 0) { - continue; - } - namelist.push_back(findFileData); - } while (FindNextFileA(hFind, &findFileData)); - FindClose(hFind); - - return namelist.size(); -} - -void pal::normalizeSeparator(std::string &path) { replace(path.begin(), path.end(), '\\', '/'); } diff --git a/LibQNNHelper/src/PAL/src/windows/Common.hpp b/LibQNNHelper/src/PAL/src/windows/Common.hpp deleted file mode 100644 index 070bb6c..0000000 --- a/LibQNNHelper/src/PAL/src/windows/Common.hpp +++ /dev/null @@ -1,37 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include -#include - -#include -#include - -namespace pal { -/** - * @brief - * Scans elements in a directory. - * @param path - * Path in string which we are going to scan. - * @param namelist - * Data struct for each element, which will be stored as WIN32_FIND_DATAA. 
- * @return - * Number of elements in this path, return -1 if fail. - */ -int32_t scanDir(const std::string &path, std::vector &namelist); - -/** - * @brief - * Replace all the '\\' in path with '/' to keep consistency. - * @param path - * The string which you want to format. - */ -void normalizeSeparator(std::string &path); -} // namespace pal diff --git a/LibQNNHelper/src/PAL/src/windows/Directory.cpp b/LibQNNHelper/src/PAL/src/windows/Directory.cpp deleted file mode 100644 index d8a0c93..0000000 --- a/LibQNNHelper/src/PAL/src/windows/Directory.cpp +++ /dev/null @@ -1,105 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include - -#include -#include - -#include "Common.hpp" -#include "PAL/Debug.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -//-------------------------------------------------------------------------------------- -// pal::Directory::Create -//-------------------------------------------------------------------------------------- -bool pal::Directory::create(const std::string &path, pal::Directory::DirMode dirmode) { - struct stat st; - // it create a directory successfully or directory exists already, return true. - if ((stat(path.c_str(), &st) != 0 && (CreateDirectoryA(path.c_str(), NULL) != 0)) || - ((st.st_mode & S_IFDIR) != 0)) { - return true; - } else { - DEBUG_MSG("Create Folder fail! 
Error code : %d", GetLastError()); - } - return false; -} - -//-------------------------------------------------------------------------------------- -// pal::Directory::Remove -//-------------------------------------------------------------------------------------- -bool pal::Directory::remove(const std::string &dirName) { - struct stat st; - if (stat(dirName.c_str(), &st) == 0) { - if ((st.st_mode & S_IFDIR) != 0) { - // a directory exist and remove it ! - std::string fullPath = dirName; - if (pal::Path::isAbsolute(dirName) == 0) { - fullPath = pal::Path::getAbsolute(dirName); - } - // Note This string MUST be double-null terminated. - fullPath = fullPath + '\0' + '\0'; - SHFILEOPSTRUCTA fileOp = { - NULL, // hwnd - FO_DELETE, // wFunc, delete usage - fullPath.c_str(), // pFrom, delete target folder - "", // pTo, delete operation can ignore this - FOF_NO_UI, // Perform operation silently, presenting no UI to user - false, // fAnyOperationsAborted, - 0, // hNameMappings - "" // lpszProgressTitle, used only if for FOF_SIMPLEPROGRESS - }; - if (SHFileOperationA(&fileOp) == 0) { - return true; - } else { - DEBUG_MSG("Delete folder fail! Error code : %d", GetLastError()); - } - } - } else { - // If the directory doesn't exist then just, return true. Behaves like Linux - if (errno == ENOENT) { - return true; - } else { - DEBUG_MSG("Remove stat fail! 
Error code : %d", errno); - } - } - return false; -} - -//-------------------------------------------------------------------------------------- -// pal::Directory::MakePath -//-------------------------------------------------------------------------------------- -bool pal::Directory::makePath(const std::string &path) { - struct stat st; - bool rc = false; - if (path == ".") { - rc = true; - } else if (stat(path.c_str(), &st) == 0) { - if ((st.st_mode & S_IFDIR) != 0) { - // if a directory path is already exist - rc = true; - } - } else { - size_t offset = std::min(path.find_last_of('/'), path.find_last_of('\\')); - if (offset != std::string::npos) { - std::string newPath = path.substr(0, offset); - if (!makePath(newPath)) { - return false; - } - } - pal::Directory::create(path.c_str()); - if ((stat(path.c_str(), &st) == 0) && ((st.st_mode & S_IFDIR) != 0)) { - rc = true; - } - } - return rc; -} \ No newline at end of file diff --git a/LibQNNHelper/src/PAL/src/windows/DynamicLoading.cpp b/LibQNNHelper/src/PAL/src/windows/DynamicLoading.cpp deleted file mode 100644 index eae0f94..0000000 --- a/LibQNNHelper/src/PAL/src/windows/DynamicLoading.cpp +++ /dev/null @@ -1,220 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -// clang-format off -#include -#include -#include -#include -#include -// clang-format on - -#include -#include - -#include "PAL/Debug.hpp" -#include "PAL/DynamicLoading.hpp" - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) - -static std::set mod_handles; -static thread_local char *sg_lastErrMsg = ""; - -void *pal::dynamicloading::dlOpen(const char *filename, int flags) { - HMODULE mod; - HANDLE cur_proc; - DWORD as_is, to_be; - bool loadedBefore = false; - - if (!filename || ::strlen(filename) == 0) { - // TODO: we don't support empty filename now - sg_lastErrMsg = "filename is null or empty"; - return NULL; - } - - // POSIX asks one of symbol resolving approaches: - // NOW or LAZY must be specified - if (!(flags & DL_NOW)) { - // TODO: since Windows does not provide existing API so lazy - // symbol resolving needs to do relocation by ourself - // that would be too costly. SNPE didn't use this feature now - // , wait until we really need it. 
keep the flexibility here - // ask caller MUST pass DL_NOW - sg_lastErrMsg = "flags must include DL_NOW"; - return NULL; - } - - cur_proc = GetCurrentProcess(); - - if (EnumProcessModules(cur_proc, NULL, 0, &as_is) == 0) { - sg_lastErrMsg = "enumerate modules failed before loading module"; - return NULL; - } - - // search from system lib path first - mod = LoadLibraryExA(filename, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); - if (!mod) { - sg_lastErrMsg = "load library failed"; - return NULL; - } - - if (EnumProcessModules(cur_proc, NULL, 0, &to_be) == 0) { - sg_lastErrMsg = "enumerate modules failed after loading module"; - FreeLibrary(mod); - return NULL; - } - - if (as_is == to_be) { - loadedBefore = true; - } - - // (not loadedBefore) and DL_LOCAL means this lib was not loaded yet - // add it into the local set - // - // If loadedBefore and DL_LOCAL, means this lib was already loaded - // 2 cases here for how it was loaded before: - // a. with DL_LOCAL, just ignore since it was already in local set - // b. 
with DL_GLOBAL, POSIX asks it in global, ignore it, too - if ((!loadedBefore) && (flags & DL_LOCAL)) { - mod_handles.insert(mod); - } - - // once callers ask for global, needs to be in global thereafter - // so the lib should be removed from local set - if (flags & DL_GLOBAL) { - mod_handles.erase(mod); - } - - return static_cast(mod); -} - -void *pal::dynamicloading::dlSym(void *handle, const char *symbol) { - FARPROC sym_addr = NULL; - HANDLE cur_proc; - DWORD size, size_needed; - HMODULE *mod_list; - HMODULE mod = 0; - - if ((!handle) || (!symbol)) { - return NULL; - } - - cur_proc = GetCurrentProcess(); - - if (EnumProcessModules(cur_proc, NULL, 0, &size) == 0) { - sg_lastErrMsg = "enumerate modules failed before memory allocation"; - return NULL; - } - - mod_list = static_cast(malloc(size)); - if (!mod_list) { - sg_lastErrMsg = "malloc failed"; - return NULL; - } - - if (EnumProcessModules(cur_proc, mod_list, size, &size_needed) == 0) { - sg_lastErrMsg = "enumerate modules failed after memory allocation"; - free(mod_list); - return NULL; - } - - // DL_DEFAULT needs to bypass those modules with DL_LOCAL flag - if (handle == DL_DEFAULT) { - for (size_t i = 0; i < (size / sizeof(HMODULE)); i++) { - auto iter = mod_handles.find(mod_list[i]); - if (iter != mod_handles.end()) { - continue; - } - // once find the first non-local module with symbol - // return its address here to avoid unnecessary looping - sym_addr = GetProcAddress(mod_list[i], symbol); - if (sym_addr) { - free(mod_list); - return *(void **)(&sym_addr); - } - } - } else { - mod = static_cast(handle); - } - - free(mod_list); - sym_addr = GetProcAddress(mod, symbol); - if (!sym_addr) { - sg_lastErrMsg = "can't resolve symbol"; - return NULL; - } - - return *(void **)(&sym_addr); -} - -int pal::dynamicloading::dlAddrToLibName(void *addr, std::string &name) { - // Clean the output buffer - name = std::string(); - - // If the address is empty, return zero as treating failure - if (!addr) { - 
DEBUG_MSG("Input address is nullptr."); - return 0; - } - - HMODULE hModule = NULL; - // TODO: Need to use TCHAR for the compatibility of ASCII and Unicode - CHAR nameBuf[MAX_PATH]; - - // (1st flag) The lpModuleName parameter is an address in the module - // (2nd flag) The reference count for the module is not incremented - DWORD flags = - GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT; - - // Retrieves a module handle for the specified module by its symbol address - if (!GetModuleHandleExA(flags, reinterpret_cast(addr), &hModule) || hModule == NULL) { - DEBUG_MSG("Failed to get module handle. Error code: %d", GetLastError()); - return 0; - } - - // Retrieves the fully qualified path for the file that contains the specified module - DWORD dwSize = GetModuleFileNameA(hModule, nameBuf, sizeof(nameBuf)); - - // dwSize == 0 indicates function failure - // If the path is too long (greater than MAX_PATH), treat it as failure - if (dwSize == 0 || ERROR_INSUFFICIENT_BUFFER == GetLastError()) { - DEBUG_MSG("Failed to get module file name. 
Error code: %d", GetLastError()); - return 0; - } - - name = std::string(nameBuf); - - // Return a non-zero value to represent the function successes - return 1; -} - -int pal::dynamicloading::dlClose(void *handle) { - if (!handle) { - return 0; - } - - HMODULE mod = static_cast(handle); - - if (FreeLibrary(mod) == 0) { - sg_lastErrMsg = "free library failed"; - return -1; - } - - mod_handles.erase(mod); - - return 0; -} - -char *pal::dynamicloading::dlError(void) { - char *retStr = sg_lastErrMsg; - - sg_lastErrMsg = ""; - - return retStr; -} diff --git a/LibQNNHelper/src/PAL/src/windows/FileOp.cpp b/LibQNNHelper/src/PAL/src/windows/FileOp.cpp deleted file mode 100644 index 5a7cd6e..0000000 --- a/LibQNNHelper/src/PAL/src/windows/FileOp.cpp +++ /dev/null @@ -1,297 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "Common.hpp" -#include "PAL/Debug.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -//------------------------------------------------------------------------------- -// pal::FileOp::checkFileExists -//------------------------------------------------------------------------------- -bool pal::FileOp::checkFileExists(const std::string &fileName) { - struct stat st; - if (stat(fileName.c_str(), &st) != 0) { - DEBUG_MSG("Check File fail! 
Error code : %d", errno); - return false; - } - return true; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::copyOverFile -//------------------------------------------------------------------------------- -bool pal::FileOp::copyOverFile(const std::string &fromFile, const std::string &toFile) { - if (CopyFileA(fromFile.c_str(), toFile.c_str(), 0) == 0) { - DEBUG_MSG("Copy file fail! Error code : %d", GetLastError()); - return false; - } - return true; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::move -//------------------------------------------------------------------------------- -bool pal::FileOp::move(const std::string ¤tName, const std::string &newName, bool overwrite) { - struct stat st; - // if currentName doesn't exist, return false in case newName got deleted - if (stat(currentName.c_str(), &st) != 0) { - DEBUG_MSG("CurrentName check status fail! Error code : %d", errno); - return false; - } - if (stat(newName.c_str(), &st) == 0) { - if ((st.st_mode & S_IFDIR) != 0) { - // if newName is directory and overwrite = false, cannot move, return false - // if newName is directory and overwrite = true, delete it and rename - if (overwrite == false) { - return false; - } - pal::Directory::remove(newName); - } else { - deleteFile(newName); - } - } - // in windows, if newName exist already, rename will return -1 - // only when newName doesn't exist, rename will return 0 - return (rename(currentName.c_str(), newName.c_str()) == 0); -} - -//------------------------------------------------------------------------------- -// pal::FileOp::deleteFile -//------------------------------------------------------------------------------- -bool pal::FileOp::deleteFile(const std::string &fileName) { - return (DeleteFileA(fileName.c_str()) != 0); -} - -//------------------------------------------------------------------------------- -// pal::FileOp::checkIsDir 
-//------------------------------------------------------------------------------- -bool pal::FileOp::checkIsDir(const std::string &fileName) { - DWORD result = GetFileAttributesA(fileName.c_str()); - if (result == static_cast(FILE_INVALID_FILE_ID)) { - DEBUG_MSG("File attribute is invalid_file_id!"); - return false; - } - return (result & FILE_ATTRIBUTE_DIRECTORY) != 0; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getFileInfo -//------------------------------------------------------------------------------- -bool pal::FileOp::getFileInfo(const std::string &filename, - pal::FileOp::FilenamePartsType_t &filenameParts) { - std::string name; - int32_t lastPathSeparator = std::max(static_cast(filename.find_last_of('\\')), - static_cast(filename.find_last_of('/'))); - if (lastPathSeparator == static_cast(std::string::npos)) { - // No directory - name = filename; - } else { - // has a directory part - filenameParts.directory = filename.substr(0, lastPathSeparator); - name = filename.substr(lastPathSeparator + 1); - } - - size_t ext = name.find_last_of("."); - if (ext == std::string::npos) { - // no extension - filenameParts.basename = name; - } else { - // has extension - filenameParts.basename = name.substr(0, ext); - filenameParts.extension = name.substr(ext + 1); - } - pal::normalizeSeparator(filenameParts.directory); - return true; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getFileInfoListRecursiveImpl -//------------------------------------------------------------------------------- -static bool getFileInfoListRecursiveImpl(const std::string &path, - pal::FileOp::FilenamePartsListType_t &filenamePartsList, - const bool ignoreDirs, - size_t maxDepth) { - // base case - if (maxDepth == 0) { - return true; - } - if (pal::FileOp::checkIsDir(path) == false) { - return false; - } - int32_t entryCount = 0; - std::vector nameList; - entryCount = 
pal::scanDir(path.c_str(), nameList); - if (entryCount < 0) { - return false; - } - while (entryCount--) { - const std::string dName = std::string(nameList[entryCount].cFileName); - // skip current directory, previous directory and empty string - if (dName.empty() || dName == "." || dName == "..") { - continue; - } - std::string curPath = path + pal::Path::getSeparator() + dName; - // recursive if directory but avoid symbolic links to directories - if (pal::FileOp::checkIsDir(curPath)) { - struct stat st; - if (stat(curPath.c_str(), &st) == 0 && ((st.st_mode & S_IFDIR) != 0) && - (!getFileInfoListRecursiveImpl(curPath, filenamePartsList, ignoreDirs, maxDepth - 1))) { - return false; - } - if (curPath.back() != pal::Path::getSeparator()) { - curPath += pal::Path::getSeparator(); - } - // continue here to prevent this object from adding filenameparts in - // vector but we still need this directory to go recursive - if (ignoreDirs) { - continue; - } - } - // add to vector - pal::FileOp::FilenamePartsType_t filenameParts = {std::string(), std::string(), std::string()}; - if (pal::FileOp::getFileInfo(curPath, filenameParts)) { - filenamePartsList.push_back(filenameParts); - } - } - return true; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getFileInfoList -//------------------------------------------------------------------------------- -bool pal::FileOp::getFileInfoList(const std::string &path, - FilenamePartsListType_t &filenamePartsList) { - return getFileInfoListRecursiveImpl(path, filenamePartsList, false, 1); -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getFileInfoListRecursive -//------------------------------------------------------------------------------- -bool pal::FileOp::getFileInfoListRecursive(const std::string &path, - FilenamePartsListType_t &filenamePartsList, - const bool ignoreDirs) { - return getFileInfoListRecursiveImpl(path, 
filenamePartsList, ignoreDirs, UINT_MAX); -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getAbsolutePath -//------------------------------------------------------------------------------- -std::string pal::FileOp::getAbsolutePath(const std::string &path) { - char fullPath[MAX_PATH]; - if (_fullpath(fullPath, path.c_str(), MAX_PATH) == NULL) { - DEBUG_MSG("GetAbsolute path fail! Error code : %d", errno); - return std::string(); - } - std::string reStr = std::string(fullPath); - pal::normalizeSeparator(reStr); - return reStr; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getDirectory -//------------------------------------------------------------------------------- -std::string pal::FileOp::getDirectory(const std::string &file) { - std::string rc = file; - int32_t index = std::max(static_cast(file.find_last_of('\\')), - static_cast(file.find_last_of('/'))); - if (index != static_cast(std::string::npos)) { - rc = file.substr(0, index); - } - pal::normalizeSeparator(rc); - return rc; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::GetFileName -//------------------------------------------------------------------------------- -std::string pal::FileOp::getFileName(const std::string &file) { - std::string rc = file; - int32_t index = std::max(static_cast(file.find_last_of('\\')), - static_cast(file.find_last_of('/'))); - if (index != static_cast(std::string::npos)) { - rc = file.substr(index + 1); // +1 to skip path separator - } - return rc; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::hasFileExtension -//------------------------------------------------------------------------------- -bool pal::FileOp::hasFileExtension(const std::string &file) { - FilenamePartsType_t parts = {std::string(), std::string(), std::string()}; - getFileInfo(file, 
parts); - return !parts.extension.empty(); -} - -//------------------------------------------------------------------------------- -// pal::FileOp::getCurrentWorkingDirectory -//------------------------------------------------------------------------------- -std::string pal::FileOp::getCurrentWorkingDirectory() { - char buffer[MAX_PATH + 1]; - buffer[0] = '\0'; - - // If there is any failure return empty string. It is technically possible - // to handle paths exceeding PATH_MAX on some flavors of *nix but platforms - // like Android (Bionic) do no provide such capability. For consistency we - // will not handle extra long path names. - if (0 == GetCurrentDirectoryA(MAX_PATH, buffer)) { - DEBUG_MSG("Get current working directory fail! Error code : %d", GetLastError()); - return std::string(); - } - std::string res = std::string(buffer); - pal::normalizeSeparator(res); - return res; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::setCurrentWorkingDirectory -//------------------------------------------------------------------------------- -bool pal::FileOp::setCurrentWorkingDirectory(const std::string &workingDir) { - return _chdir(workingDir.c_str()) == 0; -} - -//------------------------------------------------------------------------------- -// pal::FileOp::PartsToString -//------------------------------------------------------------------------------- -std::string pal::FileOp::partsToString(const FilenamePartsType_t &filenameParts) { - std::string path; - - if (!filenameParts.directory.empty()) { - path += filenameParts.directory; - path += Path::getSeparator(); - } - if (!filenameParts.basename.empty()) { - path += filenameParts.basename; - } - if (!filenameParts.extension.empty()) { - path += "."; - path += filenameParts.extension; - } - pal::normalizeSeparator(path); - return path; -} \ No newline at end of file diff --git a/LibQNNHelper/src/PAL/src/windows/Path.cpp 
b/LibQNNHelper/src/PAL/src/windows/Path.cpp deleted file mode 100644 index 2c49097..0000000 --- a/LibQNNHelper/src/PAL/src/windows/Path.cpp +++ /dev/null @@ -1,72 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include - -#include -#include -#include - -#include "Common.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -//------------------------------------------------------------------------------ -// PAL::Path::GetSeparator -//------------------------------------------------------------------------------ -char pal::Path::getSeparator() { return '/'; } - -//------------------------------------------------------------------------------ -// pal::Path::Combine -//------------------------------------------------------------------------------ -std::string pal::Path::combine(const std::string &s1, const std::string &s2) { - std::stringstream ss; - ss << s1; - if (s1.size() > 0 && ((s1[s1.size() - 1] != '/') && (s1[s1.size() - 1] != '\\'))) { - ss << getSeparator(); - } - ss << s2; - return ss.str(); -} - -//------------------------------------------------------------------------------ -// pal::Path::getDirectoryName -//------------------------------------------------------------------------------ -std::string pal::Path::getDirectoryName(const std::string &path) { - std::string rc = path; - int32_t index = std::max(static_cast(path.find_last_of('\\')), - static_cast(path.find_last_of('/'))); - if (index != static_cast(std::string::npos)) { - rc = path.substr(0, index); - } - pal::normalizeSeparator(rc); - return rc; -} - -//------------------------------------------------------------------------------ -// pal::Path::getAbsolute 
-//------------------------------------------------------------------------------ -std::string pal::Path::getAbsolute(const std::string &path) { - std::string res = pal::FileOp::getAbsolutePath(path); - pal::normalizeSeparator(res); - return res; -} - -//------------------------------------------------------------------------------ -// PAL::Path::isAbsolute -// requirement : shlwapi.lib -//------------------------------------------------------------------------------ -bool pal::Path::isAbsolute(const std::string &path) { - std::string windowsPath = path; - // in windows, when we need to check relative or absolute path, - // separator MUST be '\\' rather than '/' - // for more information : https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats - replace(windowsPath.begin(), windowsPath.end(), '/', '\\'); - return PathIsRelativeA(windowsPath.c_str()) == false; -} diff --git a/LibQNNHelper/src/QnnSampleApp.cpp b/LibQNNHelper/src/QnnSampleApp.cpp deleted file mode 100644 index 2cb9a38..0000000 --- a/LibQNNHelper/src/QnnSampleApp.cpp +++ /dev/null @@ -1,1026 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include - -#include -#include -#include - -#include "DataUtil.hpp" -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" -#include "PAL/StringOp.hpp" -#include "QnnSampleApp.hpp" -#include "QnnSampleAppUtils.hpp" -#include "QnnWrapperUtils.hpp" - -// zw. 
-#include "QnnTypeMacros.hpp" -#include "IOTensor.hpp" -#include "LibQNNHelper.hpp" - - -using namespace qnn; -using namespace qnn::tools; -using namespace qnn::tools::iotensor; - -static const int sg_lowerLatency = 40; // Should be used on V66 and above only -static const int sg_lowLatency = 100; // This will limit sleep modes available while running -static const int sg_mediumLatency = 1000; // This will limit sleep modes available while running -static const int sg_highLatency = 2000; -static const uint32_t sg_powerConfigId = 1; - -bool disableDcvs(QnnHtpDevice_PerfInfrastructure_t perfInfra) { - QnnHtpPerfInfrastructure_PowerConfig_t powerConfig; - memset(&powerConfig, 0, sizeof(powerConfig)); - powerConfig.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3; - powerConfig.dcvsV3Config.dcvsEnable = 0; // FALSE - powerConfig.dcvsV3Config.setDcvsEnable = 1; - powerConfig.dcvsV3Config.powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_ADJUST_UP_DOWN; - powerConfig.dcvsV3Config.contextId = sg_powerConfigId; - - const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs[] = {&powerConfig, NULL}; - - if (QNN_SUCCESS != perfInfra.setPowerConfig(sg_powerConfigId, powerConfigs)) { - QNN_ERROR("Failure in setPowerConfig() from disableDcvs"); - return false; - } - return true; -} - -bool enableDcvs(QnnHtpDevice_PerfInfrastructure_t perfInfra) { - QnnHtpPerfInfrastructure_PowerConfig_t powerConfig; - memset(&powerConfig, 0, sizeof(powerConfig)); - powerConfig.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3; - powerConfig.dcvsV3Config.dcvsEnable = 1; - powerConfig.dcvsV3Config.setDcvsEnable = 1; - powerConfig.dcvsV3Config.powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_ADJUST_UP_DOWN; - powerConfig.dcvsV3Config.contextId = sg_powerConfigId; - - const QnnHtpPerfInfrastructure_PowerConfig_t *powerConfigs[] = {&powerConfig, NULL}; - - if (QNN_SUCCESS != perfInfra.setPowerConfig(sg_powerConfigId, powerConfigs)) { - QNN_ERROR("Failure in 
setPowerConfig() from disableDcvs"); - return false; - } - return true; -} - -bool boostPerformance(QnnHtpDevice_PerfInfrastructure_t perfInfra, std::string perfProfile) { - // Initialize the power config and select the voltage corner values for the performance setting. - QnnHtpPerfInfrastructure_PowerConfig_t powerConfig; - memset(&powerConfig, 0, sizeof(powerConfig)); - - QNN_INF("PERF::boostPerformance"); - - powerConfig.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3; - powerConfig.dcvsV3Config.dcvsEnable = 0; - powerConfig.dcvsV3Config.setDcvsEnable = 1; - powerConfig.dcvsV3Config.contextId = sg_powerConfigId; - - // refer QnnHtpPerfInfrastructure.h - powerConfig.dcvsV3Config.powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_PERFORMANCE_MODE; - powerConfig.dcvsV3Config.setSleepLatency = 1; - powerConfig.dcvsV3Config.setBusParams = 1; - powerConfig.dcvsV3Config.setCoreParams = 1; - powerConfig.dcvsV3Config.sleepDisable = 0; - powerConfig.dcvsV3Config.setSleepDisable = 0; - - if (perfProfile == "burst") { - QNN_DEBUG("boostPerformance::perfProfile=burst"); - powerConfig.dcvsV3Config.sleepLatency = sg_lowerLatency; // set dsp sleep latency ranges 10-65535 micro sec, refer hexagon sdk; - powerConfig.dcvsV3Config.busVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.busVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.busVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.coreVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.coreVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.coreVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MAX_VOLTAGE_CORNER; - } - else if(perfProfile == "high_performance") { - QNN_DEBUG("boostPerformance::perfProfile=high_performance"); - powerConfig.dcvsV3Config.sleepLatency = sg_lowLatency; - powerConfig.dcvsV3Config.busVoltageCornerMin = 
DCVS_VOLTAGE_VCORNER_TURBO; - powerConfig.dcvsV3Config.busVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_TURBO; - powerConfig.dcvsV3Config.busVoltageCornerMax = DCVS_VOLTAGE_VCORNER_TURBO; - powerConfig.dcvsV3Config.coreVoltageCornerMin = DCVS_VOLTAGE_VCORNER_TURBO; - powerConfig.dcvsV3Config.coreVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_TURBO; - powerConfig.dcvsV3Config.coreVoltageCornerMax = DCVS_VOLTAGE_VCORNER_TURBO; - } - else { - QNN_ERROR("Invalid performance profile %s to set power configs", perfProfile); - return false; - } - - // Set power config with different performance parameters - const QnnHtpPerfInfrastructure_PowerConfig_t* powerConfigs[] = { &powerConfig, NULL }; - if (QNN_SUCCESS != perfInfra.setPowerConfig(sg_powerConfigId, powerConfigs)) { - QNN_ERROR("Failure in setPowerConfig() from boostPerformance"); - return false; - } - - return disableDcvs(perfInfra); -} - -bool resetPerformance(QnnHtpDevice_PerfInfrastructure_t perfInfra) { - // Initialize the power config and select the voltage corner values for the performance setting. 
- QnnHtpPerfInfrastructure_PowerConfig_t powerConfig; - memset(&powerConfig, 0, sizeof(powerConfig)); - - QNN_INF("PERF::resetPerformance"); - - powerConfig.option = QNN_HTP_PERF_INFRASTRUCTURE_POWER_CONFIGOPTION_DCVS_V3; - powerConfig.dcvsV3Config.dcvsEnable = 1; - powerConfig.dcvsV3Config.setDcvsEnable = 1; - powerConfig.dcvsV3Config.contextId = sg_powerConfigId; - powerConfig.dcvsV3Config.sleepLatency = sg_highLatency; - powerConfig.dcvsV3Config.setSleepLatency = 1; - powerConfig.dcvsV3Config.sleepDisable = 0; - powerConfig.dcvsV3Config.setSleepDisable = 0; - powerConfig.dcvsV3Config.powerMode = QNN_HTP_PERF_INFRASTRUCTURE_POWERMODE_POWER_SAVER_MODE; - powerConfig.dcvsV3Config.busVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.busVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.busVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.setBusParams = 1; - powerConfig.dcvsV3Config.coreVoltageCornerMin = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.coreVoltageCornerTarget = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.coreVoltageCornerMax = DCVS_VOLTAGE_VCORNER_MIN_VOLTAGE_CORNER; - powerConfig.dcvsV3Config.setCoreParams = 1; - - // Set power config with different performance parameters - const QnnHtpPerfInfrastructure_PowerConfig_t* powerConfigs[] = { &powerConfig, NULL }; - if (QNN_SUCCESS != perfInfra.setPowerConfig(sg_powerConfigId, powerConfigs)) { - QNN_ERROR("Failure in setPowerConfig() from resetPerformance"); - return false; - } - - return enableDcvs(perfInfra); -} - - -// Default path where the outputs will be stored if outputPath is -// not supplied. 
-const std::string sample_app::QnnSampleApp::s_defaultOutputPath = "./output/"; - -sample_app::QnnSampleApp::QnnSampleApp(QnnFunctionPointers qnnFunctionPointers, - std::string inputListPaths, - std::string opPackagePaths, - void* backendLibraryHandle, - std::string outputPath, - bool debug, - iotensor::OutputDataType outputDataType, - iotensor::InputDataType inputDataType, - sample_app::ProfilingLevel profilingLevel, - bool dumpOutputs, - std::string cachedBinaryPath, - std::string saveBinaryName) - : m_qnnFunctionPointers(qnnFunctionPointers), - m_outputPath(outputPath), - m_saveBinaryName(saveBinaryName), - m_cachedBinaryPath(cachedBinaryPath), - m_debug(debug), - m_outputDataType(outputDataType), - m_inputDataType(inputDataType), - m_profilingLevel(profilingLevel), - m_dumpOutputs(dumpOutputs), - m_backendLibraryHandle(backendLibraryHandle), - m_isBackendInitialized(false), - m_isContextCreated(false) { - split(m_inputListPaths, inputListPaths, ','); - split(m_opPackagePaths, opPackagePaths, ','); - if (m_outputPath.empty()) { - m_outputPath = s_defaultOutputPath; - } - - if (!m_cachedBinaryPath.empty()) { - m_runInCpu = false; // Run *.bin in HTP. 
- QNN_DEBUG("Run model on HTP."); - } - - return; -} - -sample_app::QnnSampleApp::~QnnSampleApp() { - // Free Profiling object if it was created - if (nullptr != m_profileBackendHandle) { - QNN_DEBUG("Freeing backend profile object."); - if (QNN_PROFILE_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.profileFree(m_profileBackendHandle)) { - QNN_ERROR("Could not free backend profile handle."); - } - } - // Free context if not already done - if (m_isContextCreated) { - QNN_DEBUG("Freeing context"); - if (QNN_CONTEXT_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.contextFree(m_context, nullptr)) { - QNN_ERROR("Could not free context"); - } - } - m_isContextCreated = false; - // Terminate backend - if (m_isBackendInitialized && nullptr != m_qnnFunctionPointers.qnnInterface.backendFree) { - QNN_DEBUG("Freeing backend"); - if (QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendFree(m_backendHandle)) { - QNN_ERROR("Could not free backend"); - } - } - m_isBackendInitialized = false; - // Terminate logging in the backend - if (nullptr != m_qnnFunctionPointers.qnnInterface.logFree && nullptr != m_logHandle) { - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.logFree(m_logHandle)) { - QNN_WARN("Unable to terminate logging in the backend."); - } - } - return; -} - -std::string sample_app::QnnSampleApp::getBackendBuildId() { - char* backendBuildId{nullptr}; - if (QNN_SUCCESS != - m_qnnFunctionPointers.qnnInterface.backendGetBuildId((const char**)&backendBuildId)) { - QNN_ERROR("Unable to get build Id from the backend."); - } - return (backendBuildId == nullptr ? std::string("") : std::string(backendBuildId)); -} - -// Initialize QnnSampleApp. Things it does: -// 1. Create output directory -// 2. Read all input list paths provided -// during creation. 
-sample_app::StatusCode sample_app::QnnSampleApp::initialize() { - // Create Output Directory - if (m_dumpOutputs && !::pal::FileOp::checkFileExists(m_outputPath) && - !pal::Directory::makePath(m_outputPath)) { - exitWithMessage("Could not create output directory: " + m_outputPath, EXIT_FAILURE); - } - // Read Input File List - bool readSuccess; - std::tie(m_inputFileLists, m_inputNameToIndex, readSuccess) = readInputLists(m_inputListPaths); - if (!readSuccess) { - exitWithMessage("Could not read input lists", EXIT_FAILURE); - } - // initialize logging in the backend - if (log::isLogInitialized()) { - auto logCallback = log::getLogCallback(); - auto logLevel = log::getLogLevel(); - QNN_INFO("Initializing logging in the backend. Callback: [%p], Log Level: [%d]", - logCallback, - logLevel); - if (QNN_SUCCESS != - m_qnnFunctionPointers.qnnInterface.logCreate(logCallback, logLevel, &m_logHandle)) { - QNN_WARN("Unable to initialize logging in the backend."); - } - } else { - QNN_WARN("Logging not available in the backend."); - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::initializeProfiling() { - if (ProfilingLevel::OFF != m_profilingLevel) { - QNN_INFO("Profiling turned on; level = %d", m_profilingLevel); - if (ProfilingLevel::BASIC == m_profilingLevel) { - QNN_INFO("Basic profiling requested. Creating Qnn Profile object."); - if (QNN_PROFILE_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.profileCreate( - m_backendHandle, QNN_PROFILE_LEVEL_BASIC, &m_profileBackendHandle)) { - QNN_WARN("Unable to create profile handle in the backend."); - return StatusCode::FAILURE; - } - } else if (ProfilingLevel::DETAILED == m_profilingLevel) { - QNN_INFO("Detailed profiling requested. 
Creating Qnn Profile object."); - if (QNN_PROFILE_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.profileCreate( - m_backendHandle, QNN_PROFILE_LEVEL_DETAILED, &m_profileBackendHandle)) { - QNN_ERROR("Unable to create profile handle in the backend."); - return StatusCode::FAILURE; - } - } - } - return StatusCode::SUCCESS; -} - -// Simple method to report error from app to lib. -int32_t sample_app::QnnSampleApp::reportError(const std::string& err) { - QNN_ERROR("%s", err.c_str()); - return EXIT_FAILURE; -} - -// Initialize a QnnBackend. -sample_app::StatusCode sample_app::QnnSampleApp::initializeBackend() { - auto qnnStatus = m_qnnFunctionPointers.qnnInterface.backendCreate( - m_logHandle, (const QnnBackend_Config_t**)m_backendConfig, &m_backendHandle); - if (QNN_BACKEND_NO_ERROR != qnnStatus) { - QNN_ERROR("Could not initialize backend due to error = %d", qnnStatus); - return StatusCode::FAILURE; - } - QNN_INFO("Initialize Backend Returned Status = %d", qnnStatus); - m_isBackendInitialized = true; - return StatusCode::SUCCESS; -} - -// Terminate the backend after done. -sample_app::StatusCode sample_app::QnnSampleApp::terminateBackend() { - if ((m_isBackendInitialized && nullptr != m_qnnFunctionPointers.qnnInterface.backendFree) && - QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendFree(m_backendHandle)) { - QNN_ERROR("Could not terminate backend"); - return StatusCode::FAILURE; - } - m_isBackendInitialized = false; - return StatusCode::SUCCESS; -} - -// Register op packages and interface providers supplied during -// object creation. If there are multiple op packages, register -// them sequentially in the order provided. 
-sample_app::StatusCode sample_app::QnnSampleApp::registerOpPackages() { - const size_t pathIdx = 0; - const size_t interfaceProviderIdx = 1; - for (auto const& opPackagePath : m_opPackagePaths) { - std::vector opPackage; - split(opPackage, opPackagePath, ':'); - QNN_DEBUG("opPackagePath: %s", opPackagePath.c_str()); - const char* target = nullptr; - const size_t targetIdx = 2; - if (opPackage.size() != 2 && opPackage.size() != 3) { - QNN_ERROR("Malformed opPackageString provided: %s", opPackagePath.c_str()); - return StatusCode::FAILURE; - } - if (opPackage.size() == 3) { - target = (char*)opPackage[targetIdx].c_str(); - } - if (nullptr == m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage) { - QNN_ERROR("backendRegisterOpPackageFnHandle is nullptr."); - return StatusCode::FAILURE; - } - if (QNN_BACKEND_NO_ERROR != m_qnnFunctionPointers.qnnInterface.backendRegisterOpPackage( - m_backendHandle, - (char*)opPackage[pathIdx].c_str(), - (char*)opPackage[interfaceProviderIdx].c_str(), - target)) { - QNN_ERROR("Could not register Op Package: %s and interface provider: %s", - opPackage[pathIdx].c_str(), - opPackage[interfaceProviderIdx].c_str()); - return StatusCode::FAILURE; - } - QNN_INFO("Registered Op Package: %s and interface provider: %s", - opPackage[pathIdx].c_str(), - opPackage[interfaceProviderIdx].c_str()); - } - return StatusCode::SUCCESS; -} - -// Create a Context in a backend. -sample_app::StatusCode sample_app::QnnSampleApp::createContext() { - if (QNN_CONTEXT_NO_ERROR != m_qnnFunctionPointers.qnnInterface.contextCreate( - m_backendHandle, - m_deviceHandle, - (const QnnContext_Config_t**)m_contextConfig, - &m_context)) { - QNN_ERROR("Could not create context"); - return StatusCode::FAILURE; - } - m_isContextCreated = true; - return StatusCode::SUCCESS; -} - -// Free context after done. 
-sample_app::StatusCode sample_app::QnnSampleApp::freeContext() { - if (QNN_CONTEXT_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.contextFree(m_context, m_profileBackendHandle)) { - QNN_ERROR("Could not free context"); - return StatusCode::FAILURE; - } - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } - m_isContextCreated = false; - return StatusCode::SUCCESS; -} - -// Calls composeGraph function in QNN's model.so. -// composeGraphs is supposed to populate graph related -// information in m_graphsInfo and m_graphsCount. -// m_debug is the option supplied to composeGraphs to -// say that all intermediate tensors including output tensors -// are expected to be read by the app. -sample_app::StatusCode sample_app::QnnSampleApp::composeGraphs() { - auto returnStatus = StatusCode::SUCCESS; - if (qnn_wrapper_api::ModelError_t::MODEL_NO_ERROR != - m_qnnFunctionPointers.composeGraphsFnHandle( - m_backendHandle, - m_qnnFunctionPointers.qnnInterface, - m_context, - (const qnn_wrapper_api::GraphConfigInfo_t**)m_graphConfigsInfo, - m_graphConfigsInfoCount, - &m_graphsInfo, - &m_graphsCount, - m_debug, - log::getLogCallback(), - log::getLogLevel())) { - QNN_ERROR("Failed in composeGraphs()"); - returnStatus = StatusCode::FAILURE; - } - return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::finalizeGraphs() { - for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) { - if (QNN_GRAPH_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.graphFinalize( - (*m_graphsInfo)[graphIdx].graph, m_profileBackendHandle, nullptr)) { - return StatusCode::FAILURE; - } - } - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } - auto returnStatus = StatusCode::SUCCESS; - if (!m_saveBinaryName.empty()) { - QNN_INFO("Before saveBinary(): saving context and metadata."); - returnStatus = saveBinary(); - } else { - QNN_DEBUG("m_saveBinaryName is empty()"); - } - 
return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::createFromBinary() { - QNN_FUNCTION_ENTRY_LOG; - if (m_cachedBinaryPath.empty()) { - QNN_ERROR("No name provided to read binary file from."); - return StatusCode::FAILURE; - } - if (nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextCreate || - nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextGetBinaryInfo || - nullptr == m_qnnFunctionPointers.qnnSystemInterface.systemContextFree) { - QNN_ERROR("QNN System function pointers are not populated."); - return StatusCode::FAILURE; - } - uint64_t bufferSize{0}; - std::shared_ptr buffer{nullptr}; - // read serialized binary into a byte buffer - tools::datautil::StatusCode status{tools::datautil::StatusCode::SUCCESS}; - std::tie(status, bufferSize) = tools::datautil::getFileSize(m_cachedBinaryPath); - if (0 == bufferSize) { - QNN_ERROR("Received path to an empty file. Nothing to deserialize."); - return StatusCode::FAILURE; - } - buffer = std::shared_ptr(new uint8_t[bufferSize], std::default_delete()); - if (!buffer) { - QNN_ERROR("Failed to allocate memory."); - return StatusCode::FAILURE; - } - - status = tools::datautil::readBinaryFromFile( - m_cachedBinaryPath, reinterpret_cast(buffer.get()), bufferSize); - if (status != tools::datautil::StatusCode::SUCCESS) { - QNN_ERROR("Failed to read binary data."); - return StatusCode::FAILURE; - } - - // inspect binary info - auto returnStatus = StatusCode::SUCCESS; - QnnSystemContext_Handle_t sysCtxHandle{nullptr}; - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnSystemInterface.systemContextCreate(&sysCtxHandle)) { - QNN_ERROR("Could not create system handle."); - returnStatus = StatusCode::FAILURE; - } - const QnnSystemContext_BinaryInfo_t* binaryInfo{nullptr}; - Qnn_ContextBinarySize_t binaryInfoSize{0}; - if (StatusCode::SUCCESS == returnStatus && - QNN_SUCCESS != m_qnnFunctionPointers.qnnSystemInterface.systemContextGetBinaryInfo( - sysCtxHandle, - 
static_cast(buffer.get()), - bufferSize, - &binaryInfo, - &binaryInfoSize)) { - QNN_ERROR("Failed to get context binary info"); - returnStatus = StatusCode::FAILURE; - } - - // fill GraphInfo_t based on binary info - if (StatusCode::SUCCESS == returnStatus && - !copyMetadataToGraphsInfo(binaryInfo, m_graphsInfo, m_graphsCount)) { - QNN_ERROR("Failed to copy metadata."); - returnStatus = StatusCode::FAILURE; - } - m_qnnFunctionPointers.qnnSystemInterface.systemContextFree(sysCtxHandle); - sysCtxHandle = nullptr; - - if (StatusCode::SUCCESS == returnStatus && - nullptr == m_qnnFunctionPointers.qnnInterface.contextCreateFromBinary) { - QNN_ERROR("contextCreateFromBinaryFnHandle is nullptr."); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS == returnStatus && - m_qnnFunctionPointers.qnnInterface.contextCreateFromBinary( - m_backendHandle, - m_deviceHandle, - (const QnnContext_Config_t**)m_contextConfig, - static_cast(buffer.get()), - bufferSize, - &m_context, - m_profileBackendHandle)) { - QNN_ERROR("Could not create context from binary."); - returnStatus = StatusCode::FAILURE; - } - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } - m_isContextCreated = true; - if (StatusCode::SUCCESS == returnStatus) { - for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) { - if (nullptr == m_qnnFunctionPointers.qnnInterface.graphRetrieve) { - QNN_ERROR("graphRetrieveFnHandle is nullptr."); - returnStatus = StatusCode::FAILURE; - break; - } - if (QNN_SUCCESS != - m_qnnFunctionPointers.qnnInterface.graphRetrieve( - m_context, (*m_graphsInfo)[graphIdx].graphName, &((*m_graphsInfo)[graphIdx].graph))) { - QNN_ERROR("Unable to retrieve graph handle for graph Idx: %d", graphIdx); - returnStatus = StatusCode::FAILURE; - } - } - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_DEBUG("Cleaning up graph Info structures."); - qnn_wrapper_api::freeGraphsInfo(&m_graphsInfo, m_graphsCount); - } - 
QNN_FUNCTION_EXIT_LOG; - return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::saveBinary() { - if (m_saveBinaryName.empty()) { - QNN_ERROR("No name provided to save binary file."); - return StatusCode::FAILURE; - } - if (nullptr == m_qnnFunctionPointers.qnnInterface.contextGetBinarySize || - nullptr == m_qnnFunctionPointers.qnnInterface.contextGetBinary) { - QNN_ERROR("contextGetBinarySizeFnHandle or contextGetBinaryFnHandle is nullptr."); - return StatusCode::FAILURE; - } - uint64_t requiredBufferSize{0}; - if (QNN_CONTEXT_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.contextGetBinarySize(m_context, &requiredBufferSize)) { - QNN_ERROR("Could not get the required binary size."); - return StatusCode::FAILURE; - } - std::unique_ptr saveBuffer(new uint8_t[requiredBufferSize]); - if (nullptr == saveBuffer) { - QNN_ERROR("Could not allocate buffer to save binary."); - return StatusCode::FAILURE; - } - uint64_t writtenBufferSize{0}; - if (QNN_CONTEXT_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.contextGetBinary(m_context, - reinterpret_cast(saveBuffer.get()), - requiredBufferSize, - &writtenBufferSize)) { - QNN_ERROR("Could not get binary."); - return StatusCode::FAILURE; - } - if (requiredBufferSize < writtenBufferSize) { - QNN_ERROR( - "Illegal written buffer size [%d] bytes. 
Cannot exceed allocated memory of [%d] bytes", - writtenBufferSize, - requiredBufferSize); - return StatusCode::FAILURE; - } - auto dataUtilStatus = tools::datautil::writeBinaryToFile( - m_outputPath, m_saveBinaryName + ".bin", (uint8_t*)saveBuffer.get(), writtenBufferSize); - if (tools::datautil::StatusCode::SUCCESS != dataUtilStatus) { - QNN_ERROR("Error while writing binary to file."); - return StatusCode::FAILURE; - } - return StatusCode::SUCCESS; -} - -// C:\Qualcomm\AIStack\QNN\\include\QNN\QnnProfile.h -// C:\Qualcomm\AIStack\QNN\\include\QNN\HTP\QnnHtpProfile.h -sample_app::StatusCode sample_app::QnnSampleApp::extractBackendProfilingInfo( - Qnn_ProfileHandle_t profileHandle) { - if (nullptr == m_profileBackendHandle) { - QNN_ERROR("Backend Profile handle is nullptr; may not be initialized."); - return StatusCode::FAILURE; - } - const QnnProfile_EventId_t* profileEvents{nullptr}; - uint32_t numEvents{0}; - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetEvents( - profileHandle, &profileEvents, &numEvents)) { - QNN_ERROR("Failure in profile get events."); - return StatusCode::FAILURE; - } - QNN_INFO("ProfileEvents: [%p], numEvents: [%d]", profileEvents, numEvents); - for (size_t event = 0; event < numEvents; event++) { - extractProfilingEvent(*(profileEvents + event)); - extractProfilingSubEvents(*(profileEvents + event)); - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::extractProfilingSubEvents( - QnnProfile_EventId_t profileEventId) { - const QnnProfile_EventId_t* profileSubEvents{nullptr}; - uint32_t numSubEvents{0}; - if (QNN_PROFILE_NO_ERROR != m_qnnFunctionPointers.qnnInterface.profileGetSubEvents( - profileEventId, &profileSubEvents, &numSubEvents)) { - QNN_ERROR("Failure in profile get sub events."); - return StatusCode::FAILURE; - } - QNN_INFO("ProfileSubEvents: [%p], numSubEvents: [%d]", profileSubEvents, numSubEvents); - for (size_t subEvent = 0; subEvent < numSubEvents; subEvent++) 
{ - extractProfilingEvent(*(profileSubEvents + subEvent)); - extractProfilingSubEvents(*(profileSubEvents + subEvent)); - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::extractProfilingEvent( - QnnProfile_EventId_t profileEventId) { - QnnProfile_EventData_t eventData; - if (QNN_PROFILE_NO_ERROR != - m_qnnFunctionPointers.qnnInterface.profileGetEventData(profileEventId, &eventData)) { - QNN_ERROR("Failure in profile get event type."); - return StatusCode::FAILURE; - } - QNN_INFO("Printing Event Info - Event Type: [%d], Event Value: [%" PRIu64 - "], Event Identifier: [%s], Event Unit: [%d]", - eventData.type, - eventData.value, - eventData.identifier, - eventData.unit); - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::verifyFailReturnStatus(Qnn_ErrorHandle_t errCode) { - auto returnStatus = sample_app::StatusCode::FAILURE; - switch (errCode) { - case QNN_COMMON_ERROR_SYSTEM_COMMUNICATION: - returnStatus = sample_app::StatusCode::FAILURE_SYSTEM_COMMUNICATION_ERROR; - break; - case QNN_COMMON_ERROR_SYSTEM: - returnStatus = sample_app::StatusCode::FAILURE_SYSTEM_ERROR; - break; - case QNN_COMMON_ERROR_NOT_SUPPORTED: - returnStatus = sample_app::StatusCode::QNN_FEATURE_UNSUPPORTED; - break; - default: - break; - } - return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::isDevicePropertySupported() { - if (nullptr != m_qnnFunctionPointers.qnnInterface.propertyHasCapability) { - auto qnnStatus = - m_qnnFunctionPointers.qnnInterface.propertyHasCapability(QNN_PROPERTY_GROUP_DEVICE); - if (QNN_PROPERTY_NOT_SUPPORTED == qnnStatus) { - QNN_WARN("Device property is not supported"); - } - if (QNN_PROPERTY_ERROR_UNKNOWN_KEY == qnnStatus) { - QNN_ERROR("Device property is not known to backend"); - return StatusCode::FAILURE; - } - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::createDevice() { - if (nullptr != 
m_qnnFunctionPointers.qnnInterface.deviceCreate) { - auto qnnStatus = - m_qnnFunctionPointers.qnnInterface.deviceCreate(m_logHandle, nullptr, &m_deviceHandle); - if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) { - QNN_ERROR("Failed to create device"); - return verifyFailReturnStatus(qnnStatus); - } - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::freeDevice() { - if (nullptr != m_qnnFunctionPointers.qnnInterface.deviceFree) { - auto qnnStatus = m_qnnFunctionPointers.qnnInterface.deviceFree(m_deviceHandle); - if (QNN_SUCCESS != qnnStatus && QNN_DEVICE_ERROR_UNSUPPORTED_FEATURE != qnnStatus) { - QNN_ERROR("Failed to free device"); - return verifyFailReturnStatus(qnnStatus); - } - } - return StatusCode::SUCCESS; -} - -// executeGraphs() that is currently used by qnn-sample-app's main.cpp. -// This function runs all the graphs present in model.so by reading -// inputs from input_list based files and writes output to .raw files. 
-sample_app::StatusCode sample_app::QnnSampleApp::executeGraphs() { - auto returnStatus = StatusCode::SUCCESS; - for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) { - QNN_DEBUG("Starting execution for graphIdx: %d", graphIdx); - if (graphIdx >= m_inputFileLists.size()) { - QNN_ERROR("No Inputs available for: %d", graphIdx); - returnStatus = StatusCode::FAILURE; - break; - } - Qnn_Tensor_t* inputs = nullptr; - Qnn_Tensor_t* outputs = nullptr; - if (iotensor::StatusCode::SUCCESS != - m_ioTensor.setupInputAndOutputTensors(&inputs, &outputs, (*m_graphsInfo)[graphIdx])) { - QNN_ERROR("Error in setting up Input and output Tensors for graphIdx: %d", graphIdx); - returnStatus = StatusCode::FAILURE; - break; - } - auto inputFileList = m_inputFileLists[graphIdx]; - auto graphInfo = (*m_graphsInfo)[graphIdx]; - if (!inputFileList.empty()) { - size_t totalCount = inputFileList[0].size(); - size_t inputFileIndexOffset = 0; - while (inputFileIndexOffset < totalCount) { - iotensor::StatusCode iotReturnStatus; - size_t numInputFilesPopulated; - size_t batchSize; - std::tie(iotReturnStatus, numInputFilesPopulated, batchSize) = - m_ioTensor.populateInputTensors(graphIdx, - inputFileList, - inputFileIndexOffset, - false, - m_inputNameToIndex[graphIdx], - inputs, - graphInfo, - m_inputDataType); - if (iotensor::StatusCode::SUCCESS != iotReturnStatus) { - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("Successfully populated input tensors for graphIdx: %d", graphIdx); - Qnn_ErrorHandle_t executeStatus = QNN_GRAPH_NO_ERROR; - executeStatus = - m_qnnFunctionPointers.qnnInterface.graphExecute(graphInfo.graph, - inputs, - graphInfo.numInputTensors, - outputs, - graphInfo.numOutputTensors, - m_profileBackendHandle, - nullptr); - if (QNN_GRAPH_NO_ERROR != executeStatus) { - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("Successfully executed graphIdx: %d ", graphIdx); - if 
(iotensor::StatusCode::SUCCESS != - m_ioTensor.writeOutputTensors(graphIdx, - inputFileIndexOffset, - graphInfo.graphName, - outputs, - graphInfo.numOutputTensors, - m_outputDataType, - m_graphsCount, - m_outputPath, - numInputFilesPopulated, - batchSize)) { - returnStatus = StatusCode::FAILURE; - } - } - inputFileIndexOffset += numInputFilesPopulated; - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("Execution of Graph: %d failed!", graphIdx); - break; - } - } - } - m_ioTensor.tearDownInputAndOutputTensors( - inputs, outputs, graphInfo.numInputTensors, graphInfo.numOutputTensors); - inputs = nullptr; - outputs = nullptr; - if (StatusCode::SUCCESS != returnStatus) { - break; - } - } - - return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::executeGraphsBuffers(std::vector& inputBuffers, - std::vector& outputBuffers, std::vector& outputSize, - std::string perfProfile) { - auto returnStatus = StatusCode::SUCCESS; - - // We push '12345' to 'outputSize' in function 'ModelRun@main.cpp@SvcQNNHelpper.exe'. In this case, share memory will not be freed, we can use the share memory as output buffer directly. - bool shareMemory = false; - uint8_t* pShareBuffer = inputBuffers[0]; - if (outputSize.size() == 1 && outputSize[0] == 12345) { - shareMemory = true; - outputSize.clear(); - - // Find the share buffer entry point, the smalest point address in 'inputBuffers'. 
- for (int i = 0; i < inputBuffers.size(); i++) { - if (pShareBuffer > inputBuffers[i]) - pShareBuffer = inputBuffers[i]; - } - } - - for (size_t graphIdx = 0; graphIdx < m_graphsCount; graphIdx++) { - QNN_DEBUG("Starting execution for graphIdx: %d", graphIdx); - if (graphIdx >= inputBuffers.size()) { - QNN_ERROR("No Inputs available for: %d", graphIdx); - returnStatus = StatusCode::FAILURE; - break; - } - - Qnn_Tensor_t* inputs = nullptr; - Qnn_Tensor_t* outputs = nullptr; - if (iotensor::StatusCode::SUCCESS != m_ioTensor.setupInputAndOutputTensors(&inputs, &outputs, (*m_graphsInfo)[graphIdx])) { - QNN_ERROR("Error in setting up Input and output Tensors for graphIdx: %d", graphIdx); - returnStatus = StatusCode::FAILURE; - break; - } - - auto graphInfo = (*m_graphsInfo)[graphIdx]; - if (!inputBuffers.empty()) { - //size_t totalCount = inputFileList[0].size(); - //while (!inputFileList[0].empty()) - { - size_t startIdx = 0; // (totalCount - inputFileList[0].size()); - if (iotensor::StatusCode::SUCCESS != - m_ioTensor.populateInputTensors((uint32_t)graphIdx, inputBuffers, inputs, graphInfo, m_inputDataType)) { - returnStatus = StatusCode::FAILURE; - } - - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("Successfully populated input tensors for graphIdx: %d", graphIdx); - Qnn_ErrorHandle_t executeStatus = QNN_GRAPH_NO_ERROR; - - if (false == m_runInCpu && "default" != perfProfile && false == boostPerformance(m_perfInfra, perfProfile)) { - QNN_ERROR("Performance boost failure"); - } - - executeStatus = - m_qnnFunctionPointers.qnnInterface.graphExecute(graphInfo.graph, - inputs, - graphInfo.numInputTensors, - outputs, - graphInfo.numOutputTensors, - m_profileBackendHandle, - nullptr); - - if (false == m_runInCpu && "default" != perfProfile && false == resetPerformance(m_perfInfra)) { - QNN_ERROR("Performance reset failure"); - } - - if (ProfilingLevel::OFF != m_profilingLevel) { - extractBackendProfilingInfo(m_profileBackendHandle); - } - - if (QNN_GRAPH_NO_ERROR 
!= executeStatus) { - returnStatus = StatusCode::FAILURE; - } - - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("Successfully executed graphIdx: %d ", graphIdx); - - // populate output buffer directly - size_t offset = 0; - for (size_t outputIdx = 0; outputIdx < graphInfo.numOutputTensors; outputIdx++) { - QNN_DEBUG("Writing output for outputIdx: %d", outputIdx); - - std::vector dims; - m_ioTensor.fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(outputs[outputIdx]), QNN_TENSOR_GET_RANK(outputs[outputIdx])); - size_t elementCount = datautil::calculateElementCount(dims); - size_t size = elementCount * (sizeof(float) / sizeof(uint8_t)); - uint8_t* buffer = nullptr; - - float* floatBuffer = nullptr; - if (shareMemory) { - floatBuffer = (float*)(pShareBuffer + offset); - offset += size; - } - - if (QNN_TENSOR_GET_DATA_TYPE(outputs[outputIdx]) == QNN_DATATYPE_FLOAT_32) { - QNN_DEBUG("Writing in output->dataType == QNN_DATATYPE_FLOAT_32"); - // Run the model in CPU. - if (!floatBuffer) { - floatBuffer = (float*)malloc(size); - } - memcpy(floatBuffer, reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(&(outputs[outputIdx])).data), size); - buffer = reinterpret_cast(floatBuffer); - } - else if (m_outputDataType == OutputDataType::FLOAT_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_ONLY"); - - auto ioReturnStatus = m_ioTensor.convertToFloat(&floatBuffer, &outputs[outputIdx]); - if (iotensor::StatusCode::SUCCESS != ioReturnStatus) { - QNN_ERROR("failure in convertToFloat"); - return StatusCode::FAILURE; - } - buffer = reinterpret_cast(floatBuffer); - } - else if (m_outputDataType == OutputDataType::NATIVE_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::NATIVE_ONLY"); - QNN_ERROR("Can't handle NATIVE_ONLY data type"); - // TODO: handle native only case. 
- } - else if (m_outputDataType == OutputDataType::FLOAT_AND_NATIVE) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_AND_NATIVE"); - QNN_ERROR("Can't handle FLOAT_AND_NATIVE data type"); - // TODO: handle float and native case. - } - - if (buffer) { - outputBuffers.push_back(buffer); - outputSize.push_back(size); - } - } - // QNN_ERROR("output buffer size: %d\n", outputBuffers.size()); - } - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("Execution of Graph: %d failed!", graphIdx); - break; - } - } - } - m_ioTensor.tearDownInputAndOutputTensors(inputs, outputs, graphInfo.numInputTensors, graphInfo.numOutputTensors); - inputs = nullptr; - outputs = nullptr; - if (StatusCode::SUCCESS != returnStatus) { - break; - } - } - - return returnStatus; -} - -sample_app::StatusCode sample_app::QnnSampleApp::freeGraphs() { - qnn_wrapper_api::freeGraphsInfo(&m_graphsInfo, m_graphsCount); - m_graphsInfo = nullptr; - - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::initializeLog() { - // initialize logging in the backend - if (log::isLogInitialized()) { - auto logCallback = log::getLogCallback(); - auto logLevel = log::getLogLevel(); - QNN_INFO("Initializing logging in the backend. 
Callback: [%p], Log Level: [%d]", logCallback, logLevel); - if (QNN_SUCCESS != - m_qnnFunctionPointers.qnnInterface.logCreate(logCallback, logLevel, &m_logHandle)) { - QNN_WARN("Unable to initialize logging in the backend."); - } - } else { - QNN_WARN("Logging not available in the backend."); - } - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::setLogLevel(QnnLog_Level_t logLevel) { - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.logSetLogLevel(m_logHandle, logLevel)) { - QNN_WARN("Unable to set logging level in the backend."); - } - - return StatusCode::SUCCESS; -} - -// Performance Setting for HTP -sample_app::StatusCode sample_app::QnnSampleApp::initializePerformance() { - QnnDevice_Infrastructure_t deviceInfra = nullptr; - if (QNN_SUCCESS != m_qnnFunctionPointers.qnnInterface.deviceGetInfrastructure(&deviceInfra)) { - QNN_ERROR("Failure in deviceGetInfrastructure()"); - return StatusCode::FAILURE; - } - - QnnHtpDevice_Infrastructure_t* htpInfra = static_cast(deviceInfra); - m_perfInfra = htpInfra->perfInfra; - uint32_t deviceId = 0; - uint32_t coreId = 0; - if (QNN_SUCCESS != m_perfInfra.createPowerConfigId(deviceId, coreId, &m_powerConfigId)) { - QNN_ERROR("Failure in createPowerConfigId()"); - return StatusCode::FAILURE; - } - - return StatusCode::SUCCESS; -} - -sample_app::StatusCode sample_app::QnnSampleApp::destroyPerformance() { - if (true == m_runInCpu) - return StatusCode::SUCCESS; - - if (QNN_SUCCESS != m_perfInfra.destroyPowerConfigId(m_powerConfigId)) { - QNN_ERROR("Failure in destroyPowerConfigId()"); - return StatusCode::FAILURE; - } - - return StatusCode::SUCCESS; -} - - diff --git a/LibQNNHelper/src/QnnSampleApp.hpp b/LibQNNHelper/src/QnnSampleApp.hpp deleted file mode 100644 index 959a84b..0000000 --- a/LibQNNHelper/src/QnnSampleApp.hpp +++ /dev/null @@ -1,156 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation 
Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== -#pragma once - -#include -#include - -#include "IOTensor.hpp" -#include "SampleApp.hpp" - -// zw: For supporting BackendExtensions. -#include "HTP/QnnHtpPerfInfrastructure.h" -#include "HTP/QnnHtpDevice.h" - - -bool disableDcvs(QnnHtpDevice_PerfInfrastructure_t perfInfra); -bool enableDcvs(QnnHtpDevice_PerfInfrastructure_t perfInfra); -bool boostPerformance(QnnHtpDevice_PerfInfrastructure_t perfInfra, std::string perfProfile); -bool resetPerformance(QnnHtpDevice_PerfInfrastructure_t perfInfra); - -namespace qnn { -namespace tools { -namespace sample_app { - -enum class StatusCode { - SUCCESS, - FAILURE, - FAILURE_INPUT_LIST_EXHAUSTED, - FAILURE_SYSTEM_ERROR, - FAILURE_SYSTEM_COMMUNICATION_ERROR, - QNN_FEATURE_UNSUPPORTED -}; - -class QnnSampleApp { - public: - QnnSampleApp(QnnFunctionPointers qnnFunctionPointers, - std::string inputListPaths, - std::string opPackagePaths, - void *backendHandle, - std::string outputPath = s_defaultOutputPath, - bool debug = false, - iotensor::OutputDataType outputDataType = iotensor::OutputDataType::FLOAT_ONLY, - iotensor::InputDataType inputDataType = iotensor::InputDataType::FLOAT, - ProfilingLevel profilingLevel = ProfilingLevel::OFF, - bool dumpOutputs = false, - std::string cachedBinaryPath = "", - std::string saveBinaryName = ""); - - // @brief Print a message to STDERR then return a nonzero - // exit status. 
- int32_t reportError(const std::string &err); - - StatusCode initialize(); - - StatusCode initializeBackend(); - - StatusCode createContext(); - - StatusCode composeGraphs(); - - StatusCode finalizeGraphs(); - - StatusCode executeGraphs(); - - StatusCode registerOpPackages(); - - StatusCode createFromBinary(); - - StatusCode saveBinary(); - - StatusCode freeContext(); - - StatusCode terminateBackend(); - - StatusCode freeGraphs(); - - Qnn_ContextHandle_t getContext(); - - StatusCode initializeProfiling(); - - std::string getBackendBuildId(); - - StatusCode isDevicePropertySupported(); - - StatusCode createDevice(); - - StatusCode freeDevice(); - - StatusCode verifyFailReturnStatus(Qnn_ErrorHandle_t errCode); - - -// zw. - StatusCode executeGraphsBuffers(std::vector& inputBuffers, - std::vector& outputBuffers, std::vector& outputSize, - std::string perfProfile); - - StatusCode initializeLog(); - StatusCode setLogLevel(QnnLog_Level_t logLevel); - - StatusCode initializePerformance(); - StatusCode destroyPerformance(); - - virtual ~QnnSampleApp(); - - private: - StatusCode extractBackendProfilingInfo(Qnn_ProfileHandle_t profileHandle); - - StatusCode extractProfilingSubEvents(QnnProfile_EventId_t profileEventId); - - StatusCode extractProfilingEvent(QnnProfile_EventId_t profileEventId); - - static const std::string s_defaultOutputPath; - - QnnFunctionPointers m_qnnFunctionPointers; - std::vector m_inputListPaths; - std::vector>> m_inputFileLists; - std::vector> m_inputNameToIndex; - std::vector m_opPackagePaths; - std::string m_outputPath; - std::string m_saveBinaryName; - std::string m_cachedBinaryPath; - QnnBackend_Config_t **m_backendConfig = nullptr; - Qnn_ContextHandle_t m_context = nullptr; - QnnContext_Config_t **m_contextConfig = nullptr; - bool m_debug; - iotensor::OutputDataType m_outputDataType; - iotensor::InputDataType m_inputDataType; - ProfilingLevel m_profilingLevel; - bool m_dumpOutputs; - qnn_wrapper_api::GraphInfo_t **m_graphsInfo; - uint32_t 
m_graphsCount; - void *m_backendLibraryHandle; - iotensor::IOTensor m_ioTensor; - bool m_isBackendInitialized; - bool m_isContextCreated; - Qnn_ProfileHandle_t m_profileBackendHandle = nullptr; - qnn_wrapper_api::GraphConfigInfo_t **m_graphConfigsInfo = nullptr; - uint32_t m_graphConfigsInfoCount; - Qnn_LogHandle_t m_logHandle = nullptr; - Qnn_BackendHandle_t m_backendHandle = nullptr; - Qnn_DeviceHandle_t m_deviceHandle = nullptr; - - // zw. - uint32_t m_powerConfigId = 1; - QnnHtpDevice_PerfInfrastructure_t m_perfInfra = {nullptr}; - bool m_runInCpu = true; -}; -} // namespace sample_app -} // namespace tools -} // namespace qnn - diff --git a/LibQNNHelper/src/QnnTypeMacros.hpp b/LibQNNHelper/src/QnnTypeMacros.hpp deleted file mode 100644 index d274009..0000000 --- a/LibQNNHelper/src/QnnTypeMacros.hpp +++ /dev/null @@ -1,668 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
//
// SPDX-License-Identifier: BSD-3-Clause
//
//==============================================================================

#pragma once

#include "QnnTypes.h"

// Version-dispatch layer over the QNN public structs: each getter/setter pair
// hides the versioned union access (v1/v2), and the macros at the bottom give
// the rest of the code a uniform, version-agnostic vocabulary.

#define QNN_OP_CFG_VALID(opConfig) ((opConfig).version == QNN_OPCONFIG_VERSION_1)

// Build an op config with its versioned payload default-initialized.
inline Qnn_OpConfig_t createQnnOpConfig(const Qnn_OpConfigVersion_t version) {
  Qnn_OpConfig_t opConfig = QNN_OPCONFIG_INIT;
  opConfig.version        = version;
  if (version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1 = QNN_OPCONFIG_V1_INIT;
  }
  return opConfig;
}

// ---- Op config getters (return NULL/0 for unknown versions) ----

inline const char* getQnnOpConfigName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.name;
  }
  return NULL;
}

inline const char* getQnnOpConfigName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigName(*opConfig);
}

inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.packageName;
  }
  return NULL;
}

inline const char* getQnnOpConfigPackageName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigPackageName(*opConfig);
}

inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.typeName;
  }
  return NULL;
}

inline const char* getQnnOpConfigTypeName(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigTypeName(*opConfig);
}

inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfParams;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumParams(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumParams(*opConfig);
}

inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.params;
  }
  return NULL;
}

inline const Qnn_Param_t* getQnnOpConfigParams(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigParams(*opConfig);
}

inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfInputs;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumInputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumInputs(*opConfig);
}

inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.inputTensors;
  }
  return NULL;
}

inline const Qnn_Tensor_t* getQnnOpConfigInputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigInputs(*opConfig);
}

inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.numOfOutputs;
  }
  return 0u;
}

inline uint32_t getQnnOpConfigNumOutputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigNumOutputs(*opConfig);
}

inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t& opConfig) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    return opConfig.v1.outputTensors;
  }
  return NULL;
}

inline const Qnn_Tensor_t* getQnnOpConfigOutputs(const Qnn_OpConfig_t* const opConfig) {
  return getQnnOpConfigOutputs(*opConfig);
}

// ---- Op config setters (silently no-op for unknown versions) ----

inline void setQnnOpConfigName(Qnn_OpConfig_t& opConfig, const char* const name) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.name = name;
  }
}

inline void setQnnOpConfigName(Qnn_OpConfig_t* const opConfig, const char* const name) {
  setQnnOpConfigName(*opConfig, name);
}

inline void setQnnOpConfigPackageName(Qnn_OpConfig_t& opConfig, const char* const packageName) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.packageName = packageName;
  }
}

inline void setQnnOpConfigPackageName(Qnn_OpConfig_t* const opConfig,
                                      const char* const packageName) {
  setQnnOpConfigPackageName(*opConfig, packageName);
}

inline void setQnnOpConfigTypeName(Qnn_OpConfig_t& opConfig, const char* const typeName) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.typeName = typeName;
  }
}

inline void setQnnOpConfigTypeName(Qnn_OpConfig_t* const opConfig, const char* const typeName) {
  setQnnOpConfigTypeName(*opConfig, typeName);
}

inline void setQnnOpConfigParams(Qnn_OpConfig_t& opConfig,
                                 uint32_t const numOfParams,
                                 Qnn_Param_t* const params) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfParams = numOfParams;
    opConfig.v1.params      = params;
  }
}

inline void setQnnOpConfigParams(Qnn_OpConfig_t* const opConfig,
                                 uint32_t const numOfParams,
                                 Qnn_Param_t* const params) {
  setQnnOpConfigParams(*opConfig, numOfParams, params);
}

inline void setQnnOpConfigInputs(Qnn_OpConfig_t& opConfig,
                                 uint32_t const numOfInputs,
                                 Qnn_Tensor_t* const inputTensors) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfInputs  = numOfInputs;
    opConfig.v1.inputTensors = inputTensors;
  }
}

inline void setQnnOpConfigInputs(Qnn_OpConfig_t* const opConfig,
                                 uint32_t const numOfInputs,
                                 Qnn_Tensor_t* const inputTensors) {
  setQnnOpConfigInputs(*opConfig, numOfInputs, inputTensors);
}

inline void setQnnOpConfigOutputs(Qnn_OpConfig_t& opConfig,
                                  uint32_t const numOfOutputs,
                                  Qnn_Tensor_t* const outputTensors) {
  if (opConfig.version == QNN_OPCONFIG_VERSION_1) {
    opConfig.v1.numOfOutputs  = numOfOutputs;
    opConfig.v1.outputTensors = outputTensors;
  }
}

inline void setQnnOpConfigOutputs(Qnn_OpConfig_t* const opConfig,
                                  uint32_t const numOfOutputs,
                                  Qnn_Tensor_t* const outputTensors) {
  setQnnOpConfigOutputs(*opConfig, numOfOutputs, outputTensors);
}

// ---- Tensor create/getters ----
// Fields shared by v1 and v2 are read through .v1 without a version check;
// the "TensorCompatTest" comments below record that this aliasing is verified.

inline Qnn_Tensor_t createQnnTensor(const Qnn_TensorVersion_t version) {
  Qnn_Tensor_t tensor = QNN_TENSOR_INIT;
  tensor.version      = version;
  if (version == QNN_TENSOR_VERSION_1) {
    tensor.v1 = QNN_TENSOR_V1_INIT;
  } else if (version == QNN_TENSOR_VERSION_2) {
    tensor.v2 = QNN_TENSOR_V2_INIT;
  }
  return tensor;
}

inline uint32_t getQnnTensorId(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.id;
}

inline uint32_t getQnnTensorId(const Qnn_Tensor_t* const tensor) { return getQnnTensorId(*tensor); }

inline const char* getQnnTensorName(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.name;
}

inline const char* getQnnTensorName(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorName(*tensor);
}

inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.type;
}

inline Qnn_TensorType_t getQnnTensorType(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorType(*tensor);
}

inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dataFormat;
}

inline Qnn_TensorDataFormat_t getQnnTensorDataFormat(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDataFormat(*tensor);
}

inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dataType;
}

inline Qnn_DataType_t getQnnTensorDataType(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDataType(*tensor);
}

inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.quantizeParams;
}

// Pointer overload is null-tolerant: returns the INIT value for nullptr.
inline Qnn_QuantizeParams_t getQnnTensorQuantParams(const Qnn_Tensor_t* const tensor) {
  if (tensor != nullptr) {
    return getQnnTensorQuantParams(*tensor);
  }
  return QNN_QUANTIZE_PARAMS_INIT;
}

inline uint32_t getQnnTensorRank(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.rank;
}

// Pointer overload is null-tolerant: returns 0 for nullptr.
inline uint32_t getQnnTensorRank(const Qnn_Tensor_t* const tensor) {
  if (tensor != nullptr) {
    return getQnnTensorRank(*tensor);
  }
  return 0u;
}

inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.dimensions;
}

inline uint32_t* getQnnTensorDimensions(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorDimensions(*tensor);
}

// v2-only field: v1 tensors report NULL.
inline uint8_t* getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t& tensor) {
  if (tensor.version == QNN_TENSOR_VERSION_1) {
    return NULL;
  } else if (tensor.version == QNN_TENSOR_VERSION_2) {
    return tensor.v2.isDynamicDimensions;
  }
  return NULL;
}

inline uint8_t* getQnnTensorIsDynamicDimensions(const Qnn_Tensor_t* tensor) {
  return getQnnTensorIsDynamicDimensions(*tensor);
}

// v2-only field: v1 tensors report the INIT value.
inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t& tensor) {
  if (tensor.version == QNN_TENSOR_VERSION_1) {
    return QNN_SPARSE_PARAMS_INIT;
  } else if (tensor.version == QNN_TENSOR_VERSION_2) {
    return tensor.v2.sparseParams;
  }
  return QNN_SPARSE_PARAMS_INIT;
}

inline Qnn_SparseParams_t getQnnTensorSparseParams(const Qnn_Tensor_t* tensor) {
  return getQnnTensorSparseParams(*tensor);
}

inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.memType;
}

inline Qnn_TensorMemType_t getQnnTensorMemType(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorMemType(*tensor);
}

inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.clientBuf;
}

inline Qnn_ClientBuffer_t getQnnTensorClientBuf(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorClientBuf(*tensor);
}

inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t& tensor) {
  // TensorCompatTest justifies no need to check version
  return tensor.v1.memHandle;
}

inline Qnn_MemHandle_t getQnnTensorMemHandle(const Qnn_Tensor_t* const tensor) {
  return getQnnTensorMemHandle(*tensor);
}

// ---- Tensor setters ----

inline void setQnnTensorId(Qnn_Tensor_t& tensor, const uint32_t id) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.id = id;
}

inline void setQnnTensorId(Qnn_Tensor_t* const tensor, const uint32_t id) {
  setQnnTensorId(*tensor, id);
}

inline void setQnnTensorName(Qnn_Tensor_t& tensor, const char* const name) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.name = name;
}

inline void setQnnTensorName(Qnn_Tensor_t* const tensor, const char* const name) {
  setQnnTensorName(*tensor, name);
}

inline void setQnnTensorType(Qnn_Tensor_t& tensor, const Qnn_TensorType_t type) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.type = type;
}

inline void setQnnTensorType(Qnn_Tensor_t* const tensor, const Qnn_TensorType_t type) {
  setQnnTensorType(*tensor, type);
}

inline void setQnnTensorDataFormat(Qnn_Tensor_t& tensor, const Qnn_TensorDataFormat_t dataFormat) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.dataFormat = dataFormat;
}

inline void setQnnTensorDataFormat(Qnn_Tensor_t* const tensor,
                                   const Qnn_TensorDataFormat_t format) {
  setQnnTensorDataFormat(*tensor, format);
}

inline void setQnnTensorDataType(Qnn_Tensor_t& tensor, const Qnn_DataType_t dataType) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.dataType = dataType;
}

inline void setQnnTensorDataType(Qnn_Tensor_t* const tensor, const Qnn_DataType_t dataType) {
  setQnnTensorDataType(*tensor, dataType);
}

inline void setQnnTensorQuantParams(Qnn_Tensor_t& tensor,
                                    const Qnn_QuantizeParams_t quantizeParams) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.quantizeParams = quantizeParams;
}

inline void setQnnTensorQuantParams(Qnn_Tensor_t* const tensor, const Qnn_QuantizeParams_t params) {
  setQnnTensorQuantParams(*tensor, params);
}

inline void setQnnTensorRank(Qnn_Tensor_t& tensor, const uint32_t rank) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.rank = rank;
}

inline void setQnnTensorRank(Qnn_Tensor_t* const tensor, const uint32_t rank) {
  setQnnTensorRank(*tensor, rank);
}

inline void setQnnTensorDimensions(Qnn_Tensor_t& tensor, uint32_t* const dimensions) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.dimensions = dimensions;
}

inline void setQnnTensorDimensions(Qnn_Tensor_t* const tensor, uint32_t* const dimensions) {
  setQnnTensorDimensions(*tensor, dimensions);
}

// v2-only setters: silently no-op on v1 tensors.
inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t& tensor,
                                            uint8_t* const isDynamicDimensions) {
  if (tensor.version == QNN_TENSOR_VERSION_2) {
    tensor.v2.isDynamicDimensions = isDynamicDimensions;
  }
}

inline void setQnnTensorIsDynamicDimensions(Qnn_Tensor_t* tensor,
                                            uint8_t* const isDynamicDimensions) {
  setQnnTensorIsDynamicDimensions(*tensor, isDynamicDimensions);
}

inline void setQnnTensorSparseParams(Qnn_Tensor_t& tensor, const Qnn_SparseParams_t sparseParams) {
  if (tensor.version == QNN_TENSOR_VERSION_2) {
    tensor.v2.sparseParams = sparseParams;
  }
}

inline void setQnnTensorSparseParams(Qnn_Tensor_t* tensor, Qnn_SparseParams_t sparseParams) {
  setQnnTensorSparseParams(*tensor, sparseParams);
}

inline void setQnnTensorMemType(Qnn_Tensor_t& tensor, const Qnn_TensorMemType_t memType) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.memType = memType;
}

inline void setQnnTensorMemType(Qnn_Tensor_t* const tensor, const Qnn_TensorMemType_t memType) {
  setQnnTensorMemType(*tensor, memType);
}

inline void setQnnTensorClientBuf(Qnn_Tensor_t& tensor, const Qnn_ClientBuffer_t clientBuf) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.clientBuf = clientBuf;
}

inline void setQnnTensorClientBuf(Qnn_Tensor_t* const tensor, const Qnn_ClientBuffer_t clientBuf) {
  setQnnTensorClientBuf(*tensor, clientBuf);
}

inline void setQnnTensorMemHandle(Qnn_Tensor_t& tensor, const Qnn_MemHandle_t memHandle) {
  // TensorCompatTest justifies no need to check version
  tensor.v1.memHandle = memHandle;
}

inline void setQnnTensorMemHandle(Qnn_Tensor_t* const tensor, const Qnn_MemHandle_t handle) {
  setQnnTensorMemHandle(*tensor, handle);
}

// ---- Tensor set (batched input/output collections) ----

inline Qnn_TensorSet_t createQnnTensorSet(const Qnn_TensorSetVersion_t version) {
  Qnn_TensorSet_t tensorSet = QNN_TENSOR_SET_INIT;
  tensorSet.version         = version;
  if (version == QNN_TENSOR_SET_VERSION_1) {
    tensorSet.v1 = QNN_TENSOR_SET_V1_INIT;
  }
  return tensorSet;
}

inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t& tensorSet) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    return tensorSet.v1.numInputs;
  }
  return 0;
}

inline uint32_t getQnnTensorSetNumInputs(const Qnn_TensorSet_t* tensorSet) {
  return getQnnTensorSetNumInputs(*tensorSet);
}

inline Qnn_Tensor_t* getQnnTensorSetInputTensors(const Qnn_TensorSet_t& tensorSet) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    return tensorSet.v1.inputs;
  }
  return 0;
}

inline Qnn_Tensor_t* getQnnTensorSetInputTensors(const Qnn_TensorSet_t* tensorSet) {
  return getQnnTensorSetInputTensors(*tensorSet);
}

inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t& tensorSet) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    return tensorSet.v1.numOutputs;
  }
  return 0;
}

inline uint32_t getQnnTensorSetNumOutputs(const Qnn_TensorSet_t* tensorSet) {
  return getQnnTensorSetNumOutputs(*tensorSet);
}

inline Qnn_Tensor_t* getQnnTensorSetOutputTensors(const Qnn_TensorSet_t& tensorSet) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    return tensorSet.v1.outputs;
  }
  return 0;
}

inline Qnn_Tensor_t* getQnnTensorSetOutputTensors(const Qnn_TensorSet_t* tensorSet) {
  return getQnnTensorSetOutputTensors(*tensorSet);
}

inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t& tensorSet,
                                        Qnn_Tensor_t* inputTensors,
                                        uint32_t const numInputs) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    tensorSet.v1.inputs    = inputTensors;
    tensorSet.v1.numInputs = numInputs;
  }
}

inline void setQnnTensorSetInputTensors(Qnn_TensorSet_t* tensorSet,
                                        Qnn_Tensor_t* inputTensors,
                                        uint32_t const numInputs) {
  setQnnTensorSetInputTensors(*tensorSet, inputTensors, numInputs);
}

inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t& tensorSet,
                                         Qnn_Tensor_t* outputTensors,
                                         const uint32_t numOutputs) {
  if (tensorSet.version == QNN_TENSOR_SET_VERSION_1) {
    tensorSet.v1.outputs    = outputTensors;
    tensorSet.v1.numOutputs = numOutputs;
  }
}

inline void setQnnTensorSetOutputTensors(Qnn_TensorSet_t* tensorSet,
                                         Qnn_Tensor_t* outputTensors,
                                         const uint32_t numOutputs) {
  setQnnTensorSetOutputTensors(*tensorSet, outputTensors, numOutputs);
}

// Creator for QNN Op Config
#define QNN_OP_CFG_CREATE(version) createQnnOpConfig(version)

// Accessors for QNN Op Config
#define QNN_OP_CFG_GET_NAME(opConfig) getQnnOpConfigName(opConfig)
#define QNN_OP_CFG_GET_PACKAGE_NAME(opConfig) getQnnOpConfigPackageName(opConfig)
#define QNN_OP_CFG_GET_TYPE_NAME(opConfig) getQnnOpConfigTypeName(opConfig)
#define QNN_OP_CFG_GET_NUM_PARAMS(opConfig) getQnnOpConfigNumParams(opConfig)
#define QNN_OP_CFG_GET_PARAMS(opConfig) getQnnOpConfigParams(opConfig)
#define QNN_OP_CFG_GET_NUM_INPUTS(opConfig) getQnnOpConfigNumInputs(opConfig)
#define QNN_OP_CFG_GET_INPUTS(opConfig) getQnnOpConfigInputs(opConfig)
#define QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) getQnnOpConfigNumOutputs(opConfig)
#define QNN_OP_CFG_GET_OUTPUTS(opConfig) getQnnOpConfigOutputs(opConfig)

// Modifiers for QNN Op Config
#define QNN_OP_CFG_SET_NAME(opConfig, value) setQnnOpConfigName(opConfig, value)
#define QNN_OP_CFG_SET_PACKAGE_NAME(opConfig, value) setQnnOpConfigPackageName(opConfig, value)
#define QNN_OP_CFG_SET_TYPE_NAME(opConfig, value) setQnnOpConfigTypeName(opConfig, value)
#define QNN_OP_CFG_SET_PARAMS(opConfig, numOfParams, params) \
  setQnnOpConfigParams(opConfig, numOfParams, params)
#define QNN_OP_CFG_SET_INPUTS(opConfig, numOfInputs, inputTensors) \
  setQnnOpConfigInputs(opConfig, numOfInputs, inputTensors)
#define QNN_OP_CFG_SET_OUTPUTS(opConfig, numOfOutputs, outputTensors) \
  setQnnOpConfigOutputs(opConfig, numOfOutputs, outputTensors)

// Creator for QNN Tensor
#define QNN_TENSOR_CREATE(version) createQnnTensor(version)

// Accessors for QNN Tensor
#define QNN_TENSOR_GET_ID(tensor) getQnnTensorId(tensor)
#define QNN_TENSOR_GET_NAME(tensor) getQnnTensorName(tensor)
#define QNN_TENSOR_GET_TYPE(tensor) getQnnTensorType(tensor)
#define QNN_TENSOR_GET_DATA_FORMAT(tensor) getQnnTensorDataFormat(tensor)
#define QNN_TENSOR_GET_DATA_TYPE(tensor) getQnnTensorDataType(tensor)
#define QNN_TENSOR_GET_QUANT_PARAMS(tensor) getQnnTensorQuantParams(tensor)
#define QNN_TENSOR_GET_RANK(tensor) getQnnTensorRank(tensor)
#define QNN_TENSOR_GET_DIMENSIONS(tensor) getQnnTensorDimensions(tensor)
#define QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor) getQnnTensorIsDynamicDimensions(tensor)
#define QNN_TENSOR_GET_SPARSE_PARAMS(tensor) getQnnTensorSparseParams(tensor)
#define QNN_TENSOR_GET_MEM_TYPE(tensor) getQnnTensorMemType(tensor)
#define QNN_TENSOR_GET_CLIENT_BUF(tensor) getQnnTensorClientBuf(tensor)
#define QNN_TENSOR_GET_MEM_HANDLE(tensor) getQnnTensorMemHandle(tensor)

// Modifiers for QNN Tensor
#define QNN_TENSOR_SET_ID(tensor, value) setQnnTensorId(tensor, value)
#define QNN_TENSOR_SET_NAME(tensor, value) setQnnTensorName(tensor, value)
#define QNN_TENSOR_SET_TYPE(tensor, value) setQnnTensorType(tensor, value)
#define QNN_TENSOR_SET_DATA_FORMAT(tensor, value) setQnnTensorDataFormat(tensor, value)
#define QNN_TENSOR_SET_DATA_TYPE(tensor, value) setQnnTensorDataType(tensor, value)
#define QNN_TENSOR_SET_QUANT_PARAMS(tensor, value) setQnnTensorQuantParams(tensor, value)
#define QNN_TENSOR_SET_RANK(tensor, value) setQnnTensorRank(tensor, value)
#define QNN_TENSOR_SET_DIMENSIONS(tensor, value) setQnnTensorDimensions(tensor, value)
#define QNN_TENSOR_SET_IS_DYNAMIC_DIMENSIONS(tensor, value) \
  setQnnTensorIsDynamicDimensions(tensor, value)
#define QNN_TENSOR_SET_SPARSE_PARAMS(tensor, value) setQnnTensorSparseParams(tensor, value)
#define QNN_TENSOR_SET_MEM_TYPE(tensor, value) setQnnTensorMemType(tensor, value)
#define QNN_TENSOR_SET_CLIENT_BUF(tensor, value) setQnnTensorClientBuf(tensor, value)
#define QNN_TENSOR_SET_MEM_HANDLE(tensor, value) setQnnTensorMemHandle(tensor, value)

// Creator for QNN Tensor Set
#define QNN_TENSORSET_CREATE(version) createQnnTensorSet(version)

// Accessors for QNN Tensor Set
#define QNN_TENSORSET_GET_NUM_INPUTS(tensorSet) getQnnTensorSetNumInputs(tensorSet)
#define QNN_TENSORSET_GET_INPUT_TENSORS(tensorSet) getQnnTensorSetInputTensors(tensorSet)
#define QNN_TENSORSET_GET_NUM_OUTPUTS(tensorSet) getQnnTensorSetNumOutputs(tensorSet)
#define QNN_TENSORSET_GET_OUTPUT_TENSORS(tensorSet) getQnnTensorSetOutputTensors(tensorSet)

// Modifiers for QNN Tensor Set
#define QNN_TENSORSET_SET_INPUT_TENSORS(tensorSet, inputTensors, numInputs) \
  setQnnTensorSetInputTensors(tensorSet, inputTensors, numInputs)
#define QNN_TENSORSET_SET_OUTPUT_TENSORS(tensorSet, outputTensors, numOutputs) \
  setQnnTensorSetOutputTensors(tensorSet, outputTensors, numOutputs)

// True when a tensor uses no v2-only features and can be treated as v1.
inline bool isQnnTensorV1Compatible(const Qnn_Tensor_t& tensor) {
  if (tensor.version == QNN_TENSOR_VERSION_2) {
    if (tensor.v2.isDynamicDimensions != NULL) {
      return false;
    }

    if (tensor.v2.dataFormat == QNN_TENSOR_DATA_FORMAT_SPARSE) {
      return false;
    }
  }

  return true;
}

// (Body continues beyond this chunk.)
inline bool isQnnTensorV1Compatible(const Qnn_Tensor_t* const tensor) {
- return isQnnTensorV1Compatible(*tensor); -} - -inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t& opConfig) { - if ((QNN_OP_CFG_GET_INPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_INPUTS(opConfig) > 0u)) { - for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_INPUTS(opConfig); tensorIdx++) { - if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_INPUTS(opConfig)[tensorIdx])) { - return false; - } - } - } - if ((QNN_OP_CFG_GET_OUTPUTS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig) > 0u)) { - for (uint32_t tensorIdx = 0u; tensorIdx < QNN_OP_CFG_GET_NUM_OUTPUTS(opConfig); tensorIdx++) { - if (!isQnnTensorV1Compatible(QNN_OP_CFG_GET_OUTPUTS(opConfig)[tensorIdx])) { - return false; - } - } - } - if ((QNN_OP_CFG_GET_PARAMS(opConfig) != NULL) && (QNN_OP_CFG_GET_NUM_PARAMS(opConfig) > 0)) { - for (uint32_t paramIdx = 0u; paramIdx < QNN_OP_CFG_GET_NUM_PARAMS(opConfig); paramIdx++) { - const Qnn_Param_t& param = QNN_OP_CFG_GET_PARAMS(opConfig)[paramIdx]; - if (QNN_PARAMTYPE_TENSOR == param.paramType) { - if (!isQnnTensorV1Compatible(param.tensorParam)) { - return false; - } - } - } - } - - return true; -} - -inline bool isQnnTensorV1Compatible(const Qnn_OpConfig_t* const opConfig) { - return isQnnTensorV1Compatible(*opConfig); -} diff --git a/LibQNNHelper/src/SampleApp.hpp b/LibQNNHelper/src/SampleApp.hpp deleted file mode 100644 index 3fd4ae8..0000000 --- a/LibQNNHelper/src/SampleApp.hpp +++ /dev/null @@ -1,43 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include "QnnInterface.h" -#include "QnnWrapperUtils.hpp" -#include "System/QnnSystemInterface.h" - -namespace qnn { -namespace tools { -namespace sample_app { - -// Graph Related Function Handle Types -typedef qnn_wrapper_api::ModelError_t (*ComposeGraphsFnHandleType_t)( - Qnn_BackendHandle_t, - QNN_INTERFACE_VER_TYPE, - Qnn_ContextHandle_t, - const qnn_wrapper_api::GraphConfigInfo_t **, - const uint32_t, - qnn_wrapper_api::GraphInfo_t ***, - uint32_t *, - bool, - QnnLog_Callback_t, - QnnLog_Level_t); -typedef qnn_wrapper_api::ModelError_t (*FreeGraphInfoFnHandleType_t)( - qnn_wrapper_api::GraphInfo_t ***, uint32_t); - -typedef struct QnnFunctionPointers { - ComposeGraphsFnHandleType_t composeGraphsFnHandle; - FreeGraphInfoFnHandleType_t freeGraphInfoFnHandle; - QNN_INTERFACE_VER_TYPE qnnInterface; - QNN_SYSTEM_INTERFACE_VER_TYPE qnnSystemInterface; -} QnnFunctionPointers; - -} // namespace sample_app -} // namespace tools -} // namespace qnn diff --git a/LibQNNHelper/src/Utils/BuildId.hpp b/LibQNNHelper/src/Utils/BuildId.hpp deleted file mode 100644 index aa7339d..0000000 --- a/LibQNNHelper/src/Utils/BuildId.hpp +++ /dev/null @@ -1,17 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -namespace qnn { -namespace tools { - -inline std::string getBuildId() { return std::string("v2.23.0.240531124219_94801"); } - -} // namespace tools -} // namespace qnn diff --git a/LibQNNHelper/src/Utils/DataUtil.cpp b/LibQNNHelper/src/Utils/DataUtil.cpp deleted file mode 100644 index 00a0163..0000000 --- a/LibQNNHelper/src/Utils/DataUtil.cpp +++ /dev/null @@ -1,407 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== -#pragma warning(push) -#pragma warning(disable:4267) -#include -#include -#include -#include -#include - -#include "DataUtil.hpp" -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" - -using namespace qnn; -using namespace qnn::tools; - -std::tuple datautil::getDataTypeSizeInBytes(Qnn_DataType_t dataType) { - if (g_dataTypeToSize.find(dataType) == g_dataTypeToSize.end()) { - QNN_ERROR("Invalid qnn data type provided"); - return std::make_tuple(StatusCode::INVALID_DATA_TYPE, 0); - } - return std::make_tuple(StatusCode::SUCCESS, g_dataTypeToSize.find(dataType)->second); -} - -size_t datautil::calculateElementCount(std::vector dims) { - if (dims.size() == 0) { - return 0; - } - return std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); -} - -std::tuple datautil::calculateLength(std::vector dims, - Qnn_DataType_t dataType) { - if (dims.size() == 0) { - QNN_ERROR("dims.size() is zero"); - return std::make_tuple(StatusCode::INVALID_DIMENSIONS, 0); - } - StatusCode returnStatus{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(returnStatus, length) = getDataTypeSizeInBytes(dataType); - if 
(StatusCode::SUCCESS != returnStatus) { - return std::make_tuple(returnStatus, 0); - } - length *= calculateElementCount(dims); - return std::make_tuple(StatusCode::SUCCESS, length); -} - -datautil::StatusCode datautil::readDataFromFile(std::string filePath, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - QNN_ERROR("Failed to open input file: %s", filePath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - in.seekg(0, in.end); - const size_t length = in.tellg(); - in.seekg(0, in.beg); - StatusCode err{StatusCode::SUCCESS}; - size_t l{0}; - std::tie(err, l) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - if (length != l) { - QNN_ERROR("Input file %s: file size in bytes (%d), should be equal to: %d", - filePath.c_str(), - length, - l); - return StatusCode::DATA_SIZE_MISMATCH; - } - - if (!in.read(reinterpret_cast(buffer), length)) { - QNN_ERROR("Failed to read the contents of: %s", filePath.c_str()); - return StatusCode::DATA_READ_FAIL; - } - return StatusCode::SUCCESS; -} - -datautil::ReadBatchDataRetType_t datautil::readBatchData(const std::vector& filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - const std::vector& dims, - const Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return std::make_tuple(StatusCode::INVALID_BUFFER, 0, 0); - } - StatusCode err{StatusCode::SUCCESS}; - size_t tensorLength{0}; - std::tie(err, tensorLength) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return std::make_tuple(err, 0, 0); - } - size_t numInputsCopied = 0; - size_t numBatchSize = 0; - size_t totalLength = 0; - size_t fileIndex = filePathsIndexOffset; - while (true) { - if (fileIndex >= filePaths.size()) { - if 
(loopBackToStart) { - fileIndex = fileIndex % filePaths.size(); - } else { - numBatchSize += (tensorLength - totalLength) / (totalLength / numBatchSize); - // pad the vector with zeros - memset(buffer + totalLength, 0, (tensorLength - totalLength) * sizeof(char)); - break; - } - } - std::ifstream in(filePaths[fileIndex], std::ifstream::binary); - if (!in) { - QNN_ERROR("Failed to open input file: %s", (filePaths[fileIndex]).c_str()); - return std::make_tuple(StatusCode::FILE_OPEN_FAIL, numInputsCopied, numBatchSize); - } - in.seekg(0, in.end); - const size_t fileSize = in.tellg(); - in.seekg(0, in.beg); - if ((tensorLength % fileSize) != 0 || fileSize > tensorLength || fileSize == 0) { - QNN_ERROR( - "Given input file %s with file size in bytes %d. If the model expects a batch size of " - "one, the file size should match the tensor extent: %d bytes. If the model expects a " - "batch size > 1, the file size should evenly divide the tensor extent: %d bytes.", - filePaths[fileIndex].c_str(), - fileSize, - tensorLength, - tensorLength); - return std::make_tuple(StatusCode::DATA_SIZE_MISMATCH, numInputsCopied, numBatchSize); - } - if (!in.read(reinterpret_cast(buffer + (numInputsCopied * fileSize)), fileSize)) { - QNN_ERROR("Failed to read the contents of: %s", filePaths.front().c_str()); - return std::make_tuple(StatusCode::DATA_READ_FAIL, numInputsCopied, numBatchSize); - } - totalLength += fileSize; - numInputsCopied += 1; - numBatchSize += 1; - fileIndex += 1; - if (totalLength >= tensorLength) { - break; - } - } - return std::make_tuple(StatusCode::SUCCESS, numInputsCopied, numBatchSize); -} - -std::tuple datautil::getFileSize(std::string filePath) { - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - QNN_ERROR("Failed to open input file: %s", filePath.c_str()); - return std::make_tuple(StatusCode::FILE_OPEN_FAIL, 0); - } - in.seekg(0, in.end); - const size_t length = in.tellg(); - in.seekg(0, in.beg); - return std::make_tuple(StatusCode::SUCCESS, 
length); -} - -datautil::StatusCode datautil::readBinaryFromFile(std::string filePath, - uint8_t* buffer, - size_t bufferSize) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - std::ifstream in(filePath, std::ifstream::binary); - if (!in) { - QNN_ERROR("Failed to open input file: %s", filePath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - if (!in.read(reinterpret_cast(buffer), bufferSize)) { - QNN_ERROR("Failed to read the contents of: %s", filePath.c_str()); - return StatusCode::DATA_READ_FAIL; - } - return StatusCode::SUCCESS; -} - -datautil::StatusCode datautil::writeDataToFile(std::string fileDir, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - if (!pal::Directory::makePath(fileDir)) { - QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - StatusCode err{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(err, length) = datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - for (size_t l = 0; l < length; l++) { - os.write(reinterpret_cast(&(*(buffer + l))), 1); - } - return StatusCode::SUCCESS; -} - -datautil::StatusCode datautil::writeBatchDataToFile(std::vector fileDirs, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer, - const size_t batchSize) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - StatusCode err{StatusCode::SUCCESS}; - size_t length{0}; - std::tie(err, length) = 
datautil::calculateLength(dims, dataType); - if (StatusCode::SUCCESS != err) { - return err; - } - auto outputSize = (length / batchSize); - for (size_t batchIndex = 0; batchIndex < fileDirs.size(); batchIndex++) { - std::string fileDir = fileDirs[batchIndex]; - if (!pal::Directory::makePath(fileDir)) { - QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - for (size_t l = 0; l < outputSize; l++) { - size_t bufferIndex = l + (batchIndex * outputSize); - os.write(reinterpret_cast(&(*(buffer + bufferIndex))), 1); - } - } - return StatusCode::SUCCESS; -} - -datautil::StatusCode datautil::writeBinaryToFile(std::string fileDir, - std::string fileName, - uint8_t* buffer, - size_t bufferSize) { - if (nullptr == buffer) { - QNN_ERROR("buffer is nullptr"); - return StatusCode::INVALID_BUFFER; - } - if (!pal::Directory::makePath(fileDir)) { - QNN_ERROR("Failed to create output directory: %s", fileDir.c_str()); - return StatusCode::DIRECTORY_CREATE_FAIL; - } - const std::string outputPath(fileDir + pal::Path::getSeparator() + fileName); - std::ofstream os(outputPath, std::ofstream::binary); - if (!os) { - QNN_ERROR("Failed to open output file for writing: %s", outputPath.c_str()); - return StatusCode::FILE_OPEN_FAIL; - } - os.write(reinterpret_cast(buffer), bufferSize); - return StatusCode::SUCCESS; -} - -template -datautil::StatusCode datautil::floatToTfN( - T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements) { - static_assert(std::is_unsigned::value, "floatToTfN supports unsigned only!"); - - if (nullptr == out || nullptr == in) { - QNN_ERROR("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - - size_t 
dataTypeSizeInBytes = sizeof(T_QuantType); - size_t bitWidth = dataTypeSizeInBytes * g_bitsPerByte; - double trueBitWidthMax = pow(2, bitWidth) - 1; - double encodingMin = offset * scale; - double encodingMax = (trueBitWidthMax + offset) * scale; - double encodingRange = encodingMax - encodingMin; - double avg = trueBitWidthMax / encodingRange; // zw: optimize. - - for (size_t i = 0; i < numElements; ++i) { - int quantizedValue = (int)(avg * (in[i] - encodingMin) + 0.5); // zw: optimze, replace 'round()' with '+ 0.5'. - if (quantizedValue < 0) - quantizedValue = 0; - else if (quantizedValue > (int)trueBitWidthMax) - quantizedValue = (int)trueBitWidthMax; - out[i] = static_cast(quantizedValue); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::floatToTfN( - uint8_t* out, float* in, int32_t offset, float scale, size_t numElements); - -template datautil::StatusCode datautil::floatToTfN( - uint16_t* out, float* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode datautil::tfNToFloat( - float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements) { - static_assert(std::is_unsigned::value, "tfNToFloat supports unsigned only!"); - - if (nullptr == out || nullptr == in) { - QNN_ERROR("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - double quantizedValue = static_cast(in[i]); - double offsetDouble = static_cast(offset); - out[i] = static_cast((quantizedValue + offsetDouble) * scale); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::tfNToFloat( - float* out, uint8_t* in, int32_t offset, float scale, size_t numElements); - -template datautil::StatusCode datautil::tfNToFloat( - float* out, uint16_t* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode datautil::castToFloat(float* out, T_QuantType* in, size_t numElements) { - if (nullptr == out || nullptr == in) { - 
QNN_ERROR("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - out[i] = static_cast(in[i]); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::castToFloat(float* out, - uint8_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - uint16_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - uint32_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int8_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int16_t* in, - size_t numElements); - -template datautil::StatusCode datautil::castToFloat(float* out, - int32_t* in, - size_t numElements); - -template -datautil::StatusCode datautil::castFromFloat(T_QuantType* out, float* in, size_t numElements) { - if (nullptr == out || nullptr == in) { - QNN_ERROR("Received a nullptr"); - return StatusCode::INVALID_BUFFER; - } - for (size_t i = 0; i < numElements; i++) { - out[i] = static_cast(in[i]); - } - return StatusCode::SUCCESS; -} - -template datautil::StatusCode datautil::castFromFloat(uint8_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(uint16_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(uint32_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int8_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int16_t* out, - float* in, - size_t numElements); - -template datautil::StatusCode datautil::castFromFloat(int32_t* out, - float* in, - size_t numElements); diff --git a/LibQNNHelper/src/Utils/DataUtil.hpp b/LibQNNHelper/src/Utils/DataUtil.hpp deleted file mode 100644 index 9867e74..0000000 --- a/LibQNNHelper/src/Utils/DataUtil.hpp +++ /dev/null @@ 
-1,125 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== -#pragma once - -#include -#include -#include - -#include "QnnTypes.h" - -namespace qnn { -namespace tools { -namespace datautil { -enum class StatusCode { - SUCCESS, - DATA_READ_FAIL, - DATA_WRITE_FAIL, - FILE_OPEN_FAIL, - DIRECTORY_CREATE_FAIL, - INVALID_DIMENSIONS, - INVALID_DATA_TYPE, - DATA_SIZE_MISMATCH, - INVALID_BUFFER, -}; - -const size_t g_bitsPerByte = 8; - -using ReadBatchDataRetType_t = std::tuple; - -std::tuple getDataTypeSizeInBytes(Qnn_DataType_t dataType); - -std::tuple calculateLength(std::vector dims, Qnn_DataType_t dataType); - -size_t calculateElementCount(std::vector dims); - -std::tuple getFileSize(std::string filePath); - -StatusCode readDataFromFile(std::string filePath, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer); - -/* - * Read data in batches from vector and try to matches the model input's - * batches. 
If the vector is empty while matching the batch size of model, - * pad the remaining buffer with zeros - * @param filePaths image paths vector - * @param filePathsIndexOffset index offset in the vector - * @param loopBackToStart loop the vector to fill the remaining tensor data - * @param dims model input dimensions - * @param dataType to create input buffer from file - * @param buffer to fill the input image data - * - * @return ReadBatchDataRetType_t returns numFilesCopied and batchSize along - * with status - */ -ReadBatchDataRetType_t readBatchData(const std::vector& filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - const std::vector& dims, - const Qnn_DataType_t dataType, - uint8_t* buffer); - -StatusCode readBinaryFromFile(std::string filePath, uint8_t* buffer, size_t bufferSize); - -StatusCode writeDataToFile(std::string fileDir, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer); - -StatusCode writeBatchDataToFile(std::vector fileDirs, - std::string fileName, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t* buffer, - const size_t batchSize); - -StatusCode writeBinaryToFile(std::string fileDir, - std::string fileName, - uint8_t* buffer, - size_t bufferSize); - -template -datautil::StatusCode floatToTfN( - T_QuantType* out, float* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode tfNToFloat( - float* out, T_QuantType* in, int32_t offset, float scale, size_t numElements); - -template -datautil::StatusCode castToFloat(float* out, T_QuantType* in, size_t numElements); - -template -datautil::StatusCode castFromFloat(T_QuantType* out, float* in, size_t numElements); - -const std::map g_dataTypeToSize = { - {QNN_DATATYPE_INT_8, 1}, - {QNN_DATATYPE_INT_16, 2}, - {QNN_DATATYPE_INT_32, 4}, - {QNN_DATATYPE_INT_64, 8}, - {QNN_DATATYPE_UINT_8, 1}, - {QNN_DATATYPE_UINT_16, 2}, - {QNN_DATATYPE_UINT_32, 4}, - {QNN_DATATYPE_UINT_64, 8}, - 
{QNN_DATATYPE_FLOAT_16, 2}, - {QNN_DATATYPE_FLOAT_32, 4}, - {QNN_DATATYPE_FLOAT_64, 8}, - {QNN_DATATYPE_SFIXED_POINT_8, 1}, - {QNN_DATATYPE_SFIXED_POINT_16, 2}, - {QNN_DATATYPE_SFIXED_POINT_32, 4}, - {QNN_DATATYPE_UFIXED_POINT_8, 1}, - {QNN_DATATYPE_UFIXED_POINT_16, 2}, - {QNN_DATATYPE_UFIXED_POINT_32, 4}, - {QNN_DATATYPE_BOOL_8, 1}, -}; -} // namespace datautil -} // namespace tools -} // namespace qnn diff --git a/LibQNNHelper/src/Utils/DynamicLoadUtil.cpp b/LibQNNHelper/src/Utils/DynamicLoadUtil.cpp deleted file mode 100644 index bc64d20..0000000 --- a/LibQNNHelper/src/Utils/DynamicLoadUtil.cpp +++ /dev/null @@ -1,174 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include - -#include "DynamicLoadUtil.hpp" -#include "Logger.hpp" -#include "PAL/DynamicLoading.hpp" - -using namespace qnn; -using namespace qnn::tools; - -typedef Qnn_ErrorHandle_t (*QnnInterfaceGetProvidersFn_t)(const QnnInterface_t*** providerList, - uint32_t* numProviders); - -typedef Qnn_ErrorHandle_t (*QnnSystemInterfaceGetProvidersFn_t)( - const QnnSystemInterface_t*** providerList, uint32_t* numProviders); - -template -static inline T resolveSymbol(void* libHandle, const char* sym) { - T ptr = (T)pal::dynamicloading::dlSym(libHandle, sym); - if (ptr == nullptr) { - QNN_ERROR("Unable to access symbol [%s]. 
pal::dynamicloading::dlError(): %s", - sym, - pal::dynamicloading::dlError()); - } - return ptr; -} - -dynamicloadutil::StatusCode dynamicloadutil::getQnnFunctionPointers( - std::string backendPath, - std::string modelPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers, - void** backendHandleRtn, - bool loadModelLib, - void** modelHandleRtn) { - void* libBackendHandle = pal::dynamicloading::dlOpen( - backendPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); - if (nullptr == libBackendHandle) { - QNN_ERROR("Unable to load backend. pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_BACKEND; - } - if (nullptr != backendHandleRtn) { - *backendHandleRtn = libBackendHandle; - } - // Get QNN Interface - QnnInterfaceGetProvidersFn_t getInterfaceProviders{nullptr}; - getInterfaceProviders = - resolveSymbol(libBackendHandle, "QnnInterface_getProviders"); - if (nullptr == getInterfaceProviders) { - return StatusCode::FAIL_SYM_FUNCTION; - } - QnnInterface_t** interfaceProviders{nullptr}; - uint32_t numProviders{0}; - if (QNN_SUCCESS != - getInterfaceProviders((const QnnInterface_t***)&interfaceProviders, &numProviders)) { - QNN_ERROR("Failed to get interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (nullptr == interfaceProviders) { - QNN_ERROR("Failed to get interface providers: null interface providers received."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (0 == numProviders) { - QNN_ERROR("Failed to get interface providers: 0 interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - bool foundValidInterface{false}; - for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { - if (QNN_API_VERSION_MAJOR == interfaceProviders[pIdx]->apiVersion.coreApiVersion.major && - QNN_API_VERSION_MINOR <= interfaceProviders[pIdx]->apiVersion.coreApiVersion.minor) { - foundValidInterface = true; - qnnFunctionPointers->qnnInterface = 
interfaceProviders[pIdx]->QNN_INTERFACE_VER_NAME; - break; - } - } - if (!foundValidInterface) { - QNN_ERROR("Unable to find a valid interface."); - libBackendHandle = nullptr; - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - - if (true == loadModelLib) { - QNN_INFO("Loading model shared library ([model].so)"); - void* libModelHandle = pal::dynamicloading::dlOpen( - modelPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); - if (nullptr == libModelHandle) { - QNN_ERROR("Unable to load model. pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_MODEL; - } - if (nullptr != modelHandleRtn) { - *modelHandleRtn = libModelHandle; - } - - std::string modelPrepareFunc = "QnnModel_composeGraphs"; - qnnFunctionPointers->composeGraphsFnHandle = - resolveSymbol(libModelHandle, - modelPrepareFunc.c_str()); - if (nullptr == qnnFunctionPointers->composeGraphsFnHandle) { - return StatusCode::FAIL_SYM_FUNCTION; - } - - std::string modelFreeFunc = "QnnModel_freeGraphsInfo"; - qnnFunctionPointers->freeGraphInfoFnHandle = - resolveSymbol(libModelHandle, - modelFreeFunc.c_str()); - if (nullptr == qnnFunctionPointers->freeGraphInfoFnHandle) { - return StatusCode::FAIL_SYM_FUNCTION; - } - } else { - QNN_INFO("Model wasn't loaded from a shared library."); - } - return StatusCode::SUCCESS; -} - -dynamicloadutil::StatusCode dynamicloadutil::getQnnSystemFunctionPointers( - std::string systemLibraryPath, sample_app::QnnFunctionPointers* qnnFunctionPointers) { - QNN_FUNCTION_ENTRY_LOG; - if (!qnnFunctionPointers) { - QNN_ERROR("nullptr provided for qnnFunctionPointers"); - return StatusCode::FAILURE; - } - void* systemLibraryHandle = pal::dynamicloading::dlOpen( - systemLibraryPath.c_str(), pal::dynamicloading::DL_NOW | pal::dynamicloading::DL_LOCAL); - if (nullptr == systemLibraryHandle) { - QNN_ERROR("Unable to load system library. 
pal::dynamicloading::dlError(): %s", - pal::dynamicloading::dlError()); - return StatusCode::FAIL_LOAD_SYSTEM_LIB; - } - QnnSystemInterfaceGetProvidersFn_t getSystemInterfaceProviders{nullptr}; - getSystemInterfaceProviders = resolveSymbol( - systemLibraryHandle, "QnnSystemInterface_getProviders"); - if (nullptr == getSystemInterfaceProviders) { - return StatusCode::FAIL_SYM_FUNCTION; - } - QnnSystemInterface_t** systemInterfaceProviders{nullptr}; - uint32_t numProviders{0}; - if (QNN_SUCCESS != getSystemInterfaceProviders( - (const QnnSystemInterface_t***)&systemInterfaceProviders, &numProviders)) { - QNN_ERROR("Failed to get system interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (nullptr == systemInterfaceProviders) { - QNN_ERROR("Failed to get system interface providers: null interface providers received."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - if (0 == numProviders) { - QNN_ERROR("Failed to get interface providers: 0 interface providers."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - bool foundValidSystemInterface{false}; - for (size_t pIdx = 0; pIdx < numProviders; pIdx++) { - if (QNN_SYSTEM_API_VERSION_MAJOR == systemInterfaceProviders[pIdx]->systemApiVersion.major && - QNN_SYSTEM_API_VERSION_MINOR <= systemInterfaceProviders[pIdx]->systemApiVersion.minor) { - foundValidSystemInterface = true; - qnnFunctionPointers->qnnSystemInterface = - systemInterfaceProviders[pIdx]->QNN_SYSTEM_INTERFACE_VER_NAME; - break; - } - } - if (!foundValidSystemInterface) { - QNN_ERROR("Unable to find a valid system interface."); - return StatusCode::FAIL_GET_INTERFACE_PROVIDERS; - } - QNN_FUNCTION_EXIT_LOG; - return StatusCode::SUCCESS; -} \ No newline at end of file diff --git a/LibQNNHelper/src/Utils/DynamicLoadUtil.hpp b/LibQNNHelper/src/Utils/DynamicLoadUtil.hpp deleted file mode 100644 index d5c494a..0000000 --- a/LibQNNHelper/src/Utils/DynamicLoadUtil.hpp +++ /dev/null @@ -1,36 +0,0 @@ 
-//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include "SampleApp.hpp" - -namespace qnn { -namespace tools { -namespace dynamicloadutil { -enum class StatusCode { - SUCCESS, - FAILURE, - FAIL_LOAD_BACKEND, - FAIL_LOAD_MODEL, - FAIL_SYM_FUNCTION, - FAIL_GET_INTERFACE_PROVIDERS, - FAIL_LOAD_SYSTEM_LIB, -}; - -StatusCode getQnnFunctionPointers(std::string backendPath, - std::string modelPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers, - void** backendHandle, - bool loadModelLib, - void** modelHandleRtn); -StatusCode getQnnSystemFunctionPointers(std::string systemLibraryPath, - sample_app::QnnFunctionPointers* qnnFunctionPointers); -} // namespace dynamicloadutil -} // namespace tools -} // namespace qnn diff --git a/LibQNNHelper/src/Utils/IOTensor.cpp b/LibQNNHelper/src/Utils/IOTensor.cpp deleted file mode 100644 index 667b76c..0000000 --- a/LibQNNHelper/src/Utils/IOTensor.cpp +++ /dev/null @@ -1,903 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include - -#include "DataUtil.hpp" -#include "IOTensor.hpp" -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" -#include "PAL/StringOp.hpp" -#include "QnnTypeMacros.hpp" - -using namespace qnn; -using namespace qnn::tools; - -// Helper method to read data from files to a buffer. 
-iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::readDataAndAllocateBuffer( - const std::vector& filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t** bufferToCopy) { - StatusCode returnStatus = StatusCode::SUCCESS; - *bufferToCopy = nullptr; - returnStatus = allocateBuffer(bufferToCopy, dims, dataType); - size_t numFilesPopulated = 0; - size_t batchSize = 0; - datautil::StatusCode status; - std::tie(status, numFilesPopulated, batchSize) = - datautil::readBatchData(filePaths, - filePathsIndexOffset, - loopBackToStart, - dims, - dataType, - reinterpret_cast(*bufferToCopy)); - if (datautil::StatusCode::SUCCESS != status) { - QNN_ERROR("Failure in datautil::readBatchData"); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != returnStatus) { - if (nullptr != *bufferToCopy) { - free(*bufferToCopy); - *bufferToCopy = nullptr; - } - } - return {returnStatus, numFilesPopulated, batchSize}; -} - -// Helper method to copy a float buffer, quantize it, and copy -// it to a tensor (Qnn_Tensor_t) buffer. 
-iotensor::StatusCode iotensor::IOTensor::copyFromFloatToNative(float* floatBuffer, - Qnn_Tensor_t* tensor) { - if (nullptr == floatBuffer || nullptr == tensor) { - QNN_ERROR("copyFromFloatToNative(): received a nullptr"); - return StatusCode::FAILURE; - } - - StatusCode returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - - switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { - case QNN_DATATYPE_UFIXED_POINT_8: - datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - datautil::calculateElementCount(dims)); - break; - - case QNN_DATATYPE_UFIXED_POINT_16: - datautil::floatToTfN(static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - datautil::calculateElementCount(dims)); - break; - - case QNN_DATATYPE_UINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case 
QNN_DATATYPE_INT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_BOOL_8: - if (datautil::StatusCode::SUCCESS != - datautil::castFromFloat( - static_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - floatBuffer, - datautil::calculateElementCount(dims))) { - QNN_ERROR("failure in castFromFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - default: - QNN_ERROR("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - return returnStatus; -} - -// Helper method to populate an input tensor in the graph during execution. -// It relies on reading data from files provided during app creation. 
-iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::populateInputTensor( - const std::vector& filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - Qnn_Tensor_t* input, - iotensor::InputDataType inputDataType) { - if (nullptr == input) { - QNN_ERROR("input is nullptr"); - return {StatusCode::FAILURE, 0, 0}; - } - - auto returnStatus = StatusCode::SUCCESS; - size_t numFilesPopulated = 0; - size_t batchSize = 0; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(input), QNN_TENSOR_GET_RANK(input)); - - if (inputDataType == InputDataType::FLOAT && - QNN_TENSOR_GET_DATA_TYPE(input) != QNN_DATATYPE_FLOAT_32) { - uint8_t* fileToBuffer = nullptr; - std::tie(returnStatus, numFilesPopulated, batchSize) = - readDataAndAllocateBuffer(filePaths, - filePathsIndexOffset, - loopBackToStart, - dims, - QNN_DATATYPE_FLOAT_32, - &fileToBuffer); - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("readDataFromFileToBuffer successful"); - returnStatus = copyFromFloatToNative(reinterpret_cast(fileToBuffer), input); - } - if (nullptr != fileToBuffer) { - free(fileToBuffer); - fileToBuffer = nullptr; - } - } else { - datautil::StatusCode status; - std::tie(status, numFilesPopulated, batchSize) = - datautil::readBatchData(filePaths, - filePathsIndexOffset, - loopBackToStart, - dims, - QNN_TENSOR_GET_DATA_TYPE(input), - static_cast(QNN_TENSOR_GET_CLIENT_BUF(input).data)); - if (datautil::StatusCode::SUCCESS != status) { - QNN_ERROR("Failure in datautil::readBatchData"); - returnStatus = StatusCode::FAILURE; - } - } - return {returnStatus, numFilesPopulated, batchSize}; -} - -// Helper method to populate all input tensors during execution. 
-iotensor::PopulateInputTensorsRetType_t iotensor::IOTensor::populateInputTensors( - uint32_t graphIdx, - const std::vector>& filePathsVector, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - const std::unordered_map& inputNameToIndex, - Qnn_Tensor_t* inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType) { - QNN_DEBUG("populateInputTensors() graphIndx %d", graphIdx); - if (nullptr == inputs) { - QNN_ERROR("inputs is nullptr"); - return {StatusCode::FAILURE, 0, 0}; - } - auto inputCount = graphInfo.numInputTensors; - if (filePathsVector.size() != inputCount) { - QNN_ERROR( - "Incorrect amount of Input files for graphIdx: %d. Expected: %d, " - "received: %d", - graphIdx, - inputCount, - filePathsVector.size()); - return {StatusCode::FAILURE, 0, 0}; - } - size_t numFilesPopulated = 0; - size_t numBatchSize = 0; - for (size_t inputIdx = 0; inputIdx < inputCount; inputIdx++) { - size_t inputNameIdx = inputIdx; - QNN_DEBUG("index = %d input column index = %d", inputIdx, inputNameIdx); - std::string inputNodeName; - if (QNN_TENSOR_GET_NAME(graphInfo.inputTensors[inputIdx])) - inputNodeName = QNN_TENSOR_GET_NAME(graphInfo.inputTensors[inputIdx]); - if (!inputNodeName.empty() && inputNameToIndex.find(inputNodeName) != inputNameToIndex.end()) { - inputNameIdx = inputNameToIndex.at(inputNodeName); - } - StatusCode returnStatus; - size_t currentInputNumFilesPopulated = 0; - size_t currentInputNumBatchSize = 0; - std::tie(returnStatus, currentInputNumFilesPopulated, currentInputNumBatchSize) = - populateInputTensor(filePathsVector[inputNameIdx], - filePathsIndexOffset, - loopBackToStart, - &(inputs[inputIdx]), - inputDataType); - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("populateInputTensorFromFiles failed for input %s with index %d", - inputNodeName.c_str(), - inputIdx); - return {StatusCode::FAILURE, currentInputNumFilesPopulated, currentInputNumBatchSize}; - } - if (inputIdx == 0) { - numFilesPopulated = 
currentInputNumFilesPopulated; - numBatchSize = currentInputNumBatchSize; - } else { - if (numFilesPopulated != currentInputNumFilesPopulated || - numBatchSize != currentInputNumBatchSize) { - QNN_ERROR( - "Current input tensor with name: %s with index %d files populated = %d, batch size = %d" - " does not match with expected files populated = %d, batch size = %d", - inputNodeName.c_str(), - inputIdx, - currentInputNumFilesPopulated, - currentInputNumBatchSize, - numFilesPopulated, - numBatchSize); - return {StatusCode::FAILURE, numFilesPopulated, numBatchSize}; - } - } - } - return {StatusCode::SUCCESS, numFilesPopulated, numBatchSize}; -} - -// Helper method to populate an input tensor in the graph during execution. -// It relies on reading data from buffer provided during executeGraph() call. -iotensor::StatusCode iotensor::IOTensor::populateInputTensor( - uint8_t* buffer, Qnn_Tensor_t* input, iotensor::InputDataType inputDataType) { - if (nullptr == input) { - QNN_ERROR("input is nullptr"); - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(input), QNN_TENSOR_GET_RANK(input)); - if (inputDataType == InputDataType::FLOAT && - QNN_TENSOR_GET_DATA_TYPE(input) != QNN_DATATYPE_FLOAT_32) { - QNN_DEBUG("Received FLOAT input, but model needs non-float input"); - if (StatusCode::SUCCESS != copyFromFloatToNative(reinterpret_cast(buffer), input)) { - QNN_DEBUG("copyFromFloatToNative failure"); - return StatusCode::FAILURE; - } - } else { - size_t length; - datautil::StatusCode returnStatus; - std::tie(returnStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE(input)); - if (datautil::StatusCode::SUCCESS != returnStatus) { - return StatusCode::FAILURE; - } - pal::StringOp::memscpy( - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(input).data), length, buffer, length); - } - return StatusCode::SUCCESS; -} - -// Helper method to populate all input tensors. 
-iotensor::StatusCode iotensor::IOTensor::populateInputTensors( - uint32_t graphIdx, - std::vector inputBuffers, - Qnn_Tensor_t* inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType) { - if (nullptr == inputs) { - QNN_ERROR("inputs is nullptr"); - return StatusCode::FAILURE; - } - auto inputCount = graphInfo.numInputTensors; - if (inputBuffers.size() != inputCount) { - QNN_ERROR("Incorrect amount of Input Buffers for graphIdx: %d. Expected: %d, received: %d", - graphIdx, - inputCount, - inputBuffers.size()); - return StatusCode::FAILURE; - } - for (size_t inputIdx = 0; inputIdx < inputCount; inputIdx++) { - if (StatusCode::SUCCESS != - populateInputTensor(inputBuffers[inputIdx], &(inputs[inputIdx]), inputDataType)) { - QNN_DEBUG("populateInputTensor() failure for input: %d", inputIdx); - return StatusCode::FAILURE; - } - } - return StatusCode::SUCCESS; -} - -// Setup details for Qnn_Tensor_t for execution -// based on information in Qnn_TensorWrapper_t provided by model.so. -iotensor::StatusCode iotensor::IOTensor::setupTensors(Qnn_Tensor_t** tensors, - uint32_t tensorCount, - Qnn_Tensor_t* tensorWrappers) { - if (nullptr == tensorWrappers) { - QNN_ERROR("tensorWrappers is nullptr"); - return StatusCode::FAILURE; - } - if (0 == tensorCount) { - QNN_INFO("tensor count is 0. 
Nothing to setup."); - return StatusCode::SUCCESS; - } - auto returnStatus = StatusCode::SUCCESS; - *tensors = (Qnn_Tensor_t*)calloc(1, tensorCount * sizeof(Qnn_Tensor_t)); - if (nullptr == *tensors) { - QNN_ERROR("mem alloc failed for *tensors"); - returnStatus = StatusCode::FAILURE; - return returnStatus; - } - for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { - Qnn_Tensor_t wrapperTensor = tensorWrappers[tensorIdx]; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(wrapperTensor), QNN_TENSOR_GET_RANK(wrapperTensor)); - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("allocateBuffer successful"); - (*tensors)[tensorIdx] = QNN_TENSOR_INIT; - returnStatus = - (sample_app::deepCopyQnnTensorInfo(((*tensors) + tensorIdx), &wrapperTensor) == true - ? StatusCode::SUCCESS - : StatusCode::FAILURE); - } - if (StatusCode::SUCCESS == returnStatus) { - QNN_DEBUG("deepCopyQnnTensorInfo successful"); - QNN_TENSOR_SET_MEM_TYPE(((*tensors) + tensorIdx), QNN_TENSORMEMTYPE_RAW); - } - Qnn_ClientBuffer_t clientBuffer = QNN_CLIENT_BUFFER_INIT; - returnStatus = allocateBuffer(reinterpret_cast(&clientBuffer.data), - dims, - QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); - datautil::StatusCode datautilStatus{datautil::StatusCode::SUCCESS}; - size_t length{0}; - std::tie(datautilStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE((*tensors) + tensorIdx)); - if (datautilStatus != datautil::StatusCode::SUCCESS) { - returnStatus = StatusCode::FAILURE; - } - clientBuffer.dataSize = length; - QNN_TENSOR_SET_CLIENT_BUF(((*tensors) + tensorIdx), clientBuffer); - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("Failure in setupTensors, cleaning up resources"); - if (nullptr != (QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx)).data) { - free(QNN_TENSOR_GET_CLIENT_BUF((*tensors) + tensorIdx).data); - } - tearDownTensors(*tensors, tensorIdx); - *tensors = nullptr; - returnStatus = StatusCode::FAILURE; - QNN_ERROR("Failure in 
setupTensors, done cleaning up resources"); - return returnStatus; - } - } - return returnStatus; -} - -// Setup details for all input and output tensors for graph execution. -iotensor::StatusCode iotensor::IOTensor::setupInputAndOutputTensors( - Qnn_Tensor_t** inputs, Qnn_Tensor_t** outputs, qnn_wrapper_api::GraphInfo_t graphInfo) { - auto returnStatus = StatusCode::SUCCESS; - if (StatusCode::SUCCESS != - setupTensors(inputs, graphInfo.numInputTensors, (graphInfo.inputTensors))) { - QNN_ERROR("Failure in setting up input tensors"); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != - setupTensors(outputs, graphInfo.numOutputTensors, (graphInfo.outputTensors))) { - QNN_ERROR("Failure in setting up output tensors"); - returnStatus = StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("Failure in setupInputAndOutputTensors, cleaning up resources"); - if (nullptr != *inputs) { - QNN_DEBUG("cleaning up input tensors"); - tearDownTensors(*inputs, graphInfo.numInputTensors); - *inputs = nullptr; - } - if (nullptr != *outputs) { - QNN_DEBUG("cleaning up output tensors"); - tearDownTensors(*outputs, graphInfo.numOutputTensors); - *outputs = nullptr; - } - QNN_ERROR("Failure in setupInputAndOutputTensors, done cleaning up resources"); - } - return returnStatus; -} - -// Clean up all tensors related data after execution. -iotensor::StatusCode iotensor::IOTensor::tearDownTensors(Qnn_Tensor_t* tensors, - uint32_t tensorCount) { - for (size_t tensorIdx = 0; tensorIdx < tensorCount; tensorIdx++) { - QNN_DEBUG("freeing resources for tensor: %d", tensorIdx); - if (nullptr != QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])) { - QNN_DEBUG("freeing dimensions"); - free(QNN_TENSOR_GET_DIMENSIONS(tensors[tensorIdx])); - } - if (nullptr != QNN_TENSOR_GET_NAME(tensors[tensorIdx])) { // zw: free tensor name. 
- QNN_DEBUG("freeing tensor name"); - free((void*)QNN_TENSOR_GET_NAME(tensors[tensorIdx])); - } - if (nullptr != QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data) { - QNN_DEBUG("freeing clientBuf.data"); - free(QNN_TENSOR_GET_CLIENT_BUF(tensors[tensorIdx]).data); - } - } - free(tensors); - return StatusCode::SUCCESS; -} - -// Clean up all input and output tensors after execution. -iotensor::StatusCode iotensor::IOTensor::tearDownInputAndOutputTensors(Qnn_Tensor_t* inputs, - Qnn_Tensor_t* outputs, - size_t numInputTensors, - size_t numOutputTensors) { - if (nullptr != inputs) { - QNN_INFO("cleaning up resources for input tensors"); - tearDownTensors(inputs, numInputTensors); - inputs = nullptr; - } - if (nullptr != outputs) { - QNN_INFO("cleaning up resources for output tensors"); - tearDownTensors(outputs, numOutputTensors); - outputs = nullptr; - } - return StatusCode::SUCCESS; -} - -// Helper method to allocate a buffer. -iotensor::StatusCode iotensor::IOTensor::allocateBuffer(uint8_t** buffer, - std::vector dims, - Qnn_DataType_t dataType) { - size_t elementCount = datautil::calculateElementCount(dims); - auto returnStatus = StatusCode::SUCCESS; - switch (dataType) { - case QNN_DATATYPE_FLOAT_32: - QNN_DEBUG("allocating float buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_8: - case QNN_DATATYPE_UFIXED_POINT_8: - QNN_DEBUG("allocating uint8_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_16: - case QNN_DATATYPE_UFIXED_POINT_16: - QNN_DEBUG("allocating uint16_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_UINT_32: - QNN_DEBUG("allocating uint32_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_8: - QNN_DEBUG("allocating int8_t buffer"); - returnStatus = 
allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_16: - QNN_DEBUG("allocating int16_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_INT_32: - QNN_DEBUG("allocating int32_t buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - case QNN_DATATYPE_BOOL_8: - QNN_DEBUG("allocating bool buffer"); - returnStatus = allocateBuffer(reinterpret_cast(buffer), elementCount); - break; - - default: - QNN_ERROR("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - return returnStatus; -} - -// Helper method to allocate a buffer. -template -iotensor::StatusCode iotensor::IOTensor::allocateBuffer(T** buffer, size_t& elementCount) { - QNN_DEBUG("ElementCount: %d, sizeof(T): %d, total size: %d", - elementCount, - sizeof(T), - elementCount * sizeof(T)); - *buffer = (T*)malloc(elementCount * sizeof(T)); - if (nullptr == *buffer) { - QNN_ERROR("mem alloc failed for *buffer"); - return StatusCode::FAILURE; - } - return StatusCode::SUCCESS; -} - -// Convert data to float or de-quantization. This is used when -// user requests for float output and the model produces -// non-float output. -iotensor::StatusCode iotensor::IOTensor::convertToFloat(float** out, Qnn_Tensor_t* tensor) { - if (nullptr == tensor) { - QNN_ERROR("tensors is nullptr"); - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - auto returnStatus = StatusCode::SUCCESS; - size_t elementCount = datautil::calculateElementCount(dims); - - if(!(*out)) { // zw: If (*out != nullptr), *out point to share memory, don't need to allocate buffer. 
- returnStatus = allocateBuffer(out, elementCount); - } - - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("failure in allocateBuffer"); - return returnStatus; - } - switch (QNN_TENSOR_GET_DATA_TYPE(tensor)) { - case QNN_DATATYPE_UFIXED_POINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::tfNToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - elementCount)) { - QNN_ERROR("failure in tfNToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UFIXED_POINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::tfNToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.offset, - QNN_TENSOR_GET_QUANT_PARAMS(tensor).scaleOffsetEncoding.scale, - elementCount)) { - QNN_ERROR("failure in tfNToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_UINT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - 
reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_16: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_INT_32: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - case QNN_DATATYPE_BOOL_8: - if (datautil::StatusCode::SUCCESS != - datautil::castToFloat( - *out, - reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(tensor).data), - elementCount)) { - QNN_ERROR("failure in castToFloat"); - returnStatus = StatusCode::FAILURE; - } - break; - - default: - QNN_ERROR("Datatype not supported yet!"); - returnStatus = StatusCode::FAILURE; - break; - } - if (StatusCode::SUCCESS != returnStatus) { - QNN_DEBUG("freeing *out"); - if (*out != nullptr) { - free(*out); - *out = nullptr; - } - } - return returnStatus; -} - -// Helper method to convert Output tensors to float and write them -// out to files. 
-iotensor::StatusCode iotensor::IOTensor::convertAndWriteOutputTensorInFloat( - Qnn_Tensor_t* output, - std::vector outputPaths, - std::string fileName, - size_t outputBatchSize) { - if (nullptr == output) { - QNN_ERROR("output is nullptr"); - return StatusCode::FAILURE; - } - - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); - float* floatBuffer = nullptr; - returnStatus = convertToFloat(&floatBuffer, output); - if (StatusCode::SUCCESS != returnStatus) { - QNN_ERROR("failure in convertToFloat"); - return StatusCode::FAILURE; - } - uint8_t* bufferToWrite = reinterpret_cast(floatBuffer); - if (datautil::StatusCode::SUCCESS != - datautil::writeBatchDataToFile( - outputPaths, fileName, dims, QNN_DATATYPE_FLOAT_32, bufferToWrite, outputBatchSize)) { - QNN_ERROR("failure in writeBatchDataToFile"); - returnStatus = StatusCode::FAILURE; - } - if (nullptr != floatBuffer) { - QNN_DEBUG("freeing floatBuffer"); - free(floatBuffer); - floatBuffer = nullptr; - } - return returnStatus; -} - -// Helper method to write out output. There is no de-quantization here. -// Just write output as is to files. 
-iotensor::StatusCode iotensor::IOTensor::writeOutputTensor(Qnn_Tensor_t* output, - std::vector outputPaths, - std::string fileName, - size_t outputBatchSize) { - if (nullptr == output) { - QNN_ERROR("output is nullptr"); - return StatusCode::FAILURE; - } - auto returnStatus = StatusCode::SUCCESS; - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(output), QNN_TENSOR_GET_RANK(output)); - uint8_t* bufferToWrite = reinterpret_cast(QNN_TENSOR_GET_CLIENT_BUF(output).data); - if (datautil::StatusCode::SUCCESS != - datautil::writeBatchDataToFile(outputPaths, - fileName, - dims, - QNN_TENSOR_GET_DATA_TYPE(output), - bufferToWrite, - outputBatchSize)) { - QNN_ERROR("failure in writeBatchDataToFile"); - returnStatus = StatusCode::FAILURE; - } - return returnStatus; -} - -// Write out all output tensors to files. If output_data_type is float, -// then all outputs will be raw floats regardless of what the model outputs. -// If the output_data_type is native, then output is written as produced by the model. -// Also, for native option, a json with quantization parameters is written out. -// If output_data_type is float_and_native, both above are done. -// If the output in the graph is float, then output_data_type has no effect. 
-iotensor::StatusCode iotensor::IOTensor::writeOutputTensors(uint32_t graphIdx, - size_t startIdx, - char* graphName, - Qnn_Tensor_t* outputs, - uint32_t numOutputs, - iotensor::OutputDataType outputDatatype, - uint32_t graphsCount, - std::string outputPath, - size_t numInputFilesPopulated, - size_t outputBatchSize) { - if (nullptr == outputs) { - QNN_ERROR("Received nullptr"); - return StatusCode::FAILURE; - } - if (graphsCount > 1) { - if (nullptr != graphName && strlen(graphName) > 0) { - outputPath += (pal::Path::getSeparator() + std::string(graphName)); - } else { - outputPath += (pal::Path::getSeparator() + std::string("Graph_") + std::to_string(graphIdx)); - } - } - auto returnStatus = StatusCode::SUCCESS; - std::vector outputPaths; - for (size_t idx = 0; idx < numInputFilesPopulated; idx++) { - std::string output = outputPath + (pal::Path::getSeparator() + std::string("Result_") + - std::to_string(startIdx + idx)); - outputPaths.push_back(output); - } - for (size_t outputIdx = 0; outputIdx < numOutputs; outputIdx++) { - QNN_DEBUG("Writing output for outputIdx: %d", outputIdx); - std::string outputFilePrefix; - if (nullptr != QNN_TENSOR_GET_NAME(outputs[outputIdx]) && - strlen(QNN_TENSOR_GET_NAME(outputs[outputIdx])) > 0) { - outputFilePrefix = std::string(QNN_TENSOR_GET_NAME(outputs[outputIdx])); - } else { - outputFilePrefix = std::string("Output_") + std::to_string(outputIdx); - } - auto outputFile = outputFilePrefix + std::string(".raw"); - auto outputFileNative = outputFilePrefix + std::string("_native.raw"); - if (QNN_TENSOR_GET_DATA_TYPE(outputs[outputIdx]) == QNN_DATATYPE_FLOAT_32) { - QNN_DEBUG("Writing in output->dataType == QNN_DATATYPE_FLOAT_32"); - returnStatus = - writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); - } else if (outputDatatype == OutputDataType::FLOAT_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_ONLY"); - returnStatus = convertAndWriteOutputTensorInFloat( - 
&(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); - } else if (outputDatatype == OutputDataType::NATIVE_ONLY) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::NATIVE_ONLY"); - returnStatus = - writeOutputTensor(&(outputs[outputIdx]), outputPaths, outputFileNative, outputBatchSize); - } else if (outputDatatype == OutputDataType::FLOAT_AND_NATIVE) { - QNN_DEBUG("Writing in output->dataType == OutputDataType::FLOAT_AND_NATIVE"); - returnStatus = convertAndWriteOutputTensorInFloat( - &(outputs[outputIdx]), outputPaths, outputFile, outputBatchSize); - if (StatusCode::SUCCESS == returnStatus) { - returnStatus = writeOutputTensor( - &(outputs[outputIdx]), outputPaths, outputFileNative, outputBatchSize); - } - } - } - return returnStatus; -} - -// Helper method to allocate a buffer and copy data to it. -iotensor::StatusCode iotensor::IOTensor::allocateAndCopyBuffer(uint8_t** buffer, - Qnn_Tensor_t* tensor) { - if (nullptr == tensor) { - return StatusCode::FAILURE; - } - std::vector dims; - fillDims(dims, QNN_TENSOR_GET_DIMENSIONS(tensor), QNN_TENSOR_GET_RANK(tensor)); - datautil::StatusCode datautilStatus; - size_t length; - std::tie(datautilStatus, length) = - datautil::calculateLength(dims, QNN_TENSOR_GET_DATA_TYPE(tensor)); - if (datautilStatus != datautil::StatusCode::SUCCESS) { - return StatusCode::FAILURE; - } - if (StatusCode::SUCCESS != allocateBuffer(buffer, dims, QNN_TENSOR_GET_DATA_TYPE(tensor))) { - QNN_ERROR("failure in allocateBuffer"); - return StatusCode::FAILURE; - } - pal::StringOp::memscpy(*buffer, - length * sizeof(uint8_t), - QNN_TENSOR_GET_CLIENT_BUF(tensor).data, - length * sizeof(uint8_t)); - return StatusCode::SUCCESS; -} - -iotensor::StatusCode iotensor::IOTensor::fillDims(std::vector& dims, - uint32_t* inDimensions, - uint32_t rank) { - if (nullptr == inDimensions) { - QNN_ERROR("input dimensions is nullptr"); - return StatusCode::FAILURE; - } - for (size_t r = 0; r < rank; r++) { - dims.push_back(inDimensions[r]); - 
} - return StatusCode::SUCCESS; -} - -iotensor::OutputDataType iotensor::parseOutputDataType(std::string dataTypeString) { - std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); - OutputDataType parsedDataType = OutputDataType::INVALID; - if (dataTypeString == "float_only") { - parsedDataType = OutputDataType::FLOAT_ONLY; - } else if (dataTypeString == "native_only") { - parsedDataType = OutputDataType::NATIVE_ONLY; - } else if (dataTypeString == "float_and_native") { - parsedDataType = OutputDataType::FLOAT_AND_NATIVE; - } - return parsedDataType; -} - -iotensor::InputDataType iotensor::parseInputDataType(std::string dataTypeString) { - std::transform(dataTypeString.begin(), dataTypeString.end(), dataTypeString.begin(), ::tolower); - InputDataType parsedDataType = InputDataType::INVALID; - if (dataTypeString == "float") { - parsedDataType = InputDataType::FLOAT; - } else if (dataTypeString == "native") { - parsedDataType = InputDataType::NATIVE; - } - return parsedDataType; -} diff --git a/LibQNNHelper/src/Utils/IOTensor.hpp b/LibQNNHelper/src/Utils/IOTensor.hpp deleted file mode 100644 index 4686a3b..0000000 --- a/LibQNNHelper/src/Utils/IOTensor.hpp +++ /dev/null @@ -1,120 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include -#include - -#include "QnnBackend.h" -#include "QnnCommon.h" -#include "QnnContext.h" -#include "QnnGraph.h" -#include "QnnProperty.h" -#include "QnnSampleAppUtils.hpp" -#include "QnnTensor.h" -#include "QnnTypes.h" -#include "QnnWrapperUtils.hpp" - -namespace qnn { -namespace tools { -namespace iotensor { - -enum class StatusCode { SUCCESS, FAILURE }; -enum class OutputDataType { FLOAT_ONLY, NATIVE_ONLY, FLOAT_AND_NATIVE, INVALID }; -enum class InputDataType { FLOAT, NATIVE, INVALID }; - -OutputDataType parseOutputDataType(std::string dataTypeString); -InputDataType parseInputDataType(std::string dataTypeString); - -using PopulateInputTensorsRetType_t = std::tuple; - -class IOTensor { - public: - StatusCode setupInputAndOutputTensors(Qnn_Tensor_t **inputs, - Qnn_Tensor_t **outputs, - qnn_wrapper_api::GraphInfo_t graphInfo); - - StatusCode writeOutputTensors(uint32_t graphIdx, - size_t startIdx, - char *graphName, - Qnn_Tensor_t *outputs, - uint32_t numOutputs, - OutputDataType outputDatatype, - uint32_t graphsCount, - std::string outputPath, - size_t numInputFilesPopulated, - size_t outputBatchSize); - - PopulateInputTensorsRetType_t populateInputTensors( - uint32_t graphIdx, - const std::vector> &filePathsVector, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - const std::unordered_map &inputNameToIndex, - Qnn_Tensor_t *inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - iotensor::InputDataType inputDataType); - - StatusCode populateInputTensors(uint32_t graphIdx, - std::vector inputBuffers, - Qnn_Tensor_t *inputs, - qnn_wrapper_api::GraphInfo_t graphInfo, - InputDataType inputDataType); - - StatusCode tearDownInputAndOutputTensors(Qnn_Tensor_t *inputs, - Qnn_Tensor_t *outputs, - size_t numInputTensors, - size_t numOutputTensors); - - StatusCode convertToFloat(float **out, Qnn_Tensor_t 
*output); // zw: change it to public function. - - StatusCode fillDims(std::vector &dims, uint32_t *inDimensions, uint32_t rank); // zw: change it to public function. - - private: - PopulateInputTensorsRetType_t populateInputTensor(const std::vector &filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - Qnn_Tensor_t *input, - InputDataType inputDataType); - - StatusCode populateInputTensor(uint8_t *buffer, Qnn_Tensor_t *input, InputDataType inputDataType); - - PopulateInputTensorsRetType_t readDataAndAllocateBuffer(const std::vector &filePaths, - const size_t filePathsIndexOffset, - const bool loopBackToStart, - std::vector dims, - Qnn_DataType_t dataType, - uint8_t **bufferToCopy); - - template - StatusCode allocateBuffer(T **buffer, size_t &elementCount); - - StatusCode convertAndWriteOutputTensorInFloat(Qnn_Tensor_t *output, - std::vector outputPaths, - std::string fileName, - size_t outputBatchSize); - - StatusCode writeOutputTensor(Qnn_Tensor_t *output, - std::vector outputPaths, - std::string fileName, - size_t outputBatchSize); - - StatusCode allocateAndCopyBuffer(uint8_t **buffer, Qnn_Tensor_t *tensor); - - StatusCode tearDownTensors(Qnn_Tensor_t *tensors, uint32_t tensorCount); - - StatusCode allocateBuffer(uint8_t **buffer, std::vector dims, Qnn_DataType_t dataType); - - StatusCode copyFromFloatToNative(float *floatBuffer, Qnn_Tensor_t *tensor); - - StatusCode setupTensors(Qnn_Tensor_t **tensors, uint32_t tensorCount, Qnn_Tensor_t *tensorsInfo); -}; -} // namespace iotensor -} // namespace tools -} // namespace qnn \ No newline at end of file diff --git a/LibQNNHelper/src/Utils/QnnSampleAppUtils.cpp b/LibQNNHelper/src/Utils/QnnSampleAppUtils.cpp deleted file mode 100644 index e29ce7f..0000000 --- a/LibQNNHelper/src/Utils/QnnSampleAppUtils.cpp +++ /dev/null @@ -1,383 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. 
All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include - -#include "Logger.hpp" -#include "PAL/Directory.hpp" -#include "PAL/FileOp.hpp" -#include "PAL/Path.hpp" -#include "PAL/StringOp.hpp" -#include "QnnSampleAppUtils.hpp" -#include "QnnTypeMacros.hpp" - -using namespace qnn; -using namespace qnn::tools; - -void sample_app::split(std::vector &splitString, - const std::string &tokenizedString, - const char separator) { - splitString.clear(); - std::istringstream tokenizedStringStream(tokenizedString); - while (!tokenizedStringStream.eof()) { - std::string value; - getline(tokenizedStringStream, value, separator); - if (!value.empty()) { - splitString.push_back(value); - } - } -} - -void sample_app::parseInputFilePaths(std::vector &inputFilePaths, - std::vector &paths, - std::string separator) { - for (auto &inputInfo : inputFilePaths) { - auto position = inputInfo.find(separator); - if (position != std::string::npos) { - auto path = inputInfo.substr(position + separator.size()); - paths.push_back(path); - } else { - paths.push_back(inputInfo); - } - } -} - -sample_app::ReadInputListsRetType_t sample_app::readInputLists( - std::vector inputFileListPaths) { - std::vector>> filePathsLists; - std::vector> inputNameToIndexMaps; - for (auto const &path : inputFileListPaths) { - bool readSuccess; - std::vector> filePathList; - std::unordered_map inputNameToIndex; - std::tie(filePathList, inputNameToIndex, readSuccess) = readInputList(path); - if (!readSuccess) { - filePathsLists.clear(); - return std::make_tuple(filePathsLists, inputNameToIndexMaps, false); - } - filePathsLists.push_back(filePathList); - inputNameToIndexMaps.push_back(inputNameToIndex); - } - return std::make_tuple(filePathsLists, inputNameToIndexMaps, true); -} - -sample_app::ReadInputListRetType_t sample_app::readInputList(const 
std::string inputFileListPath) { - std::queue lines; - std::ifstream fileListStream(inputFileListPath); - if (!fileListStream) { - QNN_ERROR("Failed to open input file: %s", inputFileListPath.c_str()); - return std::make_tuple(std::vector>{}, - std::unordered_map{}, - false); - } - - std::string fileLine; - while (std::getline(fileListStream, fileLine)) { - if (fileLine.empty()) continue; - lines.push(fileLine); - } - - if (!lines.empty() && lines.front().compare(0, 1, "#") == 0) { - lines.pop(); - } - - if (!lines.empty() && lines.front().compare(0, 1, "%") == 0) { - lines.pop(); - } - - std::string separator = ":="; - std::vector> filePathsList; - std::unordered_map inputNameToIndex; - if (!lines.empty()) { - inputNameToIndex = extractInputNameIndices(lines.front(), separator); - } - while (!lines.empty()) { - std::vector paths{}; - std::vector inputFilePaths; - split(inputFilePaths, lines.front(), ' '); - parseInputFilePaths(inputFilePaths, paths, separator); - filePathsList.reserve(paths.size()); - for (size_t idx = 0; idx < paths.size(); idx++) { - if (idx >= filePathsList.size()) { - filePathsList.push_back(std::vector()); - } - filePathsList[idx].push_back(paths[idx]); - } - lines.pop(); - } - return std::make_tuple(filePathsList, inputNameToIndex, true); -} - -std::unordered_map sample_app::extractInputNameIndices( - const std::string &inputLine, const std::string &separator) { - std::vector inputFilePaths; - std::unordered_map inputNameToIndex; - split(inputFilePaths, inputLine, ' '); - size_t inputCount = 0; - for (uint32_t idx = 0; idx < inputFilePaths.size(); idx++) { - auto position = inputFilePaths[idx].find(separator); - if (position != std::string::npos) { - auto unsanitizedTensorName = inputFilePaths[idx].substr(0, position); - auto sanitizedTensorName = sanitizeTensorName(unsanitizedTensorName); - if (sanitizedTensorName != unsanitizedTensorName) { - inputNameToIndex[unsanitizedTensorName] = idx; - } - inputNameToIndex[sanitizedTensorName] = idx; 
- inputCount = inputCount + 1; - } - } - return inputCount == inputFilePaths.size() ? inputNameToIndex - : std::unordered_map{}; -} - -std::string sample_app::sanitizeTensorName(std::string name) { - std::string sanitizedName = std::regex_replace(name, std::regex("\\W+"), "_"); - if (!std::isalpha(sanitizedName[0]) && sanitizedName[0] != '_') { - sanitizedName = "_" + sanitizedName; - } - return sanitizedName; -} - -sample_app::ProfilingLevel sample_app::parseProfilingLevel(std::string profilingLevelString) { - std::transform(profilingLevelString.begin(), - profilingLevelString.end(), - profilingLevelString.begin(), - ::tolower); - ProfilingLevel parsedProfilingLevel = ProfilingLevel::INVALID; - if (profilingLevelString == "off") { - parsedProfilingLevel = ProfilingLevel::OFF; - } else if (profilingLevelString == "basic") { - parsedProfilingLevel = ProfilingLevel::BASIC; - } else if (profilingLevelString == "detailed") { - parsedProfilingLevel = ProfilingLevel::DETAILED; - } - return parsedProfilingLevel; -} - -bool sample_app::deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src) { - if (nullptr == dst || nullptr == src) { - QNN_ERROR("Received nullptr"); - return false; - } - // set tensor.version before using QNN_TENSOR_SET macros, as they require the version to be set - // to correctly assign values - dst->version = src->version; - const char *tensorName = QNN_TENSOR_GET_NAME(src); - if (!tensorName) { - QNN_TENSOR_SET_NAME(dst, nullptr); - } else { - QNN_TENSOR_SET_NAME(dst, pal::StringOp::strndup(tensorName, strlen(tensorName))); - } - QNN_TENSOR_SET_ID(dst, QNN_TENSOR_GET_ID(src)); - QNN_TENSOR_SET_TYPE(dst, QNN_TENSOR_GET_TYPE(src)); - QNN_TENSOR_SET_DATA_FORMAT(dst, QNN_TENSOR_GET_DATA_FORMAT(src)); - QNN_TENSOR_SET_DATA_TYPE(dst, QNN_TENSOR_GET_DATA_TYPE(src)); - Qnn_QuantizeParams_t qParams = QNN_QUANTIZE_PARAMS_INIT; - qParams.encodingDefinition = QNN_TENSOR_GET_QUANT_PARAMS(src).encodingDefinition; - qParams.quantizationEncoding = 
QNN_QUANTIZATION_ENCODING_UNDEFINED; - if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == - QNN_QUANTIZATION_ENCODING_SCALE_OFFSET) { - qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; - qParams.scaleOffsetEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).scaleOffsetEncoding; - } else if (QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding == - QNN_QUANTIZATION_ENCODING_AXIS_SCALE_OFFSET) { - qParams.quantizationEncoding = QNN_TENSOR_GET_QUANT_PARAMS(src).quantizationEncoding; - qParams.axisScaleOffsetEncoding.axis = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.axis; - qParams.axisScaleOffsetEncoding.numScaleOffsets = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; - if (QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets > 0) { - qParams.axisScaleOffsetEncoding.scaleOffset = (Qnn_ScaleOffset_t *)malloc( - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets * - sizeof(Qnn_ScaleOffset_t)); - if (qParams.axisScaleOffsetEncoding.scaleOffset) { - for (size_t idx = 0; - idx < QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.numScaleOffsets; - idx++) { - qParams.axisScaleOffsetEncoding.scaleOffset[idx].scale = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].scale; - qParams.axisScaleOffsetEncoding.scaleOffset[idx].offset = - QNN_TENSOR_GET_QUANT_PARAMS(src).axisScaleOffsetEncoding.scaleOffset[idx].offset; - } - } - } - } - QNN_TENSOR_SET_QUANT_PARAMS(dst, qParams); - QNN_TENSOR_SET_RANK(dst, QNN_TENSOR_GET_RANK(src)); - QNN_TENSOR_SET_DIMENSIONS(dst, nullptr); - if (QNN_TENSOR_GET_RANK(src) > 0) { - QNN_TENSOR_SET_DIMENSIONS(dst, (uint32_t *)malloc(QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t))); - if (QNN_TENSOR_GET_DIMENSIONS(dst)) { - pal::StringOp::memscpy(QNN_TENSOR_GET_DIMENSIONS(dst), - QNN_TENSOR_GET_RANK(src) * sizeof(uint32_t), - QNN_TENSOR_GET_DIMENSIONS(src), - QNN_TENSOR_GET_RANK(src) * 
sizeof(uint32_t)); - } - } - return true; -} - -bool sample_app::copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, - Qnn_Tensor_t *&tensorWrappers, - uint32_t tensorsCount) { - QNN_FUNCTION_ENTRY_LOG; - auto returnStatus = true; - tensorWrappers = (Qnn_Tensor_t *)calloc(tensorsCount, sizeof(Qnn_Tensor_t)); - if (nullptr == tensorWrappers) { - QNN_ERROR("Failed to allocate memory for tensorWrappers."); - return false; - } - if (returnStatus) { - for (size_t tIdx = 0; tIdx < tensorsCount; tIdx++) { - QNN_DEBUG("Extracting tensorInfo for tensor Idx: %d", tIdx); - tensorWrappers[tIdx] = QNN_TENSOR_INIT; - deepCopyQnnTensorInfo(&tensorWrappers[tIdx], &tensorsInfoSrc[tIdx]); - } - } - QNN_FUNCTION_EXIT_LOG; - return returnStatus; -} - -bool sample_app::copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, - qnn_wrapper_api::GraphInfo_t *graphInfoDst) { - graphInfoDst->graphName = nullptr; - if (graphInfoSrc->graphName) { - graphInfoDst->graphName = - pal::StringOp::strndup(graphInfoSrc->graphName, strlen(graphInfoSrc->graphName)); - } - graphInfoDst->inputTensors = nullptr; - graphInfoDst->numInputTensors = 0; - if (graphInfoSrc->graphInputs) { - if (!copyTensorsInfo( - graphInfoSrc->graphInputs, graphInfoDst->inputTensors, graphInfoSrc->numGraphInputs)) { - return false; - } - graphInfoDst->numInputTensors = graphInfoSrc->numGraphInputs; - } - graphInfoDst->outputTensors = nullptr; - graphInfoDst->numOutputTensors = 0; - if (graphInfoSrc->graphOutputs) { - if (!copyTensorsInfo(graphInfoSrc->graphOutputs, - graphInfoDst->outputTensors, - graphInfoSrc->numGraphOutputs)) { - return false; - } - graphInfoDst->numOutputTensors = graphInfoSrc->numGraphOutputs; - } - return true; -} - -bool sample_app::copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, - const uint32_t numGraphs, - qnn_wrapper_api::GraphInfo_t **&graphsInfo) { - QNN_FUNCTION_ENTRY_LOG; - if (!graphsInput) { - QNN_ERROR("Received nullptr for graphsInput."); - return false; - } - 
auto returnStatus = true; - graphsInfo = - (qnn_wrapper_api::GraphInfo_t **)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t *)); - qnn_wrapper_api::GraphInfo_t *graphInfoArr = - (qnn_wrapper_api::GraphInfo_t *)calloc(numGraphs, sizeof(qnn_wrapper_api::GraphInfo_t)); - if (nullptr == graphsInfo || nullptr == graphInfoArr) { - QNN_ERROR("Failure to allocate memory for *graphInfo"); - returnStatus = false; - } - if (true == returnStatus) { - for (size_t gIdx = 0; gIdx < numGraphs; gIdx++) { - QNN_DEBUG("Extracting graphsInfo for graph Idx: %d", gIdx); - if (graphsInput[gIdx].version == QNN_SYSTEM_CONTEXT_GRAPH_INFO_VERSION_1) { - copyGraphsInfoV1(&graphsInput[gIdx].graphInfoV1, &graphInfoArr[gIdx]); - } - graphsInfo[gIdx] = graphInfoArr + gIdx; - } - } - if (true != returnStatus) { - QNN_ERROR("Received an ERROR during extractGraphsInfo. Freeing resources."); - if (graphsInfo) { - for (uint32_t gIdx = 0; gIdx < numGraphs; gIdx++) { - if (graphsInfo[gIdx]) { - if (nullptr != graphsInfo[gIdx]->graphName) { - free(graphsInfo[gIdx]->graphName); - graphsInfo[gIdx]->graphName = nullptr; - } - qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->inputTensors, - graphsInfo[gIdx]->numInputTensors); - qnn_wrapper_api::freeQnnTensors(graphsInfo[gIdx]->outputTensors, - graphsInfo[gIdx]->numOutputTensors); - } - } - free(*graphsInfo); - } - free(graphsInfo); - graphsInfo = nullptr; - } - QNN_FUNCTION_EXIT_LOG; - return true; -} - -bool sample_app::copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, - qnn_wrapper_api::GraphInfo_t **&graphsInfo, - uint32_t &graphsCount) { - if (nullptr == binaryInfo) { - QNN_ERROR("binaryInfo is nullptr."); - return false; - } - graphsCount = 0; - if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_1) { - if (binaryInfo->contextBinaryInfoV1.graphs) { - if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV1.graphs, - binaryInfo->contextBinaryInfoV1.numGraphs, - graphsInfo)) { - QNN_ERROR("Failed while copying 
graphs Info."); - return false; - } - graphsCount = binaryInfo->contextBinaryInfoV1.numGraphs; - return true; - } - } else if (binaryInfo->version == QNN_SYSTEM_CONTEXT_BINARY_INFO_VERSION_2) { - if (binaryInfo->contextBinaryInfoV2.graphs) { - if (!copyGraphsInfo(binaryInfo->contextBinaryInfoV2.graphs, - binaryInfo->contextBinaryInfoV2.numGraphs, - graphsInfo)) { - QNN_ERROR("Failed while copying graphs Info."); - return false; - } - graphsCount = binaryInfo->contextBinaryInfoV2.numGraphs; - return true; - } - } - QNN_ERROR("Unrecognized system context binary info version."); - return false; -} - -QnnLog_Level_t sample_app::parseLogLevel(std::string logLevelString) { - QNN_FUNCTION_ENTRY_LOG; - std::transform(logLevelString.begin(), logLevelString.end(), logLevelString.begin(), ::tolower); - QnnLog_Level_t parsedLogLevel = QNN_LOG_LEVEL_MAX; - if (logLevelString == "error") { - parsedLogLevel = QNN_LOG_LEVEL_ERROR; - } else if (logLevelString == "warn") { - parsedLogLevel = QNN_LOG_LEVEL_WARN; - } else if (logLevelString == "info") { - parsedLogLevel = QNN_LOG_LEVEL_INFO; - } else if (logLevelString == "verbose") { - parsedLogLevel = QNN_LOG_LEVEL_VERBOSE; - } else if (logLevelString == "debug") { - parsedLogLevel = QNN_LOG_LEVEL_DEBUG; - } - QNN_FUNCTION_EXIT_LOG; - return parsedLogLevel; -} diff --git a/LibQNNHelper/src/Utils/QnnSampleAppUtils.hpp b/LibQNNHelper/src/Utils/QnnSampleAppUtils.hpp deleted file mode 100644 index 3deb466..0000000 --- a/LibQNNHelper/src/Utils/QnnSampleAppUtils.hpp +++ /dev/null @@ -1,82 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include -#include -#include -#include -#include -#include -#include -#include -#include "Logger.hpp" - -#include "SampleApp.hpp" - -namespace qnn { -namespace tools { -namespace sample_app { - -enum class ProfilingLevel { OFF, BASIC, DETAILED, INVALID }; - -using ReadInputListRetType_t = std:: - tuple>, std::unordered_map, bool>; - -ReadInputListRetType_t readInputList(std::string inputFileListPath); - -using ReadInputListsRetType_t = std::tuple>>, - std::vector>, - bool>; - -ReadInputListsRetType_t readInputLists(std::vector inputFileListPath); - -std::unordered_map extractInputNameIndices(const std::string &inputLine, - const std::string &separator); - -std::string sanitizeTensorName(std::string name); - -ProfilingLevel parseProfilingLevel(std::string profilingLevelString); - -void parseInputFilePaths(std::vector &inputFilePaths, - std::vector &paths, - std::string separator); - -void split(std::vector &splitString, - const std::string &tokenizedString, - const char separator); - -bool copyMetadataToGraphsInfo(const QnnSystemContext_BinaryInfo_t *binaryInfo, - qnn_wrapper_api::GraphInfo_t **&graphsInfo, - uint32_t &graphsCount); - -bool copyGraphsInfo(const QnnSystemContext_GraphInfo_t *graphsInput, - const uint32_t numGraphs, - qnn_wrapper_api::GraphInfo_t **&graphsInfo); - -bool copyGraphsInfoV1(const QnnSystemContext_GraphInfoV1_t *graphInfoSrc, - qnn_wrapper_api::GraphInfo_t *graphInfoDst); - -bool copyTensorsInfo(const Qnn_Tensor_t *tensorsInfoSrc, - Qnn_Tensor_t *&tensorWrappers, - uint32_t tensorsCount); - -bool deepCopyQnnTensorInfo(Qnn_Tensor_t *dst, const Qnn_Tensor_t *src); - -QnnLog_Level_t parseLogLevel(std::string logLevelString); - -void inline exitWithMessage(std::string &&msg, int code) { - // std::cerr << msg << std::endl; - QNN_ERROR(msg.c_str()); - std::exit(code); -} - -} // namespace 
sample_app -} // namespace tools -} // namespace qnn diff --git a/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.cpp b/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.cpp deleted file mode 100644 index 5ad3dd3..0000000 --- a/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.cpp +++ /dev/null @@ -1,48 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include - -#include "QnnTypeMacros.hpp" -#include "QnnWrapperUtils.hpp" - -qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeQnnTensor(Qnn_Tensor_t &tensor) { - // free all pointer allocations in struct - free((void *)QNN_TENSOR_GET_NAME(tensor)); - free(QNN_TENSOR_GET_DIMENSIONS(tensor)); - if (QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)) { - free(QNN_TENSOR_GET_IS_DYNAMIC_DIMENSIONS(tensor)); - } - return MODEL_NO_ERROR; -} - -qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeQnnTensors(Qnn_Tensor_t *&tensors, - uint32_t numTensors) { - // free all pointer allocations in struct - for (size_t i = 0; i < numTensors; i++) { - freeQnnTensor(tensors[i]); - } - free(tensors); - return MODEL_NO_ERROR; -} - -qnn_wrapper_api::ModelError_t qnn_wrapper_api::freeGraphsInfo(GraphInfoPtr_t **graphsInfo, - uint32_t numGraphs) { - if (graphsInfo == nullptr || *graphsInfo == nullptr) { - return MODEL_TENSOR_ERROR; - } - for (uint32_t i = 0; i < numGraphs; i++) { - free((*graphsInfo)[i]->graphName); - freeQnnTensors((*graphsInfo)[i]->inputTensors, (*graphsInfo)[i]->numInputTensors); - freeQnnTensors((*graphsInfo)[i]->outputTensors, (*graphsInfo)[i]->numOutputTensors); - } - free(**graphsInfo); - free(*graphsInfo); - *graphsInfo = nullptr; - return MODEL_NO_ERROR; -} diff --git a/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.hpp b/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.hpp deleted 
file mode 100644 index 8702a92..0000000 --- a/LibQNNHelper/src/WrapperUtils/QnnWrapperUtils.hpp +++ /dev/null @@ -1,166 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#pragma once - -#include "QnnContext.h" -#include "QnnGraph.h" -#include "QnnTensor.h" -#include "QnnTypes.h" - -namespace qnn_wrapper_api { - -// macro utils - -// Enables FILE[LINE]: FMT for VALIDATE macro -#ifdef QNN_ENABLE_DEBUG - -#define PRINTF(fmt, ...) \ - do { \ - printf("%s[%d]: ", __FILE__, __LINE__); \ - printf((fmt), ##__VA_ARGS__); \ - } while (0) - -#else - -#define PRINTF(fmt, ...) \ - do { \ - printf((fmt), ##__VA_ARGS__); \ - } while (0) - -#endif - -#ifdef QNN_ENABLE_DEBUG -#define PRINT_DEBUG(fmt, ...) \ - do { \ - printf("[ DEBUG ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) -#else -#define PRINT_DEBUG(fmt, ...) -#endif - -// Enables ERROR tag for errors -#define PRINT_ERROR(fmt, ...) \ - do { \ - printf("[ ERROR ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -// Enables WARNING tag for errors -#define PRINT_WARNING(fmt, ...) \ - do { \ - printf("[ WARNING ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -// Enables INFO tag for errors -#define PRINT_INFO(fmt, ...) 
\ - do { \ - printf("[ INFO ] "); \ - PRINTF((fmt), ##__VA_ARGS__); \ - } while (0) - -#define STRINGFY(str) str -#define STRINGFYVALUE(str) STRINGFY(str) - -// Ensures ModelError_t returning functions return MODEL_NO_ERROR -// retStatus should be set to MODEL_NO_ERROR before passing to macro -#define VALIDATE(value, retStatus) \ - do { \ - retStatus = value; \ - if (retStatus != qnn_wrapper_api::MODEL_NO_ERROR) { \ - PRINT_ERROR( \ - "%s expected MODEL_NO_ERROR, got %s\n", #value, getModelErrorName(retStatus).c_str()); \ - return retStatus; \ - } \ - } while (0) - -// macros for retrieving binary data -#define BINVARSTART(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ - (void *)_binary_obj_binary_##NAME##_raw_start; \ - }) -#define BINVAREND(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ - (void *)_binary_obj_binary_##NAME##_raw_end; \ - }) -#define BINLEN(NAME) \ - ({ \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_start[]; \ - extern const uint8_t _binary_obj_binary_##NAME##_raw_end[]; \ - (uint32_t)((_binary_obj_binary_##NAME##_raw_end) - (_binary_obj_binary_##NAME##_raw_start)); \ - }) - -typedef enum ModelError { - MODEL_NO_ERROR = 0, - MODEL_TENSOR_ERROR = 1, - MODEL_PARAMS_ERROR = 2, - MODEL_NODES_ERROR = 3, - MODEL_GRAPH_ERROR = 4, - MODEL_CONTEXT_ERROR = 5, - MODEL_GENERATION_ERROR = 6, - MODEL_SETUP_ERROR = 7, - MODEL_INVALID_ARGUMENT_ERROR = 8, - MODEL_FILE_ERROR = 9, - MODEL_MEMORY_ALLOCATE_ERROR = 10, - // Value selected to ensure 32 bits. 
- MODEL_UNKNOWN_ERROR = 0x7FFFFFFF -} ModelError_t; - -typedef struct GraphInfo { - Qnn_GraphHandle_t graph; - char *graphName; - Qnn_Tensor_t *inputTensors; - uint32_t numInputTensors; - Qnn_Tensor_t *outputTensors; - uint32_t numOutputTensors; -} GraphInfo_t; -typedef GraphInfo_t *GraphInfoPtr_t; - -typedef struct GraphConfigInfo { - char *graphName; - const QnnGraph_Config_t **graphConfigs; -} GraphConfigInfo_t; - -/** - * @brief Frees all memory allocated tensor attributes. - * - * @param[in] tensor Qnn_Tensor_t object to free - * - * @return Error code - */ -ModelError_t freeQnnTensor(Qnn_Tensor_t &tensor); - -/** - * @brief Loops through and frees all memory allocated tensor attributes for each tensor - * object. - * - * @param[in] tensors array of tensor objects to free - * - * @param[in] numTensors length of the above tensors array - * - * @return Error code - */ -ModelError_t freeQnnTensors(Qnn_Tensor_t *&tensors, uint32_t numTensors); - -/** - * @brief A helper function to free memory malloced for communicating the Graph for a model(s) - * - * @param[in] graphsInfo Pointer pointing to location of graph objects - * - * @param[in] numGraphs The number of graph objects the above pointer is pointing to - * - * @return Error code - * - */ -ModelError_t freeGraphsInfo(GraphInfoPtr_t **graphsInfo, uint32_t numGraphs); - -} // namespace qnn_wrapper_api diff --git a/LibQNNHelper/src/libQNNHelper.cpp b/LibQNNHelper/src/libQNNHelper.cpp deleted file mode 100644 index c5b35ff..0000000 --- a/LibQNNHelper/src/libQNNHelper.cpp +++ /dev/null @@ -1,523 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "BuildId.hpp" -#include "DynamicLoadUtil.hpp" -#include "Logger.hpp" -#include "PAL/DynamicLoading.hpp" -#include "PAL/GetOpt.hpp" -#include "QnnSampleApp.hpp" -#include "QnnSampleAppUtils.hpp" -#include "LibQNNHelper.hpp" -#include "Utils/Utils.hpp" - -using namespace qnn; -using namespace qnn::log; -using namespace qnn::tools; - -static void* sg_backendHandle{nullptr}; -static void* sg_modelHandle{nullptr}; -static QNN_INTERFACE_VER_TYPE sg_qnnInterface; - -QnnHtpDevice_Infrastructure_t *gs_htpInfra(nullptr); -static bool sg_perf_global = false; - -std::unordered_map> sg_model_map; -static sample_app::ProfilingLevel sg_parsedProfilingLevel = sample_app::ProfilingLevel::OFF; - -namespace qnn { -namespace tools { -namespace libqnnhelper { - -std::unique_ptr initQnnSampleApp(std::string cachedBinaryPath, - std::string backEndPath, - std::string systemLibraryPath, - bool loadFromCachedBinary) { - // Just keep blank for below paths. 
- std::string modelPath; - std::string cachedBinaryPath2; - std::string opPackagePaths; - std::string saveBinaryName; - - if (loadFromCachedBinary) { // *.bin - cachedBinaryPath2 = cachedBinaryPath; - } - else { // *.dll - modelPath = cachedBinaryPath; - } - - iotensor::OutputDataType parsedOutputDataType = iotensor::OutputDataType::FLOAT_ONLY; - iotensor::InputDataType parsedInputDataType = iotensor::InputDataType::FLOAT; - - bool dumpOutputs = true; - bool debug = false; - - sample_app::QnnFunctionPointers qnnFunctionPointers; - // Load backend and model .so and validate all the required function symbols are resolved - auto statusCode = dynamicloadutil::getQnnFunctionPointers(backEndPath, - modelPath, - &qnnFunctionPointers, - &sg_backendHandle, - !loadFromCachedBinary, - &sg_modelHandle); - if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { - if (dynamicloadutil::StatusCode::FAIL_LOAD_BACKEND == statusCode) { - sample_app::exitWithMessage( - "Error initializing QNN Function Pointers: could not load backend: " + backEndPath, EXIT_FAILURE); - } else if (dynamicloadutil::StatusCode::FAIL_LOAD_MODEL == statusCode) { - sample_app::exitWithMessage( - "Error initializing QNN Function Pointers: could not load model: " + modelPath, EXIT_FAILURE); - } else { - sample_app::exitWithMessage("Error initializing QNN Function Pointers", EXIT_FAILURE); - } - } - - if (loadFromCachedBinary) { - statusCode = dynamicloadutil::getQnnSystemFunctionPointers(systemLibraryPath, &qnnFunctionPointers); - if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { - sample_app::exitWithMessage("Error initializing QNN System Function Pointers", EXIT_FAILURE); - } - } - - sg_qnnInterface = qnnFunctionPointers.qnnInterface; - std::unique_ptr app(new sample_app::QnnSampleApp(qnnFunctionPointers, "null", opPackagePaths, sg_backendHandle, "null", - debug, parsedOutputDataType, parsedInputDataType, sg_parsedProfilingLevel, - dumpOutputs, cachedBinaryPath2, saveBinaryName)); - return app; -} 
- -} // namespace libqnnhelper -} // namespace tools -} // namespace qnn - - -std::unique_ptr getQnnSampleApp(std::string model_name) { - auto it = sg_model_map.find(model_name); - if (it != sg_model_map.end()) { - if (it->second) { - auto app = std::move(it->second); - sg_model_map.erase(it); - return app; - } - } - return nullptr; -} - -void SetProcInfo(std::string proc_name, uint64_t epoch) { - setEpoch(epoch); - g_ProcName = proc_name; -} - -bool SetProfilingLevel(int32_t profiling_level) { - sg_parsedProfilingLevel = (sample_app::ProfilingLevel)profiling_level; - - g_profilingLevel = profiling_level; - return true; -} - -bool SetLogLevel(int32_t log_level, const std::string log_path) { - if(log_path != "" && log_path != "None") { - if (_access(log_path.c_str(), 0) == 0) { - std::string STD_OUT = log_path + "\\log_out.txt"; - std::string STD_ERR = log_path + "\\log_err.txt"; - - freopen(STD_OUT.c_str(), "w+", stdout); - freopen(STD_ERR.c_str(), "w+", stderr); - } - } - - if (!qnn::log::initializeLogging()) { - QNN_ERROR("ERROR: Unable to initialize logging!\n"); - return false; - } - - if (!log::setLogLevel((QnnLog_Level_t) log_level)) { - QNN_ERROR("Unable to set log level!\n"); - return false; - } - - g_logEpoch = getEpoch(); - g_logLevel = log_level; - - return true; -} - -bool SetPerfProfileGlobal(const std::string& perf_profile) { - if (nullptr == sg_backendHandle) { - QNN_ERR("SetPerfProfileGlobal::initialize one model before set perf profile!\n"); - return false; - } - - if (nullptr == gs_htpInfra) { - QnnDevice_Infrastructure_t deviceInfra = nullptr; - Qnn_ErrorHandle_t devErr = sg_qnnInterface.deviceGetInfrastructure(&deviceInfra); - - if (devErr != QNN_SUCCESS) { - QNN_ERR("SetPerfProfileGlobal::device error"); - return false; - } - gs_htpInfra = static_cast(deviceInfra); - } - - QnnHtpDevice_PerfInfrastructure_t perfInfra = gs_htpInfra->perfInfra; - QNN_INF("PERF::SetPerfProfileGlobal"); - sg_perf_global = true; - - return boostPerformance(perfInfra, 
perf_profile); -} - -bool RelPerfProfileGlobal() { - if (false == sg_perf_global) { - QNN_ERR("You should set perf profile before you release it!\n"); - return false; - } - - sg_perf_global = false; - QnnHtpDevice_PerfInfrastructure_t perfInfra = gs_htpInfra->perfInfra; - QNN_INF("PERF::RelPerfProfileGlobal"); - - return resetPerformance(perfInfra); -} - -void QNN_ERR(const char* fmt, ...) { - QnnLog_Callback_t logCallback = getLogCallback(); - if (!logCallback) {return;} - - if (QNN_LOG_LEVEL_ERROR > getLogLevel()) { - return; - } - va_list argp; - va_start(argp, fmt); - (*logCallback)(fmt, QNN_LOG_LEVEL_ERROR, getTimediff(), argp); - va_end(argp); -} - -void QNN_WAR(const char* fmt, ...) { - QnnLog_Callback_t logCallback = getLogCallback(); - if (!logCallback) {return;} - - if (QNN_LOG_LEVEL_WARN > getLogLevel()) { - return; - } - va_list argp; - va_start(argp, fmt); - (*logCallback)(fmt, QNN_LOG_LEVEL_WARN, getTimediff(), argp); - va_end(argp); -} - -void QNN_INF(const char* fmt, ...) { - QnnLog_Callback_t logCallback = getLogCallback(); - if (!logCallback) {return;} - - if (QNN_LOG_LEVEL_INFO > getLogLevel()) { - return; - } - - va_list argp; - va_start(argp, fmt); - (*logCallback)(fmt, QNN_LOG_LEVEL_INFO, getTimediff(), argp); - va_end(argp); -} - -void QNN_VEB(const char* fmt, ...) { - QnnLog_Callback_t logCallback = getLogCallback(); - if (!logCallback) {return;} - - if (QNN_LOG_LEVEL_VERBOSE > getLogLevel()) { - return; - } - va_list argp; - va_start(argp, fmt); - (*logCallback)(fmt, QNN_LOG_LEVEL_DEBUG, getTimediff(), argp); - va_end(argp); -} - -void QNN_DBG(const char* fmt, ...) 
{ - QnnLog_Callback_t logCallback = getLogCallback(); - if (!logCallback) {return;} - - if (QNN_LOG_LEVEL_DEBUG > getLogLevel()) { - return; - } - va_list argp; - va_start(argp, fmt); - (*logCallback)(fmt, QNN_LOG_LEVEL_DEBUG, getTimediff(), argp); - va_end(argp); -} - -bool CreateShareMemory(std::string share_memory_name, size_t share_memory_size) { - return CreateShareMem(share_memory_name, share_memory_size); -} - -bool DeleteShareMemory(std::string share_memory_name) { - return DeleteShareMem(share_memory_name); -} - -bool ModelInitializeEx(const std::string& model_name, const std::string& proc_name, const std::string& model_path, - const std::string& backend_lib_path, const std::string& system_lib_path) { - BOOL result = false; - - QNN_INF("LibQNNHelper::ModelInitialize: %s \n", model_name.c_str()); - - if(!proc_name.empty()) { - // If proc_name, create process and save process info & model name to map, load model in new process. - result = TalkToSvc_Initialize(model_name, proc_name, model_path, backend_lib_path, system_lib_path); - return result; - } - - TimerHelper timerHelper; - - bool loadFromCachedBinary{ true }; - std::string cachedBinaryPath = model_path; - std::string backEndPath = backend_lib_path; - std::string systemLibraryPath = system_lib_path; - - std::string suffix_mode_path = cachedBinaryPath.substr(cachedBinaryPath.find_last_of('.') + 1); - if (suffix_mode_path == "bin") { // *.bin - QNN_INFO("cachedBinaryPath: %s", cachedBinaryPath.c_str()); - } - else { // *.dll - loadFromCachedBinary = false; - QNN_INFO("modelPath: %s", cachedBinaryPath.c_str()); - } - // TODO: support *.dlc. 
- - - if (!qnn::log::initializeLogging()) { - QNN_ERROR("ERROR: Unable to initialize logging!\n"); - return false; - } - - { - std::unique_ptr app = libqnnhelper::initQnnSampleApp(cachedBinaryPath, backEndPath, systemLibraryPath, loadFromCachedBinary); - - if (nullptr == app) { - return false; - } - - QNN_INFO("LibQNNHelper build version: %s", qnn::tools::getBuildId().c_str()); - QNN_INFO("Backend build version: %s", app->getBackendBuildId().c_str()); - - app->initializeLog(); - - if (sample_app::StatusCode::SUCCESS != app->initializeBackend()) { - app->reportError("Backend Initialization failure"); - return false; - } - - auto devicePropertySupportStatus = app->isDevicePropertySupported(); - if (sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { - auto createDeviceStatus = app->createDevice(); - if (sample_app::StatusCode::SUCCESS != createDeviceStatus) { - app->reportError("Device Creation failure"); - return false; - } - } - - if (sample_app::StatusCode::SUCCESS != app->initializeProfiling()) { - app->reportError("Profiling Initialization failure"); - return false; - } - - if (sample_app::StatusCode::SUCCESS != app->registerOpPackages()) { - app->reportError("Register Op Packages failure"); - return false; - } - - if (!loadFromCachedBinary) { - if (sample_app::StatusCode::SUCCESS != app->createContext()) { - app->reportError("Context Creation failure"); - return false; - } - if (sample_app::StatusCode::SUCCESS != app->composeGraphs()) { - app->reportError("Graph Prepare failure"); - return false; - } - if (sample_app::StatusCode::SUCCESS != app->finalizeGraphs()) { - app->reportError("Graph Finalize failure"); - return false; - } - } else { - if (sample_app::StatusCode::SUCCESS != app->createFromBinary()) { - app->reportError("Create From Binary failure"); - return false; - } - } - - if (loadFromCachedBinary) { - if (sample_app::StatusCode::SUCCESS != app->initializePerformance()) { - app->reportError("Performance initialization failure"); - 
return false; - } - } - - timerHelper.Print("model_initialize"); - - sg_model_map.insert(std::make_pair(model_name, std::move(app))); - - return true; - } - - return false; -} - -bool ModelInferenceEx(std::string model_name, std::string proc_name, std::string share_memory_name, - std::vector& inputBuffers, std::vector& inputSize, - std::vector& outputBuffers, std::vector& outputSize, - std::string& perfProfile) { - BOOL result = true; - - //QNN_INF("LibQNNHelper::ModelInference: %s \n", model_name.c_str()); - - if (!proc_name.empty()) { - // If proc_name, run the model in that process. - result = TalkToSvc_Inference(model_name, proc_name, share_memory_name, inputBuffers, inputSize, outputBuffers, outputSize, perfProfile); - return result; - } - - TimerHelper timerHelper; - - std::unique_ptr app = getQnnSampleApp(model_name); - - if (result && nullptr == app) { - app->reportError("Inference failure"); - result = false; - } - - if (result && sample_app::StatusCode::SUCCESS != app->executeGraphsBuffers(inputBuffers, outputBuffers, outputSize, perfProfile)) { - app->reportError("Graph Execution failure"); - result = false; - } - - sg_model_map.insert(std::make_pair(model_name, std::move(app))); - - timerHelper.Print("model_inference"); - - return result; -} - -bool ModelDestroyEx(std::string model_name, std::string proc_name) { - BOOL result = false; - - QNN_INF("LibQNNHelper::ModelDestroy: %s \n", model_name.c_str()); - - if (!proc_name.empty()) { - // If proc_name, desctroy the model in that process. 
- result = TalkToSvc_Destroy(model_name, proc_name); - return result; - } - - TimerHelper timerHelper; - - std::unique_ptr app = getQnnSampleApp(model_name); - if (nullptr == app) { - app->reportError("Can't find the model with model_name: " + model_name); - return false; - } - - if (sample_app::StatusCode::SUCCESS != app->destroyPerformance()) { - app->reportError("Performance destroy failure"); - return false; - } - - if (sample_app::StatusCode::SUCCESS != app->freeGraphs()) { - app->reportError("Free graphs failure"); - return false; - } - - if (sample_app::StatusCode::SUCCESS != app->freeContext()) { - app->reportError("Context Free failure"); - return false; - } - - auto devicePropertySupportStatus = app->isDevicePropertySupported(); - if (sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { - auto freeDeviceStatus = app->freeDevice(); - if (sample_app::StatusCode::SUCCESS != freeDeviceStatus) { - app->reportError("Device Free failure"); - return false; - } - } - - timerHelper.Print("model_destroy"); - - return true; -} - - -///////////////////////////////////////////////////////////////////////////// -/// Class LibQNNHelper implementation. -///////////////////////////////////////////////////////////////////////////// - -bool LibQNNHelper::ModelInitialize(const std::string& model_name, const std::string& proc_name, const std::string& model_path, - const std::string& backend_lib_path, const std::string& system_lib_path) { - if (!proc_name.empty()) { // Create process and save process info & model name to map, load model in new process. 
- return TalkToSvc_Initialize(model_name, proc_name, model_path, backend_lib_path, system_lib_path); - } - - return false; -} - -bool LibQNNHelper::ModelInitialize(const std::string& model_name, const std::string& model_path, - const std::string& backend_lib_path, const std::string& system_lib_path) { - return ModelInitializeEx(model_name, "", model_path, backend_lib_path, system_lib_path); -} - -bool LibQNNHelper::ModelInference(std::string model_name, std::string proc_name, std::string share_memory_name, - std::vector& inputBuffers, std::vector& inputSize, - std::vector& outputBuffers, std::vector& outputSize, - std::string& perfProfile) { - if (!proc_name.empty()) { // If proc_name, run the model in that process. - return TalkToSvc_Inference(model_name, proc_name, share_memory_name, inputBuffers, inputSize, outputBuffers, outputSize, perfProfile); - } - - return false; -} - -bool LibQNNHelper::ModelInference(std::string model_name, std::vector& inputBuffers, - std::vector& outputBuffers, std::vector& outputSize, - std::string& perfProfile){ - std::vector inputSize; - return ModelInferenceEx(model_name, "", "", inputBuffers, inputSize, outputBuffers, outputSize, perfProfile); -} - -bool LibQNNHelper::ModelDestroy(std::string model_name, std::string proc_name) { - if (!proc_name.empty()) { // If proc_name, desctroy the model in that process. 
- return TalkToSvc_Destroy(model_name, proc_name); - } - - return false; -} - -bool LibQNNHelper::ModelDestroy(std::string model_name) { - return ModelDestroyEx(model_name, ""); -} - -bool LibQNNHelper::CreateShareMemory(std::string share_memory_name, size_t share_memory_size) { - return CreateShareMem(share_memory_name, share_memory_size); -} - -bool LibQNNHelper::DeleteShareMemory(std::string share_memory_name) { - return DeleteShareMem(share_memory_name); -} - -int main(int argc, char** argv) { - - return EXIT_SUCCESS; -} diff --git a/LibQNNHelper/src/libQNNHelper.hpp b/LibQNNHelper/src/libQNNHelper.hpp deleted file mode 100644 index 973f4e8..0000000 --- a/LibQNNHelper/src/libQNNHelper.hpp +++ /dev/null @@ -1,98 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. -// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== -#pragma once - -#include -#include -#include -#include -#include - -#define LIBQNNHELPER_API __declspec(dllexport) - - -///////////////////////////////////////////////////////////////////////////// -/// Sync log time with 'SvcQNNHelper.exe' processes. For QNNHelper library internal use. -///////////////////////////////////////////////////////////////////////////// -extern "C" LIBQNNHELPER_API void SetProcInfo(std::string proc_name, uint64_t epoch); - - -///////////////////////////////////////////////////////////////////////////// -/// Log print. 
-///////////////////////////////////////////////////////////////////////////// -extern "C" LIBQNNHELPER_API void QNN_ERR(const char* fmt, ...); -extern "C" LIBQNNHELPER_API void QNN_WAR(const char* fmt, ...); -extern "C" LIBQNNHELPER_API void QNN_INF(const char* fmt, ...); -extern "C" LIBQNNHELPER_API void QNN_VEB(const char* fmt, ...); -extern "C" LIBQNNHELPER_API void QNN_DBG(const char* fmt, ...); -extern "C" LIBQNNHELPER_API bool SetLogLevel(int32_t log_level, const std::string log_path = "None"); -extern "C" LIBQNNHELPER_API bool SetProfilingLevel(int32_t profiling_level); -extern "C" LIBQNNHELPER_API bool SetPerfProfileGlobal(const std::string& perf_profile); -extern "C" LIBQNNHELPER_API bool RelPerfProfileGlobal(); - - -///////////////////////////////////////////////////////////////////////////// -/// Class LibQNNHelper declaration. -///////////////////////////////////////////////////////////////////////////// -class LIBQNNHELPER_API LibQNNHelper -{ -public: - bool ModelInitialize(const std::string& model_name, const std::string& model_path, - const std::string& backend_lib_path, const std::string& system_lib_path); - bool ModelInitialize(const std::string& model_name, const std::string& proc_name, const std::string& model_path, - const std::string& backend_lib_path, const std::string& system_lib_path); - - bool ModelInference(std::string model_name, std::vector& inputBuffers, - std::vector& outputBuffers, std::vector& outputSize, - std::string& perfProfile); - bool ModelInference(std::string model_name, std::string proc_name, std::string share_memory_name, - std::vector& inputBuffers, std::vector& inputSize, - std::vector& outputBuffers, std::vector& outputSize, - std::string& perfProfile); - - bool ModelDestroy(std::string model_name); - bool ModelDestroy(std::string model_name, std::string proc_name); - - bool CreateShareMemory(std::string share_memory_name, size_t share_memory_size); - bool DeleteShareMemory(std::string share_memory_name); -}; - - 
-///////////////////////////////////////////////////////////////////////////// -/// Class TimerHelper declaration. -///////////////////////////////////////////////////////////////////////////// -#pragma warning(disable:4251) -class LIBQNNHELPER_API TimerHelper -{ -public: - TimerHelper() { - Reset(); - } - - void Reset() { - time_start = std::chrono::steady_clock::now(); - } - - void Print(std::string message) { - time_now = std::chrono::steady_clock::now(); - double dr_ms = std::chrono::duration(time_now - time_start).count(); - QNN_WAR("Time: %s %.2f\n", message.c_str(), dr_ms); - } - - void Print(std::string message, bool reset) { - Print(message); - if (reset) { - Reset(); - } - } - -private: - std::chrono::steady_clock::time_point time_start; - std::chrono::steady_clock::time_point time_now; -}; - diff --git a/LibQNNHelper/src/main.cpp b/LibQNNHelper/src/main.cpp deleted file mode 100644 index 0fc631b..0000000 --- a/LibQNNHelper/src/main.cpp +++ /dev/null @@ -1,456 +0,0 @@ -//============================================================================== -// -// Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
-// -// SPDX-License-Identifier: BSD-3-Clause -// -//============================================================================== - -#include -#include -#include - -#include "BuildId.hpp" -#include "DynamicLoadUtil.hpp" -#include "Logger.hpp" -#include "PAL/DynamicLoading.hpp" -#include "PAL/GetOpt.hpp" -#include "QnnSampleApp.hpp" -#include "QnnSampleAppUtils.hpp" - -static void* sg_backendHandle{nullptr}; -static void* sg_modelHandle{nullptr}; - -namespace qnn { -namespace tools { -namespace sample_app { - -void showHelp() { - std::cout - << "\nDESCRIPTION:\n" - << "------------\n" - << "Sample application demonstrating how to load and execute a neural network\n" - << "using QNN APIs.\n" - << "\n\n" - << "REQUIRED ARGUMENTS:\n" - << "-------------------\n" - << " --model Path to the model containing a QNN network.\n" - << "\n" - << " --backend Path to a QNN backend to execute the model.\n" - << "\n" - << " --input_list Path to a file listing the inputs for the network.\n" - << " If there are multiple graphs in model.so, this has\n" - << " to be comma separated list of input list files.\n" - << "\n" - << " --retrieve_context Path to cached binary from which to load a saved\n" - " context from and execute graphs. --retrieve_context " - "and\n" - " --model are mutually exclusive. Only one of the " - "options\n" - " can be specified at a time.\n" - << "\n\n" - - << "OPTIONAL ARGUMENTS:\n" - << "-------------------\n" - - << " --debug Specifies that output from all layers of the network\n" - << " will be saved.\n" - << "\n" - << " --output_dir The directory to save output to. Defaults to " - "./output.\n" - << "\n" - << " --output_data_type Data type of the output. Values can be:\n\n" - " 1. float_only: dump outputs in float only.\n" - " 2. native_only: dump outputs in data type " - "native\n" - " to the model. For ex., " - "uint8_t.\n" - " 3. float_and_native: dump outputs in both float and\n" - " native.\n\n" - " (This is N/A for a float model. 
In other cases,\n" - " if not specified, defaults to float_only.)\n" - << "\n" - << " --input_data_type Data type of the input. Values can be:\n\n" - " 1. float: reads inputs as floats and quantizes\n" - " if necessary based on quantization\n" - " parameters in the model.\n" - " 2. native: reads inputs assuming the data type to " - "be\n" - " native to the model. For ex., " - "uint8_t.\n\n" - " (This is N/A for a float model. In other cases,\n" - " if not specified, defaults to float.)\n" - << "\n" - << " --op_packages Provide a comma separated list of op packages \n" - " and interface providers to register. The syntax is:\n" - " " - "op_package_path:interface_provider[,op_package_path:interface_provider...]\n" - << "\n" - << " --profiling_level Enable profiling. Valid Values:\n" - " 1. basic: captures execution and init time.\n" - " 2. detailed: in addition to basic, captures\n" - " per Op timing for execution.\n" - << "\n" - << " --save_context Specifies that the backend context and metadata " - "related \n" - " to graphs be saved to a binary file.\n" - " Value of this parameter is the name of the name\n" - " required to save the context binary to.\n" - " Saved in the same path as --output_dir option.\n" - " Note: --retrieve_context and --save_context are " - "mutually\n" - " exclusive. Both options should not be specified at\n" - " the same time.\n" - << "\n" -#ifdef QNN_ENABLE_DEBUG - << " --log_level Specifies max logging level to be set. Valid " - "settings: \n" - " \"error\", \"warn\", \"info\", \"verbose\" and " - "\"debug\"." - "\n" -#else - << " --log_level Specifies max logging level to be set. Valid " - "settings: \n" - " \"error\", \"warn\", \"info\" and \"verbose\"." 
- "\n" -#endif - << "\n" - << " --system_library Path to QNN System library (libQnnSystem.so) needed to " - "exercise reflection APIs\n" - " when loading a context from a binary cache.\n" - " libQnnSystem.so is provided under /lib in the " - "SDK.\n" - "\n" - << " --version Print the QNN SDK version.\n" - << "\n" - << " --help Show this help message.\n" - << std::endl; -} - -void showHelpAndExit(std::string&& error) { - std::cerr << "ERROR: " << error << "\n"; - std::cerr << "Please check help below:\n"; - showHelp(); - std::exit(EXIT_FAILURE); -} - -std::unique_ptr processCommandLine(int argc, - char** argv, - bool& loadFromCachedBinary) { - enum OPTIONS { - OPT_HELP = 0, - OPT_MODEL = 1, - OPT_BACKEND = 2, - OPT_INPUT_LIST = 3, - OPT_OUTPUT_DIR = 4, - OPT_OP_PACKAGES = 5, - OPT_DEBUG_OUTPUTS = 6, - OPT_OUTPUT_DATA_TYPE = 7, - OPT_INPUT_DATA_TYPE = 8, - OPT_LOG_LEVEL = 9, - OPT_PROFILING_LEVEL = 10, - OPT_RETRIEVE_CONTEXT = 11, - OPT_SAVE_CONTEXT = 12, - OPT_VERSION = 13, - OPT_SYSTEM_LIBRARY = 14 - }; - - // Create the command line options - static struct pal::Option s_longOptions[] = { - {"help", pal::no_argument, NULL, OPT_HELP}, - {"model", pal::required_argument, NULL, OPT_MODEL}, - {"backend", pal::required_argument, NULL, OPT_BACKEND}, - {"input_list", pal::required_argument, NULL, OPT_INPUT_LIST}, - {"output_dir", pal::required_argument, NULL, OPT_OUTPUT_DIR}, - {"op_packages", pal::required_argument, NULL, OPT_OP_PACKAGES}, - {"debug", pal::no_argument, NULL, OPT_DEBUG_OUTPUTS}, - {"output_data_type", pal::required_argument, NULL, OPT_OUTPUT_DATA_TYPE}, - {"input_data_type", pal::required_argument, NULL, OPT_INPUT_DATA_TYPE}, - {"profiling_level", pal::required_argument, NULL, OPT_PROFILING_LEVEL}, - {"log_level", pal::required_argument, NULL, OPT_LOG_LEVEL}, - {"retrieve_context", pal::required_argument, NULL, OPT_RETRIEVE_CONTEXT}, - {"save_context", pal::required_argument, NULL, OPT_SAVE_CONTEXT}, - {"system_library", pal::required_argument, NULL, 
OPT_SYSTEM_LIBRARY}, - {"version", pal::no_argument, NULL, OPT_VERSION}, - {NULL, 0, NULL, 0}}; - - // Command line parsing loop - int longIndex = 0; - int opt = 0; - std::string modelPath; - std::string backEndPath; - std::string inputListPaths; - bool debug = false; - std::string outputPath; - std::string opPackagePaths; - iotensor::OutputDataType parsedOutputDataType = iotensor::OutputDataType::FLOAT_ONLY; - iotensor::InputDataType parsedInputDataType = iotensor::InputDataType::FLOAT; - sample_app::ProfilingLevel parsedProfilingLevel = ProfilingLevel::OFF; - bool dumpOutputs = true; - std::string cachedBinaryPath; - std::string saveBinaryName; - QnnLog_Level_t logLevel{QNN_LOG_LEVEL_ERROR}; - std::string systemLibraryPath; - while ((opt = pal::getOptLongOnly(argc, argv, "", s_longOptions, &longIndex)) != -1) { - switch (opt) { - case OPT_HELP: - showHelp(); - std::exit(EXIT_SUCCESS); - break; - - case OPT_VERSION: - std::cout << "QNN SDK " << qnn::tools::getBuildId() << "\n"; - std::exit(EXIT_SUCCESS); - break; - - case OPT_MODEL: - modelPath = pal::g_optArg; - break; - - case OPT_BACKEND: - backEndPath = pal::g_optArg; - break; - - case OPT_INPUT_LIST: - inputListPaths = pal::g_optArg; - break; - - case OPT_DEBUG_OUTPUTS: - debug = true; - break; - - case OPT_OUTPUT_DIR: - outputPath = pal::g_optArg; - break; - - case OPT_OP_PACKAGES: - opPackagePaths = pal::g_optArg; - break; - - case OPT_OUTPUT_DATA_TYPE: - parsedOutputDataType = iotensor::parseOutputDataType(pal::g_optArg); - if (parsedOutputDataType == iotensor::OutputDataType::INVALID) { - showHelpAndExit("Invalid output data type string."); - } - break; - - case OPT_INPUT_DATA_TYPE: - parsedInputDataType = iotensor::parseInputDataType(pal::g_optArg); - if (parsedInputDataType == iotensor::InputDataType::INVALID) { - showHelpAndExit("Invalid input data type string."); - } - break; - - case OPT_PROFILING_LEVEL: - parsedProfilingLevel = sample_app::parseProfilingLevel(pal::g_optArg); - if 
(parsedProfilingLevel == sample_app::ProfilingLevel::INVALID) { - showHelpAndExit("Invalid profiling level."); - } - break; - - case OPT_LOG_LEVEL: - logLevel = sample_app::parseLogLevel(pal::g_optArg); - if (logLevel != QNN_LOG_LEVEL_MAX) { - if (!log::setLogLevel(logLevel)) { - showHelpAndExit("Unable to set log level."); - } - } - break; - - case OPT_RETRIEVE_CONTEXT: - loadFromCachedBinary = true; - cachedBinaryPath = pal::g_optArg; - if (cachedBinaryPath.empty()) { - showHelpAndExit("Cached context binary file not specified."); - } - break; - - case OPT_SAVE_CONTEXT: - saveBinaryName = pal::g_optArg; - if (saveBinaryName.empty()) { - showHelpAndExit("Save context needs a file name."); - } - break; - - case OPT_SYSTEM_LIBRARY: - systemLibraryPath = pal::g_optArg; - if (systemLibraryPath.empty()) { - showHelpAndExit("System library (libQnnSystem.so) path not specified."); - } - break; - - default: - std::cerr << "ERROR: Invalid argument passed: " << argv[pal::g_optInd - 1] - << "\nPlease check the Arguments section in the description below.\n"; - showHelp(); - std::exit(EXIT_FAILURE); - } - } - - if (!modelPath.empty()) { - if (!cachedBinaryPath.empty()) { - showHelpAndExit( - "Error: both --model and --cached_binary specified. Only one option is valid at a " - "time.\n"); - } - } else { - if (cachedBinaryPath.empty()) { - showHelpAndExit("Missing option: --model\n"); - } - } - - if (!cachedBinaryPath.empty() && !saveBinaryName.empty()) { - showHelpAndExit("Error: both --cached_binary and --save_binary specified"); - } - - if (backEndPath.empty()) { - showHelpAndExit("Missing option: --backend\n"); - } - - if (inputListPaths.empty()) { - showHelpAndExit("Missing option: --input_list\n"); - } - - if (loadFromCachedBinary && systemLibraryPath.empty()) { - showHelpAndExit( - "Missing option: --system_library. 
QNN System shared library (libQnnSystem.so) is needed " - "to load from a cached binary\n"); - } - - QNN_INFO("Model: %s", modelPath.c_str()); - QNN_INFO("Backend: %s", backEndPath.c_str()); - - QnnFunctionPointers qnnFunctionPointers; - // Load backend and model .so and validate all the required function symbols are resolved - auto statusCode = dynamicloadutil::getQnnFunctionPointers(backEndPath, - modelPath, - &qnnFunctionPointers, - &sg_backendHandle, - !loadFromCachedBinary, - &sg_modelHandle); - if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { - if (dynamicloadutil::StatusCode::FAIL_LOAD_BACKEND == statusCode) { - exitWithMessage( - "Error initializing QNN Function Pointers: could not load backend: " + backEndPath, - EXIT_FAILURE); - } else if (dynamicloadutil::StatusCode::FAIL_LOAD_MODEL == statusCode) { - exitWithMessage( - "Error initializing QNN Function Pointers: could not load model: " + modelPath, - EXIT_FAILURE); - } else { - exitWithMessage("Error initializing QNN Function Pointers", EXIT_FAILURE); - } - } - - if (loadFromCachedBinary) { - statusCode = - dynamicloadutil::getQnnSystemFunctionPointers(systemLibraryPath, &qnnFunctionPointers); - if (dynamicloadutil::StatusCode::SUCCESS != statusCode) { - exitWithMessage("Error initializing QNN System Function Pointers", EXIT_FAILURE); - } - } - - std::unique_ptr app(new sample_app::QnnSampleApp(qnnFunctionPointers, - inputListPaths, - opPackagePaths, - sg_backendHandle, - outputPath, - debug, - parsedOutputDataType, - parsedInputDataType, - parsedProfilingLevel, - dumpOutputs, - cachedBinaryPath, - saveBinaryName)); - return app; -} - -} // namespace sample_app -} // namespace tools -} // namespace qnn - -int main_disable(int argc, char** argv) { // zw: change it from 'executable' file to dynamic lib(libqnnhelper.dll) - using namespace qnn::tools; - - if (!qnn::log::initializeLogging()) { - std::cerr << "ERROR: Unable to initialize logging!\n"; - return EXIT_FAILURE; - } - - { - bool 
loadFromCachedBinary{false}; - std::unique_ptr app = - sample_app::processCommandLine(argc, argv, loadFromCachedBinary); - - if (nullptr == app) { - return EXIT_FAILURE; - } - - QNN_INFO("qnn-sample-app build version: %s", qnn::tools::getBuildId().c_str()); - QNN_INFO("Backend build version: %s", app->getBackendBuildId().c_str()); - - if (sample_app::StatusCode::SUCCESS != app->initialize()) { - return app->reportError("Initialization failure"); - } - - if (sample_app::StatusCode::SUCCESS != app->initializeBackend()) { - return app->reportError("Backend Initialization failure"); - } - - auto devicePropertySupportStatus = app->isDevicePropertySupported(); - if (sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { - auto createDeviceStatus = app->createDevice(); - if (sample_app::StatusCode::SUCCESS != createDeviceStatus) { - return app->reportError("Device Creation failure"); - } - } - - if (sample_app::StatusCode::SUCCESS != app->initializeProfiling()) { - return app->reportError("Profiling Initialization failure"); - } - - if (sample_app::StatusCode::SUCCESS != app->registerOpPackages()) { - return app->reportError("Register Op Packages failure"); - } - - if (!loadFromCachedBinary) { - if (sample_app::StatusCode::SUCCESS != app->createContext()) { - return app->reportError("Context Creation failure"); - } - if (sample_app::StatusCode::SUCCESS != app->composeGraphs()) { - return app->reportError("Graph Prepare failure"); - } - if (sample_app::StatusCode::SUCCESS != app->finalizeGraphs()) { - return app->reportError("Graph Finalize failure"); - } - } else { - if (sample_app::StatusCode::SUCCESS != app->createFromBinary()) { - return app->reportError("Create From Binary failure"); - } - } - - if (sample_app::StatusCode::SUCCESS != app->executeGraphs()) { - return app->reportError("Graph Execution failure"); - } - - if (sample_app::StatusCode::SUCCESS != app->freeContext()) { - return app->reportError("Context Free failure"); - } - - if 
(sample_app::StatusCode::FAILURE != devicePropertySupportStatus) { - auto freeDeviceStatus = app->freeDevice(); - if (sample_app::StatusCode::SUCCESS != freeDeviceStatus) { - return app->reportError("Device Free failure"); - } - } - } - - if (sg_backendHandle) { - pal::dynamicloading::dlClose(sg_backendHandle); - } - if (sg_modelHandle) { - pal::dynamicloading::dlClose(sg_modelHandle); - } - - return EXIT_SUCCESS; -}