正在显示
26 个修改的文件
包含
4893 行增加
和
0 行删除
# Build script for the rvmncnn JNI shared library (Android).
cmake_minimum_required(VERSION 3.10)

# Point find_package at the bundled opencv-mobile SDK (config mode);
# only the core and imgproc modules are required.
set(OpenCV_DIR ${CMAKE_SOURCE_DIR}/opencv-mobile-4.11.0-android/sdk/native/jni)
find_package(OpenCV REQUIRED core imgproc)

# Bundled ncnn (Vulkan-enabled build), selected per Android ABI.
set(ncnn_DIR ${CMAKE_SOURCE_DIR}/ncnn-20250503-android-vulkan/${ANDROID_ABI}/lib/cmake/ncnn)
find_package(ncnn REQUIRED)

# The JNI library; sources cover the JNI glue (rvmncnn.cpp), the RVM model
# wrapper, NDK camera access, and OpenCV post-processing.
add_library(rvmncnn SHARED rvmncnn.cpp rvm.cpp ndkcamera.cpp opencv_processor.cpp)

# camera2ndk / mediandk are Android NDK system libraries
# (presumably needed by ndkcamera.cpp — confirm against its includes).
target_link_libraries(rvmncnn ncnn ${OpenCV_LIBS} camera2ndk mediandk)
| 1 | +# 背景图片替换功能使用说明 | ||
| 2 | + | ||
| 3 | +## 功能概述 | ||
| 4 | +此修改允许将Robust Video Matting (RVM)的背景从单一颜色替换为指定的图片。 | ||
| 5 | + | ||
| 6 | +## 使用方式 | ||
| 7 | + | ||
| 8 | +### 1. Java/Kotlin层调用 | ||
| 9 | +```java | ||
| 10 | +// 设置背景图片 | ||
| 11 | +Bitmap backgroundBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.background); | ||
| 12 | +RVMNcnn.setBackgroundImage(backgroundBitmap); | ||
| 13 | + | ||
| 14 | +// 清除背景图片,恢复默认颜色 | ||
| 15 | +RVMNcnn.setBackgroundImage(null); | ||
| 16 | +``` | ||
| 17 | + | ||
| 18 | +### 2. JNI接口 | ||
| 19 | +新增JNI函数: | ||
| 20 | +- `Java_org_example_project_RVMNcnn_setBackgroundImage(JNIEnv* env, jobject thiz, jobject bitmap)` | ||
| 21 | + | ||
| 22 | +### 3. C++层API | ||
| 23 | +新增RVM类方法: | ||
| 24 | +- `void set_background_image(const cv::Mat& background)` - 设置背景图片 | ||
| 25 | +- `void clear_background_image()` - 清除背景图片,使用默认颜色 | ||
| 26 | + | ||
| 27 | +## 技术细节 | ||
| 28 | + | ||
| 29 | +### 背景图片处理 | ||
| 30 | +- 支持RGBA_8888和RGB_565格式的Bitmap | ||
| 31 | +- 自动转换为OpenCV BGR格式 | ||
| 32 | +- 支持任意尺寸的图片,会自动缩放适配 | ||
| 33 | +- 如果未设置背景图片,使用默认颜色RGB(120, 255, 155) | ||
| 34 | + | ||
| 35 | +### 混合算法 | ||
| 36 | +使用alpha混合公式: | ||
| 37 | +``` | ||
| 38 | +result = foreground * alpha + background * (1 - alpha) | ||
| 39 | +``` | ||
| 40 | + | ||
| 41 | +### 性能考虑 | ||
| 42 | +- 背景图片会在设置时进行一次格式转换和缩放 | ||
| 43 | +- 每帧渲染时进行实时像素采样 | ||
| 44 | +- 建议使用与输入视频分辨率相近的背景图片以获得最佳性能 | ||
| 45 | + | ||
| 46 | +## 注意事项 | ||
| 47 | +1. 背景图片应该是RGB或RGBA格式的8位图像 | ||
| 48 | +2. 图片尺寸不需要与输入视频完全一致,会自动适配 | ||
| 49 | +3. 设置null可以恢复默认的背景颜色 | ||
| 50 | +4. 背景图片会在RVM实例销毁时自动释放 |
| 1 | +// | ||
| 2 | +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. | ||
| 3 | +// Copyright (C) 2013 LunarG, Inc. | ||
| 4 | +// | ||
| 5 | +// All rights reserved. | ||
| 6 | +// | ||
| 7 | +// Redistribution and use in source and binary forms, with or without | ||
| 8 | +// modification, are permitted provided that the following conditions | ||
| 9 | +// are met: | ||
| 10 | +// | ||
| 11 | +// Redistributions of source code must retain the above copyright | ||
| 12 | +// notice, this list of conditions and the following disclaimer. | ||
| 13 | +// | ||
| 14 | +// Redistributions in binary form must reproduce the above | ||
| 15 | +// copyright notice, this list of conditions and the following | ||
| 16 | +// disclaimer in the documentation and/or other materials provided | ||
| 17 | +// with the distribution. | ||
| 18 | +// | ||
| 19 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 20 | +// contributors may be used to endorse or promote products derived | ||
| 21 | +// from this software without specific prior written permission. | ||
| 22 | +// | ||
| 23 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 24 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 25 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 26 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 27 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 28 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 29 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 30 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 31 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 32 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 33 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 34 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 35 | +// | ||
| 36 | + | ||
| 37 | +#ifndef _RESOURCE_LIMITS_INCLUDED_ | ||
| 38 | +#define _RESOURCE_LIMITS_INCLUDED_ | ||
| 39 | + | ||
// Boolean feature limits: each flag tells the compiler whether to accept
// the corresponding class of shader construct. Field names follow the
// glslang TLimits naming (see the glslang_limits_t counterpart in the
// C interface header).
struct TLimits {
    bool nonInductiveForLoops;
    bool whileLoops;
    bool doWhileLoops;
    bool generalUniformIndexing;
    bool generalAttributeMatrixVectorIndexing;
    bool generalVaryingIndexing;
    bool generalSamplerIndexing;
    bool generalVariableIndexing;
    bool generalConstantMatrixVectorIndexing;
};
| 51 | + | ||
// Numeric resource limits supplied to the compiler front end. The field
// names mirror the GLSL gl_Max*-style built-in limit constants; the
// trailing TLimits member carries the boolean feature flags.
// NOTE: field order is part of the ABI of this struct — do not reorder.
struct TBuiltInResource {
    // Legacy / desktop-GL limits.
    int maxLights;
    int maxClipPlanes;
    int maxTextureUnits;
    int maxTextureCoords;
    int maxVertexAttribs;
    int maxVertexUniformComponents;
    int maxVaryingFloats;
    int maxVertexTextureImageUnits;
    int maxCombinedTextureImageUnits;
    int maxTextureImageUnits;
    int maxFragmentUniformComponents;
    int maxDrawBuffers;
    int maxVertexUniformVectors;
    int maxVaryingVectors;
    int maxFragmentUniformVectors;
    int maxVertexOutputVectors;
    int maxFragmentInputVectors;
    int minProgramTexelOffset;
    int maxProgramTexelOffset;
    int maxClipDistances;
    // Compute-stage limits.
    int maxComputeWorkGroupCountX;
    int maxComputeWorkGroupCountY;
    int maxComputeWorkGroupCountZ;
    int maxComputeWorkGroupSizeX;
    int maxComputeWorkGroupSizeY;
    int maxComputeWorkGroupSizeZ;
    int maxComputeUniformComponents;
    int maxComputeTextureImageUnits;
    int maxComputeImageUniforms;
    int maxComputeAtomicCounters;
    int maxComputeAtomicCounterBuffers;
    // Inter-stage component / image-uniform limits.
    int maxVaryingComponents;
    int maxVertexOutputComponents;
    int maxGeometryInputComponents;
    int maxGeometryOutputComponents;
    int maxFragmentInputComponents;
    int maxImageUnits;
    int maxCombinedImageUnitsAndFragmentOutputs;
    int maxCombinedShaderOutputResources;
    int maxImageSamples;
    int maxVertexImageUniforms;
    int maxTessControlImageUniforms;
    int maxTessEvaluationImageUniforms;
    int maxGeometryImageUniforms;
    int maxFragmentImageUniforms;
    int maxCombinedImageUniforms;
    // Geometry-stage limits.
    int maxGeometryTextureImageUnits;
    int maxGeometryOutputVertices;
    int maxGeometryTotalOutputComponents;
    int maxGeometryUniformComponents;
    int maxGeometryVaryingComponents;
    // Tessellation-stage limits.
    int maxTessControlInputComponents;
    int maxTessControlOutputComponents;
    int maxTessControlTextureImageUnits;
    int maxTessControlUniformComponents;
    int maxTessControlTotalOutputComponents;
    int maxTessEvaluationInputComponents;
    int maxTessEvaluationOutputComponents;
    int maxTessEvaluationTextureImageUnits;
    int maxTessEvaluationUniformComponents;
    int maxTessPatchComponents;
    int maxPatchVertices;
    int maxTessGenLevel;
    int maxViewports;
    // Atomic-counter limits, per stage and combined.
    int maxVertexAtomicCounters;
    int maxTessControlAtomicCounters;
    int maxTessEvaluationAtomicCounters;
    int maxGeometryAtomicCounters;
    int maxFragmentAtomicCounters;
    int maxCombinedAtomicCounters;
    int maxAtomicCounterBindings;
    int maxVertexAtomicCounterBuffers;
    int maxTessControlAtomicCounterBuffers;
    int maxTessEvaluationAtomicCounterBuffers;
    int maxGeometryAtomicCounterBuffers;
    int maxFragmentAtomicCounterBuffers;
    int maxCombinedAtomicCounterBuffers;
    int maxAtomicCounterBufferSize;
    // Transform feedback / clip-cull / sample limits.
    int maxTransformFeedbackBuffers;
    int maxTransformFeedbackInterleavedComponents;
    int maxCullDistances;
    int maxCombinedClipAndCullDistances;
    int maxSamples;
    // Mesh/task shader limits, NV extension variants.
    int maxMeshOutputVerticesNV;
    int maxMeshOutputPrimitivesNV;
    int maxMeshWorkGroupSizeX_NV;
    int maxMeshWorkGroupSizeY_NV;
    int maxMeshWorkGroupSizeZ_NV;
    int maxTaskWorkGroupSizeX_NV;
    int maxTaskWorkGroupSizeY_NV;
    int maxTaskWorkGroupSizeZ_NV;
    int maxMeshViewCountNV;
    // Mesh/task shader limits, EXT extension variants.
    int maxMeshOutputVerticesEXT;
    int maxMeshOutputPrimitivesEXT;
    int maxMeshWorkGroupSizeX_EXT;
    int maxMeshWorkGroupSizeY_EXT;
    int maxMeshWorkGroupSizeZ_EXT;
    int maxTaskWorkGroupSizeX_EXT;
    int maxTaskWorkGroupSizeY_EXT;
    int maxTaskWorkGroupSizeZ_EXT;
    int maxMeshViewCountEXT;
    int maxDualSourceDrawBuffersEXT;

    // Boolean feature flags (see TLimits above).
    TLimits limits;
};
| 158 | + | ||
| 159 | +#endif // _RESOURCE_LIMITS_INCLUDED_ |
| 1 | +/** | ||
| 2 | + This code is based on the glslang_c_interface implementation by Viktor Latypov | ||
| 3 | +**/ | ||
| 4 | + | ||
| 5 | +/** | ||
| 6 | +BSD 2-Clause License | ||
| 7 | + | ||
| 8 | +Copyright (c) 2019, Viktor Latypov | ||
| 9 | +All rights reserved. | ||
| 10 | + | ||
| 11 | +Redistribution and use in source and binary forms, with or without | ||
| 12 | +modification, are permitted provided that the following conditions are met: | ||
| 13 | + | ||
| 14 | +1. Redistributions of source code must retain the above copyright notice, this | ||
| 15 | + list of conditions and the following disclaimer. | ||
| 16 | + | ||
| 17 | +2. Redistributions in binary form must reproduce the above copyright notice, | ||
| 18 | + this list of conditions and the following disclaimer in the documentation | ||
| 19 | + and/or other materials provided with the distribution. | ||
| 20 | + | ||
| 21 | +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 22 | +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 23 | +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 24 | +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | ||
| 25 | +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 26 | +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | ||
| 27 | +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 28 | +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
| 29 | +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 30 | +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 31 | +**/ | ||
| 32 | + | ||
| 33 | +#ifndef GLSLANG_C_IFACE_H_INCLUDED | ||
| 34 | +#define GLSLANG_C_IFACE_H_INCLUDED | ||
| 35 | + | ||
| 36 | +#include <stdbool.h> | ||
| 37 | +#include <stdlib.h> | ||
| 38 | + | ||
| 39 | +#include "glslang_c_shader_types.h" | ||
| 40 | +#include "visibility.h" | ||
| 41 | + | ||
/* Opaque handle types; the concrete struct definitions live inside the
 * glslang implementation and are never exposed through this C interface. */
typedef struct glslang_shader_s glslang_shader_t;
typedef struct glslang_program_s glslang_program_t;
typedef struct glslang_mapper_s glslang_mapper_t;
typedef struct glslang_resolver_s glslang_resolver_t;

/* Version counterpart; filled in by glslang_get_version(). */
typedef struct glslang_version_s {
    int major;
    int minor;
    int patch;
    const char* flavor; /* extra version qualifier string, if any */
} glslang_version_t;
| 54 | + | ||
/* TLimits counterpart: snake_case mirror of the C++ TLimits struct.
 * Each flag enables or disables acceptance of the corresponding class
 * of shader construct. Field order must match TLimits. */
typedef struct glslang_limits_s {
    bool non_inductive_for_loops;
    bool while_loops;
    bool do_while_loops;
    bool general_uniform_indexing;
    bool general_attribute_matrix_vector_indexing;
    bool general_varying_indexing;
    bool general_sampler_indexing;
    bool general_variable_indexing;
    bool general_constant_matrix_vector_indexing;
} glslang_limits_t;
| 67 | + | ||
/* TBuiltInResource counterpart: snake_case mirror of the C++ struct.
 * Field order must match TBuiltInResource field-for-field — do not
 * reorder. Passed via glslang_input_t::resource. */
typedef struct glslang_resource_s {
    /* Legacy / desktop-GL limits. */
    int max_lights;
    int max_clip_planes;
    int max_texture_units;
    int max_texture_coords;
    int max_vertex_attribs;
    int max_vertex_uniform_components;
    int max_varying_floats;
    int max_vertex_texture_image_units;
    int max_combined_texture_image_units;
    int max_texture_image_units;
    int max_fragment_uniform_components;
    int max_draw_buffers;
    int max_vertex_uniform_vectors;
    int max_varying_vectors;
    int max_fragment_uniform_vectors;
    int max_vertex_output_vectors;
    int max_fragment_input_vectors;
    int min_program_texel_offset;
    int max_program_texel_offset;
    int max_clip_distances;
    /* Compute-stage limits. */
    int max_compute_work_group_count_x;
    int max_compute_work_group_count_y;
    int max_compute_work_group_count_z;
    int max_compute_work_group_size_x;
    int max_compute_work_group_size_y;
    int max_compute_work_group_size_z;
    int max_compute_uniform_components;
    int max_compute_texture_image_units;
    int max_compute_image_uniforms;
    int max_compute_atomic_counters;
    int max_compute_atomic_counter_buffers;
    /* Inter-stage component / image-uniform limits. */
    int max_varying_components;
    int max_vertex_output_components;
    int max_geometry_input_components;
    int max_geometry_output_components;
    int max_fragment_input_components;
    int max_image_units;
    int max_combined_image_units_and_fragment_outputs;
    int max_combined_shader_output_resources;
    int max_image_samples;
    int max_vertex_image_uniforms;
    int max_tess_control_image_uniforms;
    int max_tess_evaluation_image_uniforms;
    int max_geometry_image_uniforms;
    int max_fragment_image_uniforms;
    int max_combined_image_uniforms;
    /* Geometry-stage limits. */
    int max_geometry_texture_image_units;
    int max_geometry_output_vertices;
    int max_geometry_total_output_components;
    int max_geometry_uniform_components;
    int max_geometry_varying_components;
    /* Tessellation-stage limits. */
    int max_tess_control_input_components;
    int max_tess_control_output_components;
    int max_tess_control_texture_image_units;
    int max_tess_control_uniform_components;
    int max_tess_control_total_output_components;
    int max_tess_evaluation_input_components;
    int max_tess_evaluation_output_components;
    int max_tess_evaluation_texture_image_units;
    int max_tess_evaluation_uniform_components;
    int max_tess_patch_components;
    int max_patch_vertices;
    int max_tess_gen_level;
    int max_viewports;
    /* Atomic-counter limits, per stage and combined. */
    int max_vertex_atomic_counters;
    int max_tess_control_atomic_counters;
    int max_tess_evaluation_atomic_counters;
    int max_geometry_atomic_counters;
    int max_fragment_atomic_counters;
    int max_combined_atomic_counters;
    int max_atomic_counter_bindings;
    int max_vertex_atomic_counter_buffers;
    int max_tess_control_atomic_counter_buffers;
    int max_tess_evaluation_atomic_counter_buffers;
    int max_geometry_atomic_counter_buffers;
    int max_fragment_atomic_counter_buffers;
    int max_combined_atomic_counter_buffers;
    int max_atomic_counter_buffer_size;
    /* Transform feedback / clip-cull / sample limits. */
    int max_transform_feedback_buffers;
    int max_transform_feedback_interleaved_components;
    int max_cull_distances;
    int max_combined_clip_and_cull_distances;
    int max_samples;
    /* Mesh/task shader limits, NV extension variants. */
    int max_mesh_output_vertices_nv;
    int max_mesh_output_primitives_nv;
    int max_mesh_work_group_size_x_nv;
    int max_mesh_work_group_size_y_nv;
    int max_mesh_work_group_size_z_nv;
    int max_task_work_group_size_x_nv;
    int max_task_work_group_size_y_nv;
    int max_task_work_group_size_z_nv;
    int max_mesh_view_count_nv;
    /* Mesh/task shader limits, EXT extension variants. */
    int max_mesh_output_vertices_ext;
    int max_mesh_output_primitives_ext;
    int max_mesh_work_group_size_x_ext;
    int max_mesh_work_group_size_y_ext;
    int max_mesh_work_group_size_z_ext;
    int max_task_work_group_size_x_ext;
    int max_task_work_group_size_y_ext;
    int max_task_work_group_size_z_ext;
    int max_mesh_view_count_ext;
    /* Anonymous union: both members name the same field. */
    union
    {
        int max_dual_source_draw_buffers_ext;

        /* Incorrectly capitalized name retained for backward compatibility */
        int maxDualSourceDrawBuffersEXT;
    };

    /* Boolean feature flags (see glslang_limits_t above). */
    glslang_limits_t limits;
} glslang_resource_t;
| 181 | + | ||
/* Inclusion result structure allocated by the C include_local/include_system
 * callbacks; released via the free_include_result callback. */
typedef struct glsl_include_result_s {
    /* Header file name, or NULL if inclusion failed */
    const char* header_name;

    /* Header contents, or NULL */
    const char* header_data;
    size_t header_length; /* length of header_data in bytes */

} glsl_include_result_t;

/* Callback for local (quoted) file inclusion.
 * ctx is the user pointer from glslang_input_t::callbacks_ctx. */
typedef glsl_include_result_t* (*glsl_include_local_func)(void* ctx, const char* header_name, const char* includer_name,
                                                          size_t include_depth);

/* Callback for system (angle-bracket) file inclusion. */
typedef glsl_include_result_t* (*glsl_include_system_func)(void* ctx, const char* header_name,
                                                           const char* includer_name, size_t include_depth);

/* Callback for include result destruction; frees a result previously
 * returned by one of the two callbacks above. */
typedef int (*glsl_free_include_result_func)(void* ctx, glsl_include_result_t* result);

/* Collection of callbacks for the GLSL preprocessor; any member may be
 * NULL if the corresponding operation is unsupported. (TODO confirm NULL
 * handling against the implementation.) */
typedef struct glsl_include_callbacks_s {
    glsl_include_system_func include_system;
    glsl_include_local_func include_local;
    glsl_free_include_result_func free_include_result;
} glsl_include_callbacks_t;
| 210 | + | ||
/* Aggregated compile input consumed by glslang_shader_create(),
 * glslang_shader_preprocess() and glslang_shader_parse(). */
typedef struct glslang_input_s {
    glslang_source_t language;                         /* GLSL or HLSL */
    glslang_stage_t stage;                             /* pipeline stage being compiled */
    glslang_client_t client;                           /* target client API (Vulkan/OpenGL) */
    glslang_target_client_version_t client_version;
    glslang_target_language_t target_language;         /* e.g. SPIR-V */
    glslang_target_language_version_t target_language_version;
    /** Shader source code */
    const char* code;
    int default_version;                               /* #version used when the source has none */
    glslang_profile_t default_profile;
    int force_default_version_and_profile;             /* nonzero = override source's own version/profile */
    int forward_compatible;
    glslang_messages_t messages;                       /* bitmask of GLSLANG_MSG_* flags */
    const glslang_resource_t* resource;                /* resource limits (see glslang_resource_t) */
    glsl_include_callbacks_t callbacks;                /* preprocessor include callbacks */
    void* callbacks_ctx;                               /* opaque user pointer passed to the callbacks */
} glslang_input_t;
| 229 | + | ||
/* SpvOptions counterpart: options for SPIR-V generation, consumed by
 * glslang_program_SPIRV_generate_with_options(). */
typedef struct glslang_spv_options_s {
    bool generate_debug_info;
    bool strip_debug_info;
    bool disable_optimizer;
    bool optimize_size;
    bool disassemble;
    bool validate;
    bool emit_nonsemantic_shader_debug_info;
    bool emit_nonsemantic_shader_debug_source;
    bool compile_only;
    bool optimize_allow_expanded_id_bound;
} glslang_spv_options_t;
| 243 | + | ||
#ifdef __cplusplus
extern "C" {
#endif

/* --- Library version and per-process state ------------------------------ */
GLSLANG_EXPORT void glslang_get_version(glslang_version_t* version);

GLSLANG_EXPORT int glslang_initialize_process(void);
GLSLANG_EXPORT void glslang_finalize_process(void);

/* --- Shader lifecycle, configuration, compilation and diagnostics ------- */
GLSLANG_EXPORT glslang_shader_t* glslang_shader_create(const glslang_input_t* input);
GLSLANG_EXPORT void glslang_shader_delete(glslang_shader_t* shader);
GLSLANG_EXPORT void glslang_shader_set_preamble(glslang_shader_t* shader, const char* s);
GLSLANG_EXPORT void glslang_shader_shift_binding(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base);
GLSLANG_EXPORT void glslang_shader_shift_binding_for_set(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base, unsigned int set);
GLSLANG_EXPORT void glslang_shader_set_options(glslang_shader_t* shader, int options); // bitmask of glslang_shader_options_t
GLSLANG_EXPORT void glslang_shader_set_glsl_version(glslang_shader_t* shader, int version);
GLSLANG_EXPORT void glslang_shader_set_default_uniform_block_set_and_binding(glslang_shader_t* shader, unsigned int set, unsigned int binding);
GLSLANG_EXPORT void glslang_shader_set_default_uniform_block_name(glslang_shader_t* shader, const char *name);
GLSLANG_EXPORT void glslang_shader_set_resource_set_binding(glslang_shader_t* shader, const char *const *bindings, unsigned int num_bindings);
GLSLANG_EXPORT int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader);
GLSLANG_EXPORT void glslang_shader_set_preprocessed_code(glslang_shader_t* shader, const char* code);
GLSLANG_EXPORT const char* glslang_shader_get_info_log(glslang_shader_t* shader);
GLSLANG_EXPORT const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader);

/* --- Program linking and SPIR-V generation ------------------------------ */
GLSLANG_EXPORT glslang_program_t* glslang_program_create(void);
GLSLANG_EXPORT void glslang_program_delete(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader);
GLSLANG_EXPORT int glslang_program_link(glslang_program_t* program, int messages); // bitmask of glslang_messages_t
GLSLANG_EXPORT void glslang_program_add_source_text(glslang_program_t* program, glslang_stage_t stage, const char* text, size_t len);
GLSLANG_EXPORT void glslang_program_set_source_file(glslang_program_t* program, glslang_stage_t stage, const char* file);
GLSLANG_EXPORT int glslang_program_map_io(glslang_program_t* program);
GLSLANG_EXPORT int glslang_program_map_io_with_resolver_and_mapper(glslang_program_t* program, glslang_resolver_t* resolver, glslang_mapper_t* mapper);
GLSLANG_EXPORT void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage);
GLSLANG_EXPORT void glslang_program_SPIRV_generate_with_options(glslang_program_t* program, glslang_stage_t stage, glslang_spv_options_t* spv_options);
GLSLANG_EXPORT size_t glslang_program_SPIRV_get_size(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int*);
GLSLANG_EXPORT unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_SPIRV_get_messages(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_log(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_debug_log(glslang_program_t* program);

/* --- I/O mapper / resolver helpers -------------------------------------- */
GLSLANG_EXPORT glslang_mapper_t* glslang_glsl_mapper_create(void);
GLSLANG_EXPORT void glslang_glsl_mapper_delete(glslang_mapper_t* mapper);

GLSLANG_EXPORT glslang_resolver_t* glslang_glsl_resolver_create(glslang_program_t* program, glslang_stage_t stage);
GLSLANG_EXPORT void glslang_glsl_resolver_delete(glslang_resolver_t* resolver);

#ifdef __cplusplus
}
#endif

#endif /* GLSLANG_C_IFACE_H_INCLUDED */
| 1 | +/** | ||
| 2 | + This code is based on the glslang_c_interface implementation by Viktor Latypov | ||
| 3 | +**/ | ||
| 4 | + | ||
| 5 | +/** | ||
| 6 | +BSD 2-Clause License | ||
| 7 | + | ||
| 8 | +Copyright (c) 2019, Viktor Latypov | ||
| 9 | +All rights reserved. | ||
| 10 | + | ||
| 11 | +Redistribution and use in source and binary forms, with or without | ||
| 12 | +modification, are permitted provided that the following conditions are met: | ||
| 13 | + | ||
| 14 | +1. Redistributions of source code must retain the above copyright notice, this | ||
| 15 | + list of conditions and the following disclaimer. | ||
| 16 | + | ||
| 17 | +2. Redistributions in binary form must reproduce the above copyright notice, | ||
| 18 | + this list of conditions and the following disclaimer in the documentation | ||
| 19 | + and/or other materials provided with the distribution. | ||
| 20 | + | ||
| 21 | +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 22 | +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 23 | +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 24 | +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | ||
| 25 | +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 26 | +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | ||
| 27 | +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 28 | +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
| 29 | +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 30 | +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 31 | +**/ | ||
| 32 | + | ||
| 33 | +#ifndef C_SHADER_TYPES_H_INCLUDED | ||
| 34 | +#define C_SHADER_TYPES_H_INCLUDED | ||
| 35 | + | ||
/* Marks the sentinel "count" enumerator closing each enum below;
 * expands to its argument unchanged. */
#define LAST_ELEMENT_MARKER(x) x
| 37 | + | ||
/* EShLanguage counterpart: the pipeline stage being compiled.
 * The *_NV names are aliases of the core values, kept for backward
 * compatibility with the NV ray-tracing/mesh extensions. */
typedef enum {
    GLSLANG_STAGE_VERTEX,
    GLSLANG_STAGE_TESSCONTROL,
    GLSLANG_STAGE_TESSEVALUATION,
    GLSLANG_STAGE_GEOMETRY,
    GLSLANG_STAGE_FRAGMENT,
    GLSLANG_STAGE_COMPUTE,
    GLSLANG_STAGE_RAYGEN,
    GLSLANG_STAGE_RAYGEN_NV = GLSLANG_STAGE_RAYGEN,
    GLSLANG_STAGE_INTERSECT,
    GLSLANG_STAGE_INTERSECT_NV = GLSLANG_STAGE_INTERSECT,
    GLSLANG_STAGE_ANYHIT,
    GLSLANG_STAGE_ANYHIT_NV = GLSLANG_STAGE_ANYHIT,
    GLSLANG_STAGE_CLOSESTHIT,
    GLSLANG_STAGE_CLOSESTHIT_NV = GLSLANG_STAGE_CLOSESTHIT,
    GLSLANG_STAGE_MISS,
    GLSLANG_STAGE_MISS_NV = GLSLANG_STAGE_MISS,
    GLSLANG_STAGE_CALLABLE,
    GLSLANG_STAGE_CALLABLE_NV = GLSLANG_STAGE_CALLABLE,
    GLSLANG_STAGE_TASK,
    GLSLANG_STAGE_TASK_NV = GLSLANG_STAGE_TASK,
    GLSLANG_STAGE_MESH,
    GLSLANG_STAGE_MESH_NV = GLSLANG_STAGE_MESH,
    LAST_ELEMENT_MARKER(GLSLANG_STAGE_COUNT),
} glslang_stage_t; // historical name: "stage" would be clearer than "language"
| 64 | + | ||
/* EShLanguageMask counterpart: one bit per stage, shifted by the
 * corresponding glslang_stage_t value so masks can be OR-combined. */
typedef enum {
    GLSLANG_STAGE_VERTEX_MASK = (1 << GLSLANG_STAGE_VERTEX),
    GLSLANG_STAGE_TESSCONTROL_MASK = (1 << GLSLANG_STAGE_TESSCONTROL),
    GLSLANG_STAGE_TESSEVALUATION_MASK = (1 << GLSLANG_STAGE_TESSEVALUATION),
    GLSLANG_STAGE_GEOMETRY_MASK = (1 << GLSLANG_STAGE_GEOMETRY),
    GLSLANG_STAGE_FRAGMENT_MASK = (1 << GLSLANG_STAGE_FRAGMENT),
    GLSLANG_STAGE_COMPUTE_MASK = (1 << GLSLANG_STAGE_COMPUTE),
    GLSLANG_STAGE_RAYGEN_MASK = (1 << GLSLANG_STAGE_RAYGEN),
    GLSLANG_STAGE_RAYGEN_NV_MASK = GLSLANG_STAGE_RAYGEN_MASK,
    GLSLANG_STAGE_INTERSECT_MASK = (1 << GLSLANG_STAGE_INTERSECT),
    GLSLANG_STAGE_INTERSECT_NV_MASK = GLSLANG_STAGE_INTERSECT_MASK,
    GLSLANG_STAGE_ANYHIT_MASK = (1 << GLSLANG_STAGE_ANYHIT),
    GLSLANG_STAGE_ANYHIT_NV_MASK = GLSLANG_STAGE_ANYHIT_MASK,
    GLSLANG_STAGE_CLOSESTHIT_MASK = (1 << GLSLANG_STAGE_CLOSESTHIT),
    GLSLANG_STAGE_CLOSESTHIT_NV_MASK = GLSLANG_STAGE_CLOSESTHIT_MASK,
    GLSLANG_STAGE_MISS_MASK = (1 << GLSLANG_STAGE_MISS),
    GLSLANG_STAGE_MISS_NV_MASK = GLSLANG_STAGE_MISS_MASK,
    GLSLANG_STAGE_CALLABLE_MASK = (1 << GLSLANG_STAGE_CALLABLE),
    GLSLANG_STAGE_CALLABLE_NV_MASK = GLSLANG_STAGE_CALLABLE_MASK,
    GLSLANG_STAGE_TASK_MASK = (1 << GLSLANG_STAGE_TASK),
    GLSLANG_STAGE_TASK_NV_MASK = GLSLANG_STAGE_TASK_MASK,
    GLSLANG_STAGE_MESH_MASK = (1 << GLSLANG_STAGE_MESH),
    GLSLANG_STAGE_MESH_NV_MASK = GLSLANG_STAGE_MESH_MASK,
    LAST_ELEMENT_MARKER(GLSLANG_STAGE_MASK_COUNT),
} glslang_stage_mask_t;
| 91 | + | ||
/* EShSource counterpart: source language of the shader text. */
typedef enum {
    GLSLANG_SOURCE_NONE,
    GLSLANG_SOURCE_GLSL,
    GLSLANG_SOURCE_HLSL,
    LAST_ELEMENT_MARKER(GLSLANG_SOURCE_COUNT),
} glslang_source_t;
| 99 | + | ||
/* EShClient counterpart: client API the shader targets. */
typedef enum {
    GLSLANG_CLIENT_NONE,
    GLSLANG_CLIENT_VULKAN,
    GLSLANG_CLIENT_OPENGL,
    LAST_ELEMENT_MARKER(GLSLANG_CLIENT_COUNT),
} glslang_client_t;
| 107 | + | ||
/* EShTargetLanguage counterpart: output language (currently SPIR-V or none). */
typedef enum {
    GLSLANG_TARGET_NONE,
    GLSLANG_TARGET_SPV,
    LAST_ELEMENT_MARKER(GLSLANG_TARGET_COUNT),
} glslang_target_language_t;
| 114 | + | ||
/* SH_TARGET_ClientVersion counterpart.
 * Vulkan values use the Vulkan API version packing (major << 22 | minor << 12);
 * the OpenGL value is the plain GLSL #version number. */
typedef enum {
    GLSLANG_TARGET_VULKAN_1_0 = (1 << 22),
    GLSLANG_TARGET_VULKAN_1_1 = (1 << 22) | (1 << 12),
    GLSLANG_TARGET_VULKAN_1_2 = (1 << 22) | (2 << 12),
    GLSLANG_TARGET_VULKAN_1_3 = (1 << 22) | (3 << 12),
    GLSLANG_TARGET_VULKAN_1_4 = (1 << 22) | (4 << 12),
    GLSLANG_TARGET_OPENGL_450 = 450,
    /* Explicit count: the enumerators are not sequential, so the sentinel
     * cannot be derived implicitly. */
    LAST_ELEMENT_MARKER(GLSLANG_TARGET_CLIENT_VERSION_COUNT = 6),
} glslang_target_client_version_t;
| 125 | + | ||
/* SH_TARGET_LanguageVersion counterpart */
/* Encoded as (major << 16) | (minor << 8), matching the version word
 * ("word 1") of a SPIR-V module header. Values are NOT ordinal, so the
 * COUNT sentinel must be assigned explicitly and kept in step. */
typedef enum {
    GLSLANG_TARGET_SPV_1_0 = (1 << 16),
    GLSLANG_TARGET_SPV_1_1 = (1 << 16) | (1 << 8),
    GLSLANG_TARGET_SPV_1_2 = (1 << 16) | (2 << 8),
    GLSLANG_TARGET_SPV_1_3 = (1 << 16) | (3 << 8),
    GLSLANG_TARGET_SPV_1_4 = (1 << 16) | (4 << 8),
    GLSLANG_TARGET_SPV_1_5 = (1 << 16) | (5 << 8),
    GLSLANG_TARGET_SPV_1_6 = (1 << 16) | (6 << 8),
    LAST_ELEMENT_MARKER(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT = 7),
} glslang_target_language_version_t;
| 137 | + | ||
/* EShExecutable counterpart */
/* Which portion of the pipeline a linked program represents. */
typedef enum { GLSLANG_EX_VERTEX_FRAGMENT, GLSLANG_EX_FRAGMENT } glslang_executable_t;
| 140 | + | ||
// EShOptimizationLevel counterpart
// This enum is not used in the current C interface, but could be added at a later date.
// GLSLANG_OPT_NONE is the current default.
typedef enum {
    GLSLANG_OPT_NO_GENERATION,
    GLSLANG_OPT_NONE,
    GLSLANG_OPT_SIMPLE,
    GLSLANG_OPT_FULL,
    LAST_ELEMENT_MARKER(GLSLANG_OPT_LEVEL_COUNT), /* sentinel: number of real entries */
} glslang_optimization_level_t;
| 151 | + | ||
/* EShTextureSamplerTransformMode counterpart */
/* Whether combined texture/sampler uses are kept as-is or rewritten into
 * separate texture objects with the sampler removed. */
typedef enum {
    GLSLANG_TEX_SAMP_TRANS_KEEP,
    GLSLANG_TEX_SAMP_TRANS_UPGRADE_TEXTURE_REMOVE_SAMPLER,
    LAST_ELEMENT_MARKER(GLSLANG_TEX_SAMP_TRANS_COUNT),
} glslang_texture_sampler_transform_mode_t;
| 158 | + | ||
/* EShMessages counterpart: bit flags selecting diagnostic output and
 * semantic rule sets. Combine with bitwise OR.
 * NOTE(review): GLSLANG_MSG_ENHANCED and GLSLANG_MSG_ABSOLUTE_PATH lack the
 * _BIT suffix of their siblings; the names are part of the public C API and
 * must not be changed. */
typedef enum {
    GLSLANG_MSG_DEFAULT_BIT = 0,
    GLSLANG_MSG_RELAXED_ERRORS_BIT = (1 << 0),
    GLSLANG_MSG_SUPPRESS_WARNINGS_BIT = (1 << 1),
    GLSLANG_MSG_AST_BIT = (1 << 2),
    GLSLANG_MSG_SPV_RULES_BIT = (1 << 3),
    GLSLANG_MSG_VULKAN_RULES_BIT = (1 << 4),
    GLSLANG_MSG_ONLY_PREPROCESSOR_BIT = (1 << 5),
    GLSLANG_MSG_READ_HLSL_BIT = (1 << 6),
    GLSLANG_MSG_CASCADING_ERRORS_BIT = (1 << 7),
    GLSLANG_MSG_KEEP_UNCALLED_BIT = (1 << 8),
    GLSLANG_MSG_HLSL_OFFSETS_BIT = (1 << 9),
    GLSLANG_MSG_DEBUG_INFO_BIT = (1 << 10),
    GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT = (1 << 11),
    GLSLANG_MSG_HLSL_LEGALIZATION_BIT = (1 << 12),
    GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT = (1 << 13),
    GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT = (1 << 14),
    GLSLANG_MSG_ENHANCED = (1 << 15),
    GLSLANG_MSG_ABSOLUTE_PATH = (1 << 16),
    GLSLANG_MSG_DISPLAY_ERROR_COLUMN = (1 << 17),
    GLSLANG_MSG_LINK_TIME_OPTIMIZATION_BIT = (1 << 18),
    LAST_ELEMENT_MARKER(GLSLANG_MSG_COUNT),
} glslang_messages_t;
| 183 | + | ||
/* EShReflectionOptions counterpart: bit flags controlling how reflection
 * data is gathered and named. Combine with bitwise OR. */
typedef enum {
    GLSLANG_REFLECTION_DEFAULT_BIT = 0,
    GLSLANG_REFLECTION_STRICT_ARRAY_SUFFIX_BIT = (1 << 0),
    GLSLANG_REFLECTION_BASIC_ARRAY_SUFFIX_BIT = (1 << 1),
    GLSLANG_REFLECTION_INTERMEDIATE_IOO_BIT = (1 << 2),
    GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3),
    GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4),
    GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5),
    GLSLANG_REFLECTION_ALL_IO_VARIABLES_BIT = (1 << 6),
    GLSLANG_REFLECTION_SHARED_STD140_SSBO_BIT = (1 << 7),
    GLSLANG_REFLECTION_SHARED_STD140_UBO_BIT = (1 << 8),
    LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT),
} glslang_reflection_options_t;
| 198 | + | ||
/* EProfile counterpart (from Versions.h) */
/* Bitmask form so multiple profiles can be queried at once; each value must
 * match the corresponding EProfile bit exactly. */
typedef enum {
    GLSLANG_BAD_PROFILE = 0,
    GLSLANG_NO_PROFILE = (1 << 0),
    GLSLANG_CORE_PROFILE = (1 << 1),
    GLSLANG_COMPATIBILITY_PROFILE = (1 << 2),
    GLSLANG_ES_PROFILE = (1 << 3),
    LAST_ELEMENT_MARKER(GLSLANG_PROFILE_COUNT),
} glslang_profile_t;
| 208 | + | ||
/* Shader options: bit flags applied per shader object; combine with OR. */
typedef enum {
    GLSLANG_SHADER_DEFAULT_BIT = 0,
    GLSLANG_SHADER_AUTO_MAP_BINDINGS = (1 << 0),
    GLSLANG_SHADER_AUTO_MAP_LOCATIONS = (1 << 1),
    GLSLANG_SHADER_VULKAN_RULES_RELAXED = (1 << 2),
    LAST_ELEMENT_MARKER(GLSLANG_SHADER_COUNT),
} glslang_shader_options_t;
| 217 | + | ||
/* TResourceType counterpart: category of bindable shader resource. */
/* Ordinal values; must stay in sync with the C++ TResourceType enum. */
typedef enum {
    GLSLANG_RESOURCE_TYPE_SAMPLER,
    GLSLANG_RESOURCE_TYPE_TEXTURE,
    GLSLANG_RESOURCE_TYPE_IMAGE,
    GLSLANG_RESOURCE_TYPE_UBO,
    GLSLANG_RESOURCE_TYPE_SSBO,
    GLSLANG_RESOURCE_TYPE_UAV,
    LAST_ELEMENT_MARKER(GLSLANG_RESOURCE_TYPE_COUNT),
} glslang_resource_type_t;
| 228 | + | ||
| 229 | +#undef LAST_ELEMENT_MARKER | ||
| 230 | + | ||
| 231 | +#endif |
| 1 | +// | ||
| 2 | +// Copyright (C) 2023 LunarG, Inc. | ||
| 3 | +// | ||
| 4 | +// All rights reserved. | ||
| 5 | +// | ||
| 6 | +// Redistribution and use in source and binary forms, with or without | ||
| 7 | +// modification, are permitted provided that the following conditions | ||
| 8 | +// are met: | ||
| 9 | +// | ||
| 10 | +// Redistributions of source code must retain the above copyright | ||
| 11 | +// notice, this list of conditions and the following disclaimer. | ||
| 12 | +// | ||
| 13 | +// Redistributions in binary form must reproduce the above | ||
| 14 | +// copyright notice, this list of conditions and the following | ||
| 15 | +// disclaimer in the documentation and/or other materials provided | ||
| 16 | +// with the distribution. | ||
| 17 | +// | ||
| 18 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 19 | +// contributors may be used to endorse or promote products derived | ||
| 20 | +// from this software without specific prior written permission. | ||
| 21 | +// | ||
| 22 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 23 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 24 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 25 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 26 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 27 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 28 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 29 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 30 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 31 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 32 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | +// | ||
// GLSLANG_EXPORT controls symbol visibility when glslang is built or
// consumed as a shared library:
//   - Windows: dllexport while building the library (GLSLANG_EXPORTING),
//     dllimport for consumers.
//   - GCC/Clang (__GNUC__ >= 4): default ELF visibility.
#ifdef GLSLANG_IS_SHARED_LIBRARY
    #ifdef _WIN32
        #ifdef GLSLANG_EXPORTING
            #define GLSLANG_EXPORT __declspec(dllexport)
        #else
            #define GLSLANG_EXPORT __declspec(dllimport)
        #endif
    #elif __GNUC__ >= 4
        #define GLSLANG_EXPORT __attribute__((visibility("default")))
    #endif
#endif // GLSLANG_IS_SHARED_LIBRARY

// Fallback: static builds, and shared builds on toolchains not handled
// above, get an empty definition.
#ifndef GLSLANG_EXPORT
#define GLSLANG_EXPORT
#endif

// Symbols marked with this macro are only meant for public use by the test suite
// and do not appear in publicly installed headers. They are not considered to be
// part of the glslang library ABI.
#define GLSLANG_EXPORT_FOR_TESTS GLSLANG_EXPORT
| 1 | +// | ||
| 2 | +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. | ||
| 3 | +// Copyright (C) 2012-2013 LunarG, Inc. | ||
| 4 | +// Copyright (C) 2017, 2022-2024 Arm Limited. | ||
| 5 | +// Copyright (C) 2015-2018 Google, Inc. | ||
| 6 | +// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved. | ||
| 7 | +// Modifications Copyright (C) 2024 Valve Corporation. | ||
| 8 | +// | ||
| 9 | +// All rights reserved. | ||
| 10 | +// | ||
| 11 | +// Redistribution and use in source and binary forms, with or without | ||
| 12 | +// modification, are permitted provided that the following conditions | ||
| 13 | +// are met: | ||
| 14 | +// | ||
| 15 | +// Redistributions of source code must retain the above copyright | ||
| 16 | +// notice, this list of conditions and the following disclaimer. | ||
| 17 | +// | ||
| 18 | +// Redistributions in binary form must reproduce the above | ||
| 19 | +// copyright notice, this list of conditions and the following | ||
| 20 | +// disclaimer in the documentation and/or other materials provided | ||
| 21 | +// with the distribution. | ||
| 22 | +// | ||
| 23 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 24 | +// contributors may be used to endorse or promote products derived | ||
| 25 | +// from this software without specific prior written permission. | ||
| 26 | +// | ||
| 27 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 28 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 29 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 30 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 31 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 32 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 33 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 34 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 35 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 36 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 37 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 38 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 39 | +// | ||
| 40 | + | ||
| 41 | +#ifndef _VERSIONS_INCLUDED_ | ||
| 42 | +#define _VERSIONS_INCLUDED_ | ||
| 43 | + | ||
// Expands to its argument unchanged; used only to tag the final sentinel
// "count" member of the enums below so it stands apart from real entries.
#define LAST_ELEMENT_MARKER(x) x
| 45 | + | ||
| 46 | +// | ||
| 47 | +// Help manage multiple profiles, versions, extensions etc. | ||
| 48 | +// | ||
| 49 | + | ||
| 50 | +// | ||
| 51 | +// Profiles are set up for masking operations, so queries can be done on multiple | ||
| 52 | +// profiles at the same time. | ||
| 53 | +// | ||
| 54 | +// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible | ||
| 55 | +// defects from mixing the two different forms. | ||
| 56 | +// | ||
typedef enum : unsigned {
    EBadProfile = 0,
    ENoProfile = (1 << 0), // only for desktop, before profiles showed up
    ECoreProfile = (1 << 1),
    ECompatibilityProfile = (1 << 2),
    EEsProfile = (1 << 3),
    LAST_ELEMENT_MARKER(EProfileCount), // sentinel (EEsProfile + 1); not a valid profile bit
} EProfile;
| 65 | + | ||
| 66 | +namespace glslang { | ||
| 67 | + | ||
| 68 | +// | ||
| 69 | +// Map from profile enum to externally readable text name. | ||
| 70 | +// | ||
| 71 | +inline const char* ProfileName(EProfile profile) | ||
| 72 | +{ | ||
| 73 | + switch (profile) { | ||
| 74 | + case ENoProfile: return "none"; | ||
| 75 | + case ECoreProfile: return "core"; | ||
| 76 | + case ECompatibilityProfile: return "compatibility"; | ||
| 77 | + case EEsProfile: return "es"; | ||
| 78 | + default: return "unknown profile"; | ||
| 79 | + } | ||
| 80 | +} | ||
| 81 | + | ||
| 82 | +// | ||
| 83 | +// What source rules, validation rules, target language, etc. are needed or | ||
| 84 | +// desired for SPIR-V? | ||
| 85 | +// | ||
| 86 | +// 0 means a target or rule set is not enabled (ignore rules from that entity). | ||
| 87 | +// Non-0 means to apply semantic rules arising from that version of its rule set. | ||
| 88 | +// The union of all requested rule sets will be applied. | ||
| 89 | +// | ||
| 90 | +struct SpvVersion { | ||
| 91 | + SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0), vulkanRelaxed(false) {} | ||
| 92 | + unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header | ||
| 93 | + int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX" | ||
| 94 | + int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use | ||
| 95 | + int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX" | ||
| 96 | + bool vulkanRelaxed; // relax changes to GLSL for Vulkan, allowing some GL-specific to be compiled to Vulkan SPIR-V target | ||
| 97 | +}; | ||
| 98 | + | ||
| 99 | +// | ||
| 100 | +// The behaviors from the GLSL "#extension extension_name : behavior" | ||
| 101 | +// | ||
| 102 | +typedef enum { | ||
| 103 | + EBhMissing = 0, | ||
| 104 | + EBhRequire, | ||
| 105 | + EBhEnable, | ||
| 106 | + EBhWarn, | ||
| 107 | + EBhDisable, | ||
| 108 | + EBhDisablePartial // use as initial state of an extension that is only partially implemented | ||
| 109 | +} TExtensionBehavior; | ||
| 110 | + | ||
| 111 | +// | ||
| 112 | +// Symbolic names for extensions. Strings may be directly used when calling the | ||
| 113 | +// functions, but better to have the compiler do spelling checks. | ||
| 114 | +// | ||
| 115 | +const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D"; | ||
| 116 | +const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives"; | ||
| 117 | +const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth"; | ||
| 118 | +const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external"; | ||
| 119 | +const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3"; | ||
| 120 | +const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target"; | ||
| 121 | +const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod"; | ||
| 122 | +const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers"; | ||
| 123 | + | ||
// ARB extensions
const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_atomic_counter_ops = "GL_ARB_shader_atomic_counter_ops";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_ARB_shader_bit_encoding = "GL_ARB_shader_bit_encoding";
const char* const E_GL_ARB_shader_image_size = "GL_ARB_shader_image_size";
const char* const E_GL_ARB_shader_storage_buffer_object = "GL_ARB_shader_storage_buffer_object";
const char* const E_GL_ARB_shading_language_packing = "GL_ARB_shading_language_packing";
const char* const E_GL_ARB_texture_query_lod = "GL_ARB_texture_query_lod";
const char* const E_GL_ARB_vertex_attrib_64bit = "GL_ARB_vertex_attrib_64bit";
const char* const E_GL_ARB_draw_instanced = "GL_ARB_draw_instanced";
const char* const E_GL_ARB_fragment_coord_conventions = "GL_ARB_fragment_coord_conventions";
const char* const E_GL_ARB_bindless_texture = "GL_ARB_bindless_texture";
| 168 | + | ||
// KHR extensions
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
const char* const E_GL_KHR_shader_subgroup_rotate = "GL_KHR_shader_subgroup_rotate";
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
const char* const E_GL_KHR_cooperative_matrix = "GL_KHR_cooperative_matrix";

const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";

const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";

const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
| 188 | + | ||
| 189 | + | ||
// EXT extensions
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
const char* const E_GL_EXT_debug_printf = "GL_EXT_debug_printf";
const char* const E_GL_EXT_ray_tracing = "GL_EXT_ray_tracing";
const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query";
const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling";
const char* const E_GL_EXT_ray_cull_mask = "GL_EXT_ray_cull_mask";
const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended";
const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions";
const char* const E_GL_EXT_fragment_shading_rate = "GL_EXT_fragment_shading_rate";
const char* const E_GL_EXT_shader_image_int64 = "GL_EXT_shader_image_int64";
const char* const E_GL_EXT_null_initializer = "GL_EXT_null_initializer";
const char* const E_GL_EXT_shared_memory_block = "GL_EXT_shared_memory_block";
const char* const E_GL_EXT_subgroup_uniform_control_flow = "GL_EXT_subgroup_uniform_control_flow";
const char* const E_GL_EXT_spirv_intrinsics = "GL_EXT_spirv_intrinsics";
const char* const E_GL_EXT_fragment_shader_barycentric = "GL_EXT_fragment_shader_barycentric";
const char* const E_GL_EXT_mesh_shader = "GL_EXT_mesh_shader";
const char* const E_GL_EXT_opacity_micromap = "GL_EXT_opacity_micromap";
const char* const E_GL_EXT_shader_quad_control = "GL_EXT_shader_quad_control";
const char* const E_GL_EXT_draw_instanced = "GL_EXT_draw_instanced";
const char* const E_GL_EXT_texture_array = "GL_EXT_texture_array";
const char* const E_GL_EXT_maximal_reconvergence = "GL_EXT_maximal_reconvergence";
const char* const E_GL_EXT_expect_assume = "GL_EXT_expect_assume";
const char* const E_GL_EXT_control_flow_attributes2 = "GL_EXT_control_flow_attributes2";
const char* const E_GL_EXT_spec_constant_composites = "GL_EXT_spec_constant_composites";
const char* const E_GL_EXT_texture_offset_non_const = "GL_EXT_texture_offset_non_const";
const char* const E_GL_EXT_nontemporal_keyword = "GL_EXT_nontemporal_keyword";
| 229 | + | ||
// Arrays of extensions for the above post_depth_coverage duplications
// (the comment previously said "viewportEXTs", copied from the viewport
// section further below).

const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);

// Array of extensions to cover both extensions providing ray tracing capabilities.
const char* const ray_tracing_EXTs[] = { E_GL_EXT_ray_query, E_GL_EXT_ray_tracing };
const int Num_ray_tracing_EXTs = sizeof(ray_tracing_EXTs) / sizeof(ray_tracing_EXTs[0]);
| 238 | + | ||
// OVR extensions, plus an array grouping the two multiview variants so both
// can be queried together.
const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";

const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
| 245 | + | ||
// Extensions gating #line and #include preprocessor support
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
const char* const E_GL_ARB_shading_language_include = "GL_ARB_shading_language_include";
| 250 | + | ||
// AMD extensions
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
const char* const E_GL_AMD_shader_early_and_late_fragment_tests = "GL_AMD_shader_early_and_late_fragment_tests";

// INTEL extensions
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
| 264 | + | ||
// NV extensions
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
// NOTE(review): named E_SPV_NV_... unlike its E_GL_NV_ siblings; the
// identifier is referenced elsewhere, so it must keep this spelling.
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
const char* const E_GL_NV_ray_tracing_motion_blur = "GL_NV_ray_tracing_motion_blur";
const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
const char* const E_GL_NV_shader_invocation_reorder = "GL_NV_shader_invocation_reorder";
// NOTE(review): an EXT constant sits in the NV list — presumably historical
// ordering; left in place.
const char* const E_GL_EXT_ray_tracing_position_fetch = "GL_EXT_ray_tracing_position_fetch";
const char* const E_GL_NV_displacement_micromap = "GL_NV_displacement_micromap";
const char* const E_GL_NV_shader_atomic_fp16_vector = "GL_NV_shader_atomic_fp16_vector";
const char* const E_GL_NV_cooperative_matrix2 = "GL_NV_cooperative_matrix2";
const char* const E_GL_NV_cooperative_vector = "GL_NV_cooperative_vector";
const char* const E_GL_NV_cluster_acceleration_structure = "GL_NV_cluster_acceleration_structure";
const char* const E_GL_NV_linear_swept_spheres = "GL_NV_linear_swept_spheres";

// ARM extensions
const char* const E_GL_ARM_shader_core_builtins = "GL_ARM_shader_core_builtins";
| 295 | + | ||
// Arrays of extensions for the above viewportEXTs duplications

const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);


// QCOM extensions
const char* const E_GL_QCOM_image_processing = "GL_QCOM_image_processing";
const char* const E_GL_QCOM_image_processing2 = "GL_QCOM_image_processing2";
| 304 | + | ||
// AEP (GL_ANDROID_extension_pack_es31a and the extensions it bundles)
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
const char* const E_GL_EXT_shader_integer_mix = "GL_EXT_shader_integer_mix";
| 322 | + | ||
// OES counterparts of the AEP extensions above
const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
| 333 | + | ||
| 334 | +// EXT | ||
| 335 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types"; | ||
| 336 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8"; | ||
| 337 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16"; | ||
| 338 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32"; | ||
| 339 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64"; | ||
| 340 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16"; | ||
| 341 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32"; | ||
| 342 | +const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64"; | ||
| 343 | + | ||
| 344 | +const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8"; | ||
| 345 | +const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16"; | ||
| 346 | +const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64"; | ||
| 347 | +const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16"; | ||
| 348 | +const char* const E_GL_EXT_terminate_invocation = "GL_EXT_terminate_invocation"; | ||
| 349 | + | ||
| 350 | +const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float"; | ||
| 351 | +const char* const E_GL_EXT_shader_atomic_float2 = "GL_EXT_shader_atomic_float2"; | ||
| 352 | + | ||
| 353 | +const char* const E_GL_EXT_shader_tile_image = "GL_EXT_shader_tile_image"; | ||
| 354 | + | ||
| 355 | +const char* const E_GL_EXT_texture_shadow_lod = "GL_EXT_texture_shadow_lod"; | ||
| 356 | + | ||
| 357 | +const char* const E_GL_EXT_integer_dot_product = "GL_EXT_integer_dot_product"; | ||
| 358 | + | ||
// Arrays of extensions for the above AEP duplications
//
// Each table pairs the EXT spelling of a feature with its OES (or, for mesh
// shaders, NV) alias, so a capability check can accept either extension.  The
// Num_* companions give the element count of each table.

const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);

const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);

const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);

const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);

const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);

const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);

const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);

const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);

const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);

// NV/EXT pairing (not part of AEP proper, but handled the same way)
const char* const AEP_mesh_shader[] = { E_GL_NV_mesh_shader, E_GL_EXT_mesh_shader };
const int Num_AEP_mesh_shader = sizeof(AEP_mesh_shader)/sizeof(AEP_mesh_shader[0]);
| 391 | +} // end namespace glslang | ||
| 392 | + | ||
| 393 | +#endif // _VERSIONS_INCLUDED_ |
| 1 | +// | ||
| 2 | +// Copyright (C) 2016 Google, Inc. | ||
| 3 | +// | ||
| 4 | +// All rights reserved. | ||
| 5 | +// | ||
| 6 | +// Redistribution and use in source and binary forms, with or without | ||
| 7 | +// modification, are permitted provided that the following conditions | ||
| 8 | +// are met: | ||
| 9 | +// | ||
| 10 | +// Redistributions of source code must retain the above copyright | ||
| 11 | +// notice, this list of conditions and the following disclaimer. | ||
| 12 | +// | ||
| 13 | +// Redistributions in binary form must reproduce the above | ||
| 14 | +// copyright notice, this list of conditions and the following | ||
| 15 | +// disclaimer in the documentation and/or other materials provided | ||
| 16 | +// with the distribution. | ||
| 17 | +// | ||
| 18 | +// Neither the name of Google Inc. nor the names of its | ||
| 19 | +// contributors may be used to endorse or promote products derived | ||
| 20 | +// from this software without specific prior written permission. | ||
| 21 | +// | ||
| 22 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 23 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 24 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 25 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 26 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 27 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 28 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 29 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 30 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 31 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 32 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | + | ||
#ifndef _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
#define _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_

#include <string>

#include "../Include/ResourceLimits.h"
#include "../Include/visibility.h"

// Return pointer to user-writable Resource to pass through API in
// future-proof way.  Callers may modify the pointed-to struct before
// handing it to the compiler.
GLSLANG_EXPORT extern TBuiltInResource* GetResources();

// These are the default resources for TBuiltInResources, used for both
// - parsing this string for the case where the user didn't supply one,
// - dumping out a template for user construction of a config file.
// The returned pointer is read-only.
GLSLANG_EXPORT extern const TBuiltInResource* GetDefaultResources();

// Returns the DefaultTBuiltInResource as a human-readable string.
GLSLANG_EXPORT std::string GetDefaultTBuiltInResourceString();

// Decodes the resource limits from |config| to |resources|.
// NOTE(review): |config| is taken as non-const char* — presumably parsed
// in place/tokenized destructively; confirm before passing a literal.
GLSLANG_EXPORT void DecodeResourceLimits(TBuiltInResource* resources, char* config);

#endif // _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
| 1 | +// | ||
| 2 | +// Copyright (C) 2002-2005 3Dlabs Inc. Ltd. | ||
| 3 | +// Copyright (C) 2013-2016 LunarG, Inc. | ||
| 4 | +// Copyright (C) 2015-2018 Google, Inc. | ||
| 5 | +// | ||
| 6 | +// All rights reserved. | ||
| 7 | +// | ||
| 8 | +// Redistribution and use in source and binary forms, with or without | ||
| 9 | +// modification, are permitted provided that the following conditions | ||
| 10 | +// are met: | ||
| 11 | +// | ||
| 12 | +// Redistributions of source code must retain the above copyright | ||
| 13 | +// notice, this list of conditions and the following disclaimer. | ||
| 14 | +// | ||
| 15 | +// Redistributions in binary form must reproduce the above | ||
| 16 | +// copyright notice, this list of conditions and the following | ||
| 17 | +// disclaimer in the documentation and/or other materials provided | ||
| 18 | +// with the distribution. | ||
| 19 | +// | ||
| 20 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 21 | +// contributors may be used to endorse or promote products derived | ||
| 22 | +// from this software without specific prior written permission. | ||
| 23 | +// | ||
| 24 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 25 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 26 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 27 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 28 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 29 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 30 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 31 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 32 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 33 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 34 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 35 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 36 | +// | ||
| 37 | +#ifndef _COMPILER_INTERFACE_INCLUDED_ | ||
| 38 | +#define _COMPILER_INTERFACE_INCLUDED_ | ||
| 39 | + | ||
| 40 | +#include "../Include/ResourceLimits.h" | ||
| 41 | +#include "../Include/visibility.h" | ||
| 42 | +#include "../MachineIndependent/Versions.h" | ||
| 43 | + | ||
| 44 | +#include <cstring> | ||
| 45 | +#include <vector> | ||
| 46 | + | ||
// Calling-convention spelling: __cdecl on Windows, empty elsewhere.
#ifdef _WIN32
    #define C_DECL __cdecl
#else
    #define C_DECL
#endif

//
// This is the platform independent interface between an OGL driver
// and the shading language compiler/linker.
//

#ifdef __cplusplus
    extern "C" {
#endif

//
// Call before doing any other compiler/linker operations.
//
// (Call once per process, not once per thread.)
//
GLSLANG_EXPORT int ShInitialize();

//
// Call this at process shutdown to clean up memory.
//
GLSLANG_EXPORT int ShFinalize();
| 73 | + | ||
| 74 | +// | ||
| 75 | +// Types of languages the compiler can consume. | ||
| 76 | +// | ||
// One enumerant per pipeline stage the compiler can consume.  The *NV
// names are retained as aliases of the cross-vendor names for source
// compatibility with older clients.
typedef enum {
    EShLangVertex,
    EShLangTessControl,
    EShLangTessEvaluation,
    EShLangGeometry,
    EShLangFragment,
    EShLangCompute,
    EShLangRayGen,
    EShLangRayGenNV = EShLangRayGen,
    EShLangIntersect,
    EShLangIntersectNV = EShLangIntersect,
    EShLangAnyHit,
    EShLangAnyHitNV = EShLangAnyHit,
    EShLangClosestHit,
    EShLangClosestHitNV = EShLangClosestHit,
    EShLangMiss,
    EShLangMissNV = EShLangMiss,
    EShLangCallable,
    EShLangCallableNV = EShLangCallable,
    EShLangTask,
    EShLangTaskNV = EShLangTask,
    EShLangMesh,
    EShLangMeshNV = EShLangMesh,
    LAST_ELEMENT_MARKER(EShLangCount),
} EShLanguage; // would be better as stage, but this is ancient now
| 102 | + | ||
// Bitmask form of EShLanguage: bit (1 << stage) is set for each stage,
// allowing sets of stages to be expressed in a single value.
typedef enum : unsigned {
    EShLangVertexMask = (1 << EShLangVertex),
    EShLangTessControlMask = (1 << EShLangTessControl),
    EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
    EShLangGeometryMask = (1 << EShLangGeometry),
    EShLangFragmentMask = (1 << EShLangFragment),
    EShLangComputeMask = (1 << EShLangCompute),
    EShLangRayGenMask = (1 << EShLangRayGen),
    EShLangRayGenNVMask = EShLangRayGenMask,
    EShLangIntersectMask = (1 << EShLangIntersect),
    EShLangIntersectNVMask = EShLangIntersectMask,
    EShLangAnyHitMask = (1 << EShLangAnyHit),
    EShLangAnyHitNVMask = EShLangAnyHitMask,
    EShLangClosestHitMask = (1 << EShLangClosestHit),
    EShLangClosestHitNVMask = EShLangClosestHitMask,
    EShLangMissMask = (1 << EShLangMiss),
    EShLangMissNVMask = EShLangMissMask,
    EShLangCallableMask = (1 << EShLangCallable),
    EShLangCallableNVMask = EShLangCallableMask,
    EShLangTaskMask = (1 << EShLangTask),
    EShLangTaskNVMask = EShLangTaskMask,
    EShLangMeshMask = (1 << EShLangMesh),
    EShLangMeshNVMask = EShLangMeshMask,
    LAST_ELEMENT_MARKER(EShLanguageMaskCount),
} EShLanguageMask;
| 128 | + | ||
| 129 | +namespace glslang { | ||
| 130 | + | ||
| 131 | +class TType; | ||
| 132 | + | ||
// Source language family of the input shader text.
typedef enum {
    EShSourceNone,
    EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL)
    EShSourceHlsl, // HLSL
    LAST_ELEMENT_MARKER(EShSourceCount),
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead

// Client API the compilation is being done for; selects which GLSL
// dialect extension governs SPIR-V-specific semantics.
typedef enum {
    EShClientNone, // use when there is no client, e.g. for validation
    EShClientVulkan, // as GLSL dialect, specifies KHR_vulkan_glsl extension
    EShClientOpenGL, // as GLSL dialect, specifies ARB_gl_spirv extension
    LAST_ELEMENT_MARKER(EShClientCount),
} EShClient;

// Output language to generate.
typedef enum {
    EShTargetNone,
    EShTargetSpv, // SPIR-V (preferred spelling)
    EshTargetSpv = EShTargetSpv, // legacy spelling
    LAST_ELEMENT_MARKER(EShTargetCount),
} EShTargetLanguage;

// Client version, encoded like VK_MAKE_API_VERSION: major in bits 22+,
// minor in bits 12..21 (OpenGL uses its plain numeric version instead).
typedef enum {
    EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0
    EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1
    EShTargetVulkan_1_2 = (1 << 22) | (2 << 12), // Vulkan 1.2
    EShTargetVulkan_1_3 = (1 << 22) | (3 << 12), // Vulkan 1.3
    EShTargetVulkan_1_4 = (1 << 22) | (4 << 12), // Vulkan 1.4
    EShTargetOpenGL_450 = 450, // OpenGL
    LAST_ELEMENT_MARKER(EShTargetClientVersionCount = 6),
} EShTargetClientVersion;

// Legacy capitalization kept for source compatibility.
typedef EShTargetClientVersion EshTargetClientVersion;

// SPIR-V version, encoded as in word 1 of a SPIR-V module header:
// major in bits 16+, minor in bits 8..15.
typedef enum {
    EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0
    EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1
    EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2
    EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3
    EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4
    EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5
    EShTargetSpv_1_6 = (1 << 16) | (6 << 8), // SPIR-V 1.6
    LAST_ELEMENT_MARKER(EShTargetLanguageVersionCount = 7),
} EShTargetLanguageVersion;
| 176 | + | ||
| 177 | +// | ||
| 178 | +// Following are a series of helper enums for managing layouts and qualifiers, | ||
| 179 | +// used for TPublicType, TType, others. | ||
| 180 | +// | ||
| 181 | + | ||
// Memory-layout packing rule declared on a block (layout(std140) etc.).
enum TLayoutPacking {
    ElpNone,
    ElpShared, // default, but different than saying nothing
    ElpStd140,
    ElpStd430,
    ElpPacked,
    ElpScalar,
    ElpCount // If expanding, see bitfield width below
};
| 191 | + | ||
// Description of the input shader: language family, stage, and the
// client dialect it was written against.
struct TInputLanguage {
    EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
    EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
    EShClient dialect;
    int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
    bool vulkanRulesRelaxed;
};

// Which client API, and which version of it, the compilation targets.
struct TClient {
    EShClient client;
    EShTargetClientVersion version; // version of client itself (not the client's input dialect)
};

// What output to generate.
struct TTarget {
    EShTargetLanguage language;
    EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
    bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
};

// All source/client/target versions and settings.
// Can override previous methods of setting, when items are set here.
// Expected to grow, as more are added, rather than growing parameter lists.
struct TEnvironment {
    TInputLanguage input; // definition of the input language
    TClient client; // what client is the overall compilation being done for?
    TTarget target; // what to generate
};
| 219 | + | ||
| 220 | +GLSLANG_EXPORT const char* StageName(EShLanguage); | ||
| 221 | + | ||
| 222 | +} // end namespace glslang | ||
| 223 | + | ||
| 224 | +// | ||
| 225 | +// Types of output the linker will create. | ||
| 226 | +// | ||
// Kinds of executable the linker can produce.
typedef enum {
    EShExVertexFragment,
    EShExFragment
} EShExecutable;

//
// Optimization level for the compiler.
//
typedef enum {
    EShOptNoGeneration,
    EShOptNone,
    EShOptSimple, // Optimizations that can be done quickly
    EShOptFull, // Optimizations that will take more time
    LAST_ELEMENT_MARKER(EshOptLevelCount),
} EShOptimizationLevel;

//
// Texture and Sampler transformation mode.
//
typedef enum {
    EShTexSampTransKeep, // keep textures and samplers as is (default)
    EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embedded sampler into sampled texture and throw away all samplers
    LAST_ELEMENT_MARKER(EShTexSampTransCount),
} EShTextureSamplerTransformMode;
| 251 | + | ||
| 252 | +// | ||
| 253 | +// Message choices for what errors and warnings are given. | ||
| 254 | +// | ||
// Bitmask of message/behavior choices for what errors and warnings are
// given; values may be OR'd together and passed to parse/compile calls.
enum EShMessages : unsigned {
    EShMsgDefault = 0, // default is to give all required errors and extra warnings
    EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
    EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
    EShMsgAST = (1 << 2), // print the AST intermediate representation
    EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
    EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
    EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
    EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
    EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
    EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
    EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
    EShMsgDebugInfo = (1 << 10), // save debug information
    EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
    EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
    EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (for samplers and semantics)
    EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
    EShMsgEnhanced = (1 << 15), // enhanced message readability
    EShMsgAbsolutePath = (1 << 16), // Output Absolute path for messages
    EShMsgDisplayErrorColumn = (1 << 17), // Display error message column as well as line
    EShMsgLinkTimeOptimization = (1 << 18), // perform cross-stage optimizations during linking
    LAST_ELEMENT_MARKER(EShMsgCount),
};
| 278 | + | ||
| 279 | +// | ||
| 280 | +// Options for building reflection | ||
| 281 | +// | ||
//
// Options for building reflection
//
typedef enum {
    EShReflectionDefault = 0, // default is original behaviour before options were added
    EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
    EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
    EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
    EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
    EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
    EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
    EShReflectionAllIOVariables = (1 << 6), // reflect all input/output variables, even if they are inactive
    EShReflectionSharedStd140SSBO = (1 << 7), // Apply std140/shared rules for ubo to ssbo
    EShReflectionSharedStd140UBO = (1 << 8), // NOTE(review): upstream comment duplicated the SSBO text; presumably applies std140/shared rules to UBOs — confirm
    LAST_ELEMENT_MARKER(EShReflectionCount),
} EShReflectionOptions;
| 295 | + | ||
| 296 | +// | ||
| 297 | +// Build a table for bindings. This can be used for locating | ||
| 298 | +// attributes, uniforms, globals, etc., as needed. | ||
| 299 | +// | ||
// One name -> binding-number association.
typedef struct {
    const char* name;
    int binding;
} ShBinding;

// A table of bindings; numBindings gives the length of the bindings array.
typedef struct {
    int numBindings;
    ShBinding* bindings; // array of bindings
} ShBindingTable;
| 309 | + | ||
//
// ShHandle held by but opaque to the driver. It is allocated,
// managed, and de-allocated by the compiler/linker. Its contents
// are defined by and used by the compiler and linker. For example,
// symbol table information and object code passed from the compiler
// to the linker can be stored where ShHandle points.
//
// If handle creation fails, 0 will be returned.
//
typedef void* ShHandle;

//
// Driver calls these to create and destroy compiler/linker
// objects.
//
GLSLANG_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int /*debugOptions unused*/); // one per shader
GLSLANG_EXPORT ShHandle ShConstructLinker(const EShExecutable, int /*debugOptions unused*/); // one per shader pair
GLSLANG_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
GLSLANG_EXPORT void ShDestruct(ShHandle);

//
// The return value of ShCompile is boolean, non-zero indicating
// success.
//
// The info-log should be written by ShCompile into
// ShHandle, so it can answer future queries.
//
GLSLANG_EXPORT int ShCompile(const ShHandle, const char* const shaderStrings[], const int numStrings,
                             const int* lengths, const EShOptimizationLevel, const TBuiltInResource* resources,
                             int, // debugOptions unused
                             int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
                             bool forwardCompatible = false, // give errors for use of deprecated features
                             EShMessages messages = EShMsgDefault, // warnings and errors
                             const char* fileName = nullptr
);

// Link the given compiled shader objects into one program.
GLSLANG_EXPORT int ShLinkExt(
    const ShHandle, // linker object
    const ShHandle h[], // compiler objects to link together
    const int numHandles);

//
// ShSetEncryptionMethod is a place-holder for specifying
// how source code is encrypted.
//
GLSLANG_EXPORT void ShSetEncryptionMethod(ShHandle);

//
// All the following return 0 if the information is not
// available in the object passed down, or the object is bad.
//
GLSLANG_EXPORT const char* ShGetInfoLog(const ShHandle);
GLSLANG_EXPORT const void* ShGetExecutable(const ShHandle);
GLSLANG_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
GLSLANG_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
//
// Tell the linker to never assign a vertex attribute to this list of physical attributes
//
GLSLANG_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);

//
// Returns the location ID of the named uniform.
// Returns -1 if error.
//
GLSLANG_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
| 375 | + | ||
| 376 | +#ifdef __cplusplus | ||
| 377 | + } // end extern "C" | ||
| 378 | +#endif | ||
| 379 | + | ||
| 380 | +//////////////////////////////////////////////////////////////////////////////////////////// | ||
| 381 | +// | ||
| 382 | +// Deferred-Lowering C++ Interface | ||
| 383 | +// ----------------------------------- | ||
| 384 | +// | ||
| 385 | +// Below is a new alternate C++ interface, which deprecates the above | ||
| 386 | +// opaque handle-based interface. | ||
| 387 | +// | ||
| 388 | +// The below is further designed to handle multiple compilation units per stage, where | ||
| 389 | +// the intermediate results, including the parse tree, are preserved until link time, | ||
| 390 | +// rather than the above interface which is designed to have each compilation unit | ||
| 391 | +// lowered at compile time. In the above model, linking occurs on the lowered results, | ||
| 392 | +// whereas in this model intra-stage linking can occur at the parse tree | ||
| 393 | +// (treeRoot in TIntermediate) level, and then a full stage can be lowered. | ||
| 394 | +// | ||
| 395 | + | ||
| 396 | +#include <list> | ||
| 397 | +#include <string> | ||
| 398 | +#include <utility> | ||
| 399 | + | ||
| 400 | +class TCompiler; | ||
| 401 | +class TInfoSink; | ||
| 402 | + | ||
| 403 | +namespace glslang { | ||
| 404 | + | ||
// Library version information returned by GetVersion().
// NOTE(review): "major"/"minor" can collide with glibc's sys/sysmacros.h
// macros on some platforms — confirm the build guards against that.
struct Version {
    int major;
    int minor;
    int patch;
    const char* flavor;
};

GLSLANG_EXPORT Version GetVersion();
GLSLANG_EXPORT const char* GetEsslVersionString();
GLSLANG_EXPORT const char* GetGlslVersionString();
GLSLANG_EXPORT int GetKhronosToolId();

class TIntermediate;
class TProgram;
class TPoolAllocator;
class TIoMapResolver;

// Call this exactly once per process before using anything else
GLSLANG_EXPORT bool InitializeProcess();

// Call once per process to tear down everything
GLSLANG_EXPORT void FinalizeProcess();

// Resource type for IO resolver
enum TResourceType {
    EResSampler,
    EResTexture,
    EResImage,
    EResUbo,
    EResSsbo,
    EResUav,
    EResCount
};

// Storage class a block/variable lives in, for block-storage remapping.
enum TBlockStorageClass
{
    EbsUniform = 0,
    EbsStorageBuffer,
    EbsPushConstant,
    EbsNone, // not a uniform or buffer variable
    EbsCount,
};
| 447 | + | ||
// Make one TShader per shader that you will link into a program. Then
// - provide the shader through setStrings() or setStringsWithLengths()
// - optionally call setEnv*(), see below for more detail
// - optionally use setPreamble() to set a special shader string that will be
// processed before all others but won't affect the validity of #version
// - optionally call addProcesses() for each setting/transform,
// see comment for class TProcesses
// - call parse(): source language and target environment must be selected
// either by correct setting of EShMessages sent to parse(), or by
// explicitly calling setEnv*()
// - query the info logs
//
// N.B.: Does not yet support having the same TShader instance being linked into
// multiple programs.
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
class TShader {
public:
    GLSLANG_EXPORT explicit TShader(EShLanguage);
    GLSLANG_EXPORT virtual ~TShader();
    // Provide the shader source; strings/lengths follow the glShaderSource
    // convention documented on the data members below.
    GLSLANG_EXPORT void setStrings(const char* const* s, int n);
    GLSLANG_EXPORT void setStringsWithLengths(
        const char* const* s, const int* l, int n);
    GLSLANG_EXPORT void setStringsWithLengthsAndNames(
        const char* const* s, const int* l, const char* const* names, int n);
    // Implicit code compiled before the explicit strings; pointer is retained,
    // not copied, so the string must outlive parsing.
    void setPreamble(const char* s) { preamble = s; }
    GLSLANG_EXPORT void setEntryPoint(const char* entryPoint);
    GLSLANG_EXPORT void setSourceEntryPoint(const char* sourceEntryPointName);
    GLSLANG_EXPORT void addProcesses(const std::vector<std::string>&);
    GLSLANG_EXPORT void setUniqueId(unsigned long long id);
    GLSLANG_EXPORT void setOverrideVersion(int version);
    GLSLANG_EXPORT void setDebugInfo(bool debugInfo);

    // IO resolver binding data: see comments in ShaderLang.cpp
    GLSLANG_EXPORT void setShiftBinding(TResourceType res, unsigned int base);
    GLSLANG_EXPORT void setShiftSamplerBinding(unsigned int base);  // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftTextureBinding(unsigned int base);  // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftImageBinding(unsigned int base);    // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftUboBinding(unsigned int base);      // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftUavBinding(unsigned int base);      // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftCbufferBinding(unsigned int base);  // synonym for setShiftUboBinding
    GLSLANG_EXPORT void setShiftSsboBinding(unsigned int base);     // DEPRECATED: use setShiftBinding
    GLSLANG_EXPORT void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
    GLSLANG_EXPORT void setResourceSetBinding(const std::vector<std::string>& base);
    GLSLANG_EXPORT void setAutoMapBindings(bool map);
    GLSLANG_EXPORT void setAutoMapLocations(bool map);
    GLSLANG_EXPORT void addUniformLocationOverride(const char* name, int loc);
    GLSLANG_EXPORT void setUniformLocationBase(int base);
    GLSLANG_EXPORT void setInvertY(bool invert);
    GLSLANG_EXPORT void setDxPositionW(bool dxPosW);
    GLSLANG_EXPORT void setEnhancedMsgs();
#ifdef ENABLE_HLSL
    GLSLANG_EXPORT void setHlslIoMapping(bool hlslIoMap);
    GLSLANG_EXPORT void setFlattenUniformArrays(bool flatten);
#endif
    GLSLANG_EXPORT void setNoStorageFormat(bool useUnknownFormat);
    GLSLANG_EXPORT void setNanMinMaxClamp(bool nanMinMaxClamp);
    GLSLANG_EXPORT void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
    GLSLANG_EXPORT void addBlockStorageOverride(const char* nameStr, glslang::TBlockStorageClass backing);

    GLSLANG_EXPORT void setGlobalUniformBlockName(const char* name);
    GLSLANG_EXPORT void setAtomicCounterBlockName(const char* name);
    GLSLANG_EXPORT void setGlobalUniformSet(unsigned int set);
    GLSLANG_EXPORT void setGlobalUniformBinding(unsigned int binding);
    GLSLANG_EXPORT void setAtomicCounterBlockSet(unsigned int set);
    GLSLANG_EXPORT void setAtomicCounterBlockBinding(unsigned int binding);

    GLSLANG_EXPORT void addSourceText(const char* text, size_t len);
    GLSLANG_EXPORT void setSourceFile(const char* file);

    // For setting up the environment (cleared to nothingness in the constructor).
    // These must be called so that parsing is done for the right source language and
    // target environment, either indirectly through TranslateEnvironment() based on
    // EShMessages et. al., or directly by the user.
    //
    // setEnvInput: The input source language and stage. If generating code for a
    // specific client, the input client semantics to use and the
    // version of that client's input semantics to use, otherwise
    // use EShClientNone and version of 0, e.g. for validation mode.
    // Note 'version' does not describe the target environment,
    // just the version of the source dialect to compile under.
    // For example, to choose the Vulkan dialect of GLSL defined by
    // version 100 of the KHR_vulkan_glsl extension: lang = EShSourceGlsl,
    // dialect = EShClientVulkan, and version = 100.
    //
    // See the definitions of TEnvironment, EShSource, EShLanguage,
    // and EShClient for choices and more detail.
    //
    // setEnvClient: The client that will be hosting the execution, and its version.
    // Note 'version' is not the version of the languages involved, but
    // the version of the client environment.
    // Use EShClientNone and version of 0 if there is no client, e.g.
    // for validation mode.
    //
    // See EShTargetClientVersion for choices.
    //
    // setEnvTarget: The language to translate to when generating code, and that
    // language's version.
    // Use EShTargetNone and version of 0 if there is no client, e.g.
    // for validation mode.
    //
    void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
    {
        environment.input.languageFamily = lang;
        environment.input.stage = envStage;
        environment.input.dialect = client;
        environment.input.dialectVersion = version;
    }
    void setEnvClient(EShClient client, EShTargetClientVersion version)
    {
        environment.client.client = client;
        environment.client.version = version;
    }
    void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
    {
        environment.target.language = lang;
        environment.target.version = version;
    }

    // Retrieve the source strings and their count (non-owning, as set above).
    void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }

#ifdef ENABLE_HLSL
    void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
    bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
#else
    bool getEnvTargetHlslFunctionality1() const { return false; }
#endif

    void setEnvInputVulkanRulesRelaxed() { environment.input.vulkanRulesRelaxed = true; }
    bool getEnvInputVulkanRulesRelaxed() const { return environment.input.vulkanRulesRelaxed; }

    void setCompileOnly() { compileOnly = true; }
    bool getCompileOnly() const { return compileOnly; }

    // Interface to #include handlers.
    //
    // To support #include, a client of Glslang does the following:
    // 1. Call setStringsWithNames to set the source strings and associated
    // names. For example, the names could be the names of the files
    // containing the shader sources.
    // 2. Call parse with an Includer.
    //
    // When the Glslang parser encounters an #include directive, it calls
    // the Includer's include method with the requested include name
    // together with the current string name. The returned IncludeResult
    // contains the fully resolved name of the included source, together
    // with the source text that should replace the #include directive
    // in the source stream. After parsing that source, Glslang will
    // release the IncludeResult object.
    class Includer {
    public:
        // An IncludeResult contains the resolved name and content of a source
        // inclusion.
        struct IncludeResult {
            IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
                headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
            // For a successful inclusion, the fully resolved name of the requested
            // include. For example, in a file system-based includer, full resolution
            // should convert a relative path name into an absolute path name.
            // For a failed inclusion, this is an empty string.
            const std::string headerName;
            // The content and byte length of the requested inclusion. The
            // Includer producing this IncludeResult retains ownership of the
            // storage.
            // For a failed inclusion, the header
            // field points to a string containing error details.
            const char* const headerData;
            const size_t headerLength;
            // Include resolver's context.
            void* userData;
        protected:
            IncludeResult& operator=(const IncludeResult&);
            IncludeResult();
        };

        // For both include methods below:
        //
        // Resolves an inclusion request by name, current source name,
        // and include depth.
        // On success, returns an IncludeResult containing the resolved name
        // and content of the include.
        // On failure, returns a nullptr, or an IncludeResult
        // with an empty string for the headerName and error details in the
        // header field.
        // The Includer retains ownership of the contents
        // of the returned IncludeResult value, and those contents must
        // remain valid until the releaseInclude method is called on that
        // IncludeResult object.
        //
        // Note "local" vs. "system" is not an "either/or": "local" is an
        // extra thing to do over "system". Both might get called, as per
        // the C++ specification.

        // For the "system" or <>-style includes; search the "system" paths.
        virtual IncludeResult* includeSystem(const char* /*headerName*/,
                                            const char* /*includerName*/,
                                            size_t /*inclusionDepth*/) { return nullptr; }

        // For the "local"-only aspect of a "" include. Should not search in the
        // "system" paths, because on returning a failure, the parser will
        // call includeSystem() to look in the "system" locations.
        virtual IncludeResult* includeLocal(const char* /*headerName*/,
                                            const char* /*includerName*/,
                                            size_t /*inclusionDepth*/) { return nullptr; }

        // Signals that the parser will no longer use the contents of the
        // specified IncludeResult.
        virtual void releaseInclude(IncludeResult*) = 0;
        virtual ~Includer() {}
    };

    // Fail all Includer searches
    class ForbidIncluder : public Includer {
    public:
        virtual void releaseInclude(IncludeResult*) override { }
    };

    // Primary parse entry point: compile the configured source strings,
    // resolving any #include directives through the supplied Includer.
    GLSLANG_EXPORT bool parse(
        const TBuiltInResource*, int defaultVersion, EProfile defaultProfile,
        bool forceDefaultVersionAndProfile, bool forwardCompatible,
        EShMessages, Includer&);

    // Convenience overload: parse with all #include directives forbidden.
    bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
               bool forwardCompatible, EShMessages messages)
    {
        TShader::ForbidIncluder includer;
        return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
    }

    // Equivalent to parse() without a default profile and without forcing defaults.
    bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
    {
        return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
    }

    // Same as above, but with a caller-supplied #include handler.
    bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
               Includer& includer)
    {
        return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
    }

    // NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
    // is not an officially supported or fully working path.
    GLSLANG_EXPORT bool preprocess(
        const TBuiltInResource* builtInResources, int defaultVersion,
        EProfile defaultProfile, bool forceDefaultVersionAndProfile,
        bool forwardCompatible, EShMessages message, std::string* outputString,
        Includer& includer);

    GLSLANG_EXPORT const char* getInfoLog();
    GLSLANG_EXPORT const char* getInfoDebugLog();
    EShLanguage getStage() const { return stage; }
    TIntermediate* getIntermediate() const { return intermediate; }

protected:
    TPoolAllocator* pool;
    EShLanguage stage;
    TCompiler* compiler;
    TIntermediate* intermediate;
    TInfoSink* infoSink;
    // strings and lengths follow the standard for glShaderSource:
    // strings is an array of numStrings pointers to string data.
    // lengths can be null, but if not it is an array of numStrings
    // integers containing the length of the associated strings.
    // if lengths is null or lengths[n] < 0 the associated strings[n] is
    // assumed to be null-terminated.
    // stringNames is the optional names for all the strings. If stringNames
    // is null, then none of the strings has name. If a certain element in
    // stringNames is null, then the corresponding string does not have name.
    const char* const* strings; // explicit code to compile, see previous comment
    const int* lengths;
    const char* const* stringNames;
    int numStrings;             // size of the above arrays
    const char* preamble;       // string of implicit code to compile before the explicitly provided code

    // a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
    std::string sourceEntryPointName;

    // overrides #version in shader source or default version if #version isn't present
    int overrideVersion;

    TEnvironment environment;

    // Indicates this shader is meant to be used without linking
    bool compileOnly = false;

    friend class TProgram;

private:
    TShader& operator=(TShader&); // non-assignable
};
| 740 | + | ||
| 741 | +// | ||
| 742 | +// A reflection database and its interface, consistent with the OpenGL API reflection queries. | ||
| 743 | +// | ||
| 744 | + | ||
| 745 | +// Data needed for just a single object at the granularity exchanged by the reflection API | ||
| 746 | +class TObjectReflection { | ||
| 747 | +public: | ||
| 748 | + GLSLANG_EXPORT TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex); | ||
| 749 | + | ||
| 750 | + const TType* getType() const { return type; } | ||
| 751 | + GLSLANG_EXPORT int getBinding() const; | ||
| 752 | + GLSLANG_EXPORT void dump() const; | ||
| 753 | + static TObjectReflection badReflection() { return TObjectReflection(); } | ||
| 754 | + | ||
| 755 | + GLSLANG_EXPORT unsigned int layoutLocation() const; | ||
| 756 | + | ||
| 757 | + std::string name; | ||
| 758 | + int offset; | ||
| 759 | + int glDefineType; | ||
| 760 | + int size; // data size in bytes for a block, array size for a (non-block) object that's an array | ||
| 761 | + int index; | ||
| 762 | + int counterIndex; | ||
| 763 | + int numMembers; | ||
| 764 | + int arrayStride; // stride of an array variable | ||
| 765 | + int topLevelArraySize; // size of the top-level variable in a storage buffer member | ||
| 766 | + int topLevelArrayStride; // stride of the top-level variable in a storage buffer member | ||
| 767 | + EShLanguageMask stages; | ||
| 768 | + | ||
| 769 | +protected: | ||
| 770 | + TObjectReflection() | ||
| 771 | + : offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0), | ||
| 772 | + topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr) | ||
| 773 | + { | ||
| 774 | + } | ||
| 775 | + | ||
| 776 | + const TType* type; | ||
| 777 | +}; | ||
| 778 | + | ||
// Forward declarations for the reflection and I/O-mapping machinery below.
class TReflection;
class TIoMapper;
struct TVarEntryInfo;
| 782 | + | ||
| 783 | +// Allows to customize the binding layout after linking. | ||
| 784 | +// All used uniform variables will invoke at least validateBinding. | ||
| 785 | +// If validateBinding returned true then the other resolveBinding, | ||
| 786 | +// resolveSet, and resolveLocation are invoked to resolve the binding | ||
| 787 | +// and descriptor set index respectively. | ||
| 788 | +// | ||
| 789 | +// Invocations happen in a particular order: | ||
| 790 | +// 1) all shader inputs | ||
| 791 | +// 2) all shader outputs | ||
| 792 | +// 3) all uniforms with binding and set already defined | ||
| 793 | +// 4) all uniforms with binding but no set defined | ||
| 794 | +// 5) all uniforms with set but no binding defined | ||
| 795 | +// 6) all uniforms with no binding and no set defined | ||
| 796 | +// | ||
| 797 | +// mapIO will use this resolver in two phases. The first | ||
// phase is a notification phase, calling the corresponding
// notify callbacks; this phase ends with a call to endNotifications.
| 800 | +// Phase two starts directly after the call to endNotifications | ||
| 801 | +// and calls all other callbacks to validate and to get the | ||
| 802 | +// bindings, sets, locations, component and color indices. | ||
| 803 | +// | ||
// NOTE: Limit checks are still applied to bindings and sets,
// and may result in an error.
// Callback interface used by mapIO to customize binding/set/location
// assignment; see the two-phase (notify, then resolve) protocol described
// in the comment above.
class TIoMapResolver
{
public:
    virtual ~TIoMapResolver() {}

    // Should return true if the resulting/current binding would be okay.
    // Basic idea is to do aliasing binding checks with this.
    virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current binding should be overridden.
    // Return -1 if the current binding (including no binding) should be kept.
    virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current set should be overridden.
    // Return -1 if the current set (including no set) should be kept.
    virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current location should be overridden.
    // Return -1 if the current location (including no location) should be kept.
    virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return true if the resulting/current setup would be okay.
    // Basic idea is to do aliasing checks and reject invalid semantic names.
    virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current location should be overridden.
    // Return -1 if the current location (including no location) should be kept.
    virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current component index should be overridden.
    // Return -1 if the current component index (including no index) should be kept.
    virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Should return a value >= 0 if the current color index should be overridden.
    // Return -1 if the current color index (including no index) should be kept.
    virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Notification of a uniform variable
    virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Notification of a in or out variable
    virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
    // Called by mapIO when it starts its notify pass for the given stage
    virtual void beginNotifications(EShLanguage stage) = 0;
    // Called by mapIO when it has finished the notify pass
    virtual void endNotifications(EShLanguage stage) = 0;
    // Called by mapIO when it starts its resolve pass for the given stage
    virtual void beginResolve(EShLanguage stage) = 0;
    // Called by mapIO when it has finished the resolve pass
    virtual void endResolve(EShLanguage stage) = 0;
    // Called by mapIO when it starts its symbol collect for the given stage
    virtual void beginCollect(EShLanguage stage) = 0;
    // Called by mapIO when it has finished the symbol collect
    virtual void endCollect(EShLanguage stage) = 0;
    // Called by TSlotCollector to resolve storage locations or bindings
    virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
    // Called by TSlotCollector to resolve resource locations or bindings
    virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
    // Called by mapIO.addStage to set shader stage mask to mark a stage be added to this pipeline
    virtual void addStage(EShLanguage stage, TIntermediate& stageIntermediate) = 0;
};
| 858 | + | ||
// I/O mapper
// Base class for applying I/O binding/location mapping across shader stages;
// the default implementations of doMap/setAutoPushConstantBlock are no-ops.
class TIoMapper {
public:
    TIoMapper() {}
    virtual ~TIoMapper() {}
    // grow the reflection stage by stage
    bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
    bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
    bool virtual setAutoPushConstantBlock(const char*, unsigned int, TLayoutPacking) { return false; }
};

// Get the default GLSL IO mapper
GLSLANG_EXPORT TIoMapper* GetGlslIoMapper();
| 872 | + | ||
| 873 | +// Make one TProgram per set of shaders that will get linked together. Add all | ||
| 874 | +// the shaders that are to be linked together. After calling shader.parse() | ||
| 875 | +// for all shaders, call link(). | ||
| 876 | +// | ||
| 877 | +// N.B.: Destruct a linked program *before* destructing the shaders linked into it. | ||
| 878 | +// | ||
class TProgram {
public:
    GLSLANG_EXPORT TProgram();
    GLSLANG_EXPORT virtual ~TProgram();
    // Register a parsed shader for linking. The program does not copy the
    // shader; per the class comment, shaders must outlive the program.
    void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
    std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
    // Link Validation interface
    GLSLANG_EXPORT bool link(EShMessages);
    GLSLANG_EXPORT const char* getInfoLog();
    GLSLANG_EXPORT const char* getInfoDebugLog();

    TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }

    // Reflection Interface

    // call first, to do liveness analysis, index mapping, etc.; returns false on failure
    GLSLANG_EXPORT bool buildReflection(int opts = EShReflectionDefault);
    GLSLANG_EXPORT unsigned getLocalSize(int dim) const;                  // return dim'th local size
    GLSLANG_EXPORT int getReflectionIndex(const char *name) const;
    GLSLANG_EXPORT int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
    GLSLANG_EXPORT int getNumUniformVariables() const;
    GLSLANG_EXPORT const TObjectReflection& getUniform(int index) const;
    GLSLANG_EXPORT int getNumUniformBlocks() const;
    GLSLANG_EXPORT const TObjectReflection& getUniformBlock(int index) const;
    GLSLANG_EXPORT int getNumPipeInputs() const;
    GLSLANG_EXPORT const TObjectReflection& getPipeInput(int index) const;
    GLSLANG_EXPORT int getNumPipeOutputs() const;
    GLSLANG_EXPORT const TObjectReflection& getPipeOutput(int index) const;
    GLSLANG_EXPORT int getNumBufferVariables() const;
    GLSLANG_EXPORT const TObjectReflection& getBufferVariable(int index) const;
    GLSLANG_EXPORT int getNumBufferBlocks() const;
    GLSLANG_EXPORT const TObjectReflection& getBufferBlock(int index) const;
    GLSLANG_EXPORT int getNumAtomicCounters() const;
    GLSLANG_EXPORT const TObjectReflection& getAtomicCounter(int index) const;

    // Legacy Reflection Interface - expressed in terms of above interface

    // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
    int getNumLiveUniformVariables() const { return getNumUniformVariables(); }

    // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
    int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }

    // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
    int getNumLiveAttributes() const { return getNumPipeInputs(); }

    // can be used for glGetUniformIndices()
    int getUniformIndex(const char *name) const { return getReflectionIndex(name); }

    int getPipeIOIndex(const char *name, const bool inOrOut) const
        { return getReflectionPipeIOIndex(name, inOrOut); }

    // can be used for "name" part of glGetActiveUniform()
    const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }

    // returns the binding number
    int getUniformBinding(int index) const { return getUniform(index).getBinding(); }

    // returns Shaders Stages where a Uniform is present
    EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }

    // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
    int getUniformBlockIndex(int index) const { return getUniform(index).index; }

    // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
    int getUniformType(int index) const { return getUniform(index).glDefineType; }

    // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
    int getUniformBufferOffset(int index) const { return getUniform(index).offset; }

    // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
    int getUniformArraySize(int index) const { return getUniform(index).size; }

    // returns a TType*
    const TType *getUniformTType(int index) const { return getUniform(index).getType(); }

    // can be used for glGetActiveUniformBlockName()
    const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }

    // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
    int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }

    // returns the block binding number
    int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }

    // returns block index of associated counter.
    int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }

    // returns a TType*
    const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }

    // can be used for glGetActiveAttrib()
    const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }

    // can be used for glGetActiveAttrib()
    int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }

    // returns a TType*
    const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }

    GLSLANG_EXPORT void dumpReflection();

    // Get the IO resolver to use for mapIO
    GLSLANG_EXPORT TIoMapResolver* getGlslIoResolver(EShLanguage stage);

    // I/O mapping: apply base offsets and map live unbound variables
    // If resolver is not provided it uses the previous approach
    // and respects auto assignment and offsets.
    GLSLANG_EXPORT bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);

protected:
    GLSLANG_EXPORT bool linkStage(EShLanguage, EShMessages);
    GLSLANG_EXPORT bool crossStageCheck(EShMessages);

    TPoolAllocator* pool;
    std::list<TShader*> stages[EShLangCount];
    TIntermediate* intermediate[EShLangCount];
    bool newedIntermediate[EShLangCount]; // track which intermediate were "new" versus reusing a singleton unit in a stage
    TInfoSink* infoSink;
    TReflection* reflection;
    bool linked;

private:
    TProgram(TProgram&);            // non-copyable
    TProgram& operator=(TProgram&); // non-assignable
};
| 1005 | + | ||
| 1006 | +} // end namespace glslang | ||
| 1007 | + | ||
| 1008 | +#endif // _COMPILER_INTERFACE_INCLUDED_ |
| 1 | +/** | ||
| 2 | +BSD 2-Clause License | ||
| 3 | + | ||
| 4 | +Copyright (c) 2020, Travis Fort | ||
| 5 | +All rights reserved. | ||
| 6 | + | ||
| 7 | +Redistribution and use in source and binary forms, with or without | ||
| 8 | +modification, are permitted provided that the following conditions are met: | ||
| 9 | + | ||
| 10 | +1. Redistributions of source code must retain the above copyright notice, this | ||
| 11 | + list of conditions and the following disclaimer. | ||
| 12 | + | ||
| 13 | +2. Redistributions in binary form must reproduce the above copyright notice, | ||
| 14 | + this list of conditions and the following disclaimer in the documentation | ||
| 15 | + and/or other materials provided with the distribution. | ||
| 16 | + | ||
| 17 | +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
| 18 | +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
| 19 | +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 20 | +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE | ||
| 21 | +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | ||
| 22 | +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | ||
| 23 | +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 24 | +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | ||
| 25 | +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
| 26 | +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 27 | +**/ | ||
| 28 | + | ||
| 29 | +#ifndef _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ | ||
| 30 | +#define _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ | ||
| 31 | + | ||
| 32 | +#include "../Include/glslang_c_interface.h" | ||
| 33 | +#include "../Include/visibility.h" | ||
| 34 | + | ||
| 35 | +#ifdef __cplusplus | ||
| 36 | +extern "C" { | ||
| 37 | +#endif | ||
| 38 | + | ||
// Returns a struct that can be used to create custom resource values.
| 40 | +GLSLANG_EXPORT glslang_resource_t* glslang_resource(void); | ||
| 41 | + | ||
| 42 | +// These are the default resources for TBuiltInResources, used for both | ||
| 43 | +// - parsing this string for the case where the user didn't supply one, | ||
| 44 | +// - dumping out a template for user construction of a config file. | ||
| 45 | +GLSLANG_EXPORT const glslang_resource_t* glslang_default_resource(void); | ||
| 46 | + | ||
| 47 | +// Returns the DefaultTBuiltInResource as a human-readable string. | ||
| 48 | +// NOTE: User is responsible for freeing this string. | ||
| 49 | +GLSLANG_EXPORT const char* glslang_default_resource_string(); | ||
| 50 | + | ||
| 51 | +// Decodes the resource limits from |config| to |resources|. | ||
| 52 | +GLSLANG_EXPORT void glslang_decode_resource_limits(glslang_resource_t* resources, char* config); | ||
| 53 | + | ||
| 54 | +#ifdef __cplusplus | ||
| 55 | +} | ||
| 56 | +#endif | ||
| 57 | + | ||
| 58 | +#endif // _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_ |
| 1 | +// | ||
| 2 | +// Copyright (C) 2014 LunarG, Inc. | ||
| 3 | +// Copyright (C) 2015-2018 Google, Inc. | ||
| 4 | +// | ||
| 5 | +// All rights reserved. | ||
| 6 | +// | ||
| 7 | +// Redistribution and use in source and binary forms, with or without | ||
| 8 | +// modification, are permitted provided that the following conditions | ||
| 9 | +// are met: | ||
| 10 | +// | ||
| 11 | +// Redistributions of source code must retain the above copyright | ||
| 12 | +// notice, this list of conditions and the following disclaimer. | ||
| 13 | +// | ||
| 14 | +// Redistributions in binary form must reproduce the above | ||
| 15 | +// copyright notice, this list of conditions and the following | ||
| 16 | +// disclaimer in the documentation and/or other materials provided | ||
| 17 | +// with the distribution. | ||
| 18 | +// | ||
| 19 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 20 | +// contributors may be used to endorse or promote products derived | ||
| 21 | +// from this software without specific prior written permission. | ||
| 22 | +// | ||
| 23 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 24 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 25 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 26 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 27 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 28 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 29 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 30 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 31 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 32 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 33 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 34 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 35 | + | ||
| 36 | +#pragma once | ||
| 37 | + | ||
| 38 | +#include <string> | ||
| 39 | +#include <vector> | ||
| 40 | + | ||
| 41 | +#include "Logger.h" | ||
| 42 | +#include "glslang/Include/visibility.h" | ||
| 43 | + | ||
namespace glslang {
// Forward declaration; the full definition lives elsewhere in glslang.
class TIntermediate;

// Options controlling SPIR-V generation and post-processing.
// All flags default to "off" except disableOptimizer, i.e. the
// SPIRV-Tools optimizer is NOT run unless explicitly enabled.
struct SpvOptions {
    bool generateDebugInfo {false};
    bool stripDebugInfo {false};
    bool disableOptimizer {true};   // note: true by default — optimizer off
    bool optimizeSize {false};
    bool disassemble {false};
    bool validate {false};
    bool emitNonSemanticShaderDebugInfo {false};
    bool emitNonSemanticShaderDebugSource{ false };
    bool compileOnly{false};
    bool optimizerAllowExpandedIDBound{false};
};

// Writes a human-readable description of the targeted SPIR-V version.
GLSLANG_EXPORT void GetSpirvVersion(std::string&);
GLSLANG_EXPORT int GetSpirvGeneratorVersion();
// Translate a compiled intermediate representation into a SPIR-V module
// (32-bit words appended to 'spirv'); the overload with a logger collects
// warnings/errors instead of discarding them.
GLSLANG_EXPORT void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
                                 SpvOptions* options = nullptr);
GLSLANG_EXPORT void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
                                 spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
// Write the binary to <baseName>.spv / a C hex array named 'varName'.
GLSLANG_EXPORT bool OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
GLSLANG_EXPORT bool OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);

} // end namespace glslang
| 1 | +// | ||
| 2 | +// Copyright (C) 2016 Google, Inc. | ||
| 3 | +// | ||
| 4 | +// All rights reserved. | ||
| 5 | +// | ||
| 6 | +// Redistribution and use in source and binary forms, with or without | ||
| 7 | +// modification, are permitted provided that the following conditions | ||
| 8 | +// are met: | ||
| 9 | +// | ||
| 10 | +// Redistributions of source code must retain the above copyright | ||
| 11 | +// notice, this list of conditions and the following disclaimer. | ||
| 12 | +// | ||
| 13 | +// Redistributions in binary form must reproduce the above | ||
| 14 | +// copyright notice, this list of conditions and the following | ||
| 15 | +// disclaimer in the documentation and/or other materials provided | ||
| 16 | +// with the distribution. | ||
| 17 | +// | ||
| 18 | +// Neither the name of Google Inc. nor the names of its | ||
| 19 | +// contributors may be used to endorse or promote products derived | ||
| 20 | +// from this software without specific prior written permission. | ||
| 21 | +// | ||
| 22 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 23 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 24 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 25 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 26 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 27 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 28 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 29 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 30 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 31 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 32 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | + | ||
| 35 | +#ifndef GLSLANG_SPIRV_LOGGER_H | ||
| 36 | +#define GLSLANG_SPIRV_LOGGER_H | ||
| 37 | + | ||
| 38 | +#include <string> | ||
| 39 | +#include <vector> | ||
| 40 | +#include "glslang/Include/visibility.h" | ||
| 41 | + | ||
| 42 | +namespace spv { | ||
| 43 | + | ||
| 44 | +// A class for holding all SPIR-V build status messages, including | ||
| 45 | +// missing/TBD functionalities, warnings, and errors. | ||
| 46 | +class GLSLANG_EXPORT SpvBuildLogger { | ||
| 47 | +public: | ||
| 48 | + SpvBuildLogger() {} | ||
| 49 | + | ||
| 50 | + // Registers a TBD functionality. | ||
| 51 | + void tbdFunctionality(const std::string& f); | ||
| 52 | + // Registers a missing functionality. | ||
| 53 | + void missingFunctionality(const std::string& f); | ||
| 54 | + | ||
| 55 | + // Logs a warning. | ||
| 56 | + void warning(const std::string& w) { warnings.push_back(w); } | ||
| 57 | + // Logs an error. | ||
| 58 | + void error(const std::string& e) { errors.push_back(e); } | ||
| 59 | + | ||
| 60 | + // Returns all messages accumulated in the order of: | ||
| 61 | + // TBD functionalities, missing functionalities, warnings, errors. | ||
| 62 | + std::string getAllMessages() const; | ||
| 63 | + | ||
| 64 | +private: | ||
| 65 | + SpvBuildLogger(const SpvBuildLogger&); | ||
| 66 | + | ||
| 67 | + std::vector<std::string> tbdFeatures; | ||
| 68 | + std::vector<std::string> missingFeatures; | ||
| 69 | + std::vector<std::string> warnings; | ||
| 70 | + std::vector<std::string> errors; | ||
| 71 | +}; | ||
| 72 | + | ||
| 73 | +} // end spv namespace | ||
| 74 | + | ||
| 75 | +#endif // GLSLANG_SPIRV_LOGGER_H |
| 1 | +// | ||
| 2 | +// Copyright (C) 2015 LunarG, Inc. | ||
| 3 | +// | ||
| 4 | +// All rights reserved. | ||
| 5 | +// | ||
| 6 | +// Redistribution and use in source and binary forms, with or without | ||
| 7 | +// modification, are permitted provided that the following conditions | ||
| 8 | +// are met: | ||
| 9 | +// | ||
| 10 | +// Redistributions of source code must retain the above copyright | ||
| 11 | +// notice, this list of conditions and the following disclaimer. | ||
| 12 | +// | ||
| 13 | +// Redistributions in binary form must reproduce the above | ||
| 14 | +// copyright notice, this list of conditions and the following | ||
| 15 | +// disclaimer in the documentation and/or other materials provided | ||
| 16 | +// with the distribution. | ||
| 17 | +// | ||
| 18 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 19 | +// contributors may be used to endorse or promote products derived | ||
| 20 | +// from this software without specific prior written permission. | ||
| 21 | +// | ||
| 22 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 23 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 24 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 25 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 26 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 27 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 28 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 29 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 30 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 31 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 32 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | +// | ||
| 35 | + | ||
| 36 | +#ifndef SPIRVREMAPPER_H | ||
| 37 | +#define SPIRVREMAPPER_H | ||
| 38 | + | ||
| 39 | +#include <string> | ||
| 40 | +#include <vector> | ||
| 41 | +#include <cstdlib> | ||
| 42 | +#include <exception> | ||
| 43 | + | ||
| 44 | +#ifdef GLSLANG_IS_SHARED_LIBRARY | ||
| 45 | + #ifdef _WIN32 | ||
| 46 | + #ifdef GLSLANG_EXPORTING | ||
| 47 | + #define GLSLANG_EXPORT __declspec(dllexport) | ||
| 48 | + #else | ||
| 49 | + #define GLSLANG_EXPORT __declspec(dllimport) | ||
| 50 | + #endif | ||
| 51 | + #elif __GNUC__ >= 4 | ||
| 52 | + #define GLSLANG_EXPORT __attribute__((visibility("default"))) | ||
| 53 | + #endif | ||
| 54 | +#endif // GLSLANG_IS_SHARED_LIBRARY | ||
| 55 | +#ifndef GLSLANG_EXPORT | ||
| 56 | +#define GLSLANG_EXPORT | ||
| 57 | +#endif | ||
| 58 | + | ||
namespace spv {

// Option bits shared by the SPIR-V remapper. Kept in a small non-template
// base class so callers can name the flags (e.g. spv::spirvbin_base_t::STRIP)
// without needing the full spirvbin_t definition.
class spirvbin_base_t
{
public:
    enum Options {
        NONE          = 0,
        STRIP         = (1<<0),
        MAP_TYPES     = (1<<1),
        MAP_NAMES     = (1<<2),
        MAP_FUNCS     = (1<<3),
        DCE_FUNCS     = (1<<4),
        DCE_VARS      = (1<<5),
        DCE_TYPES     = (1<<6),
        OPT_LOADSTORE = (1<<7),
        OPT_FWD_LS    = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
        MAP_ALL       = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
        DCE_ALL       = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
        OPT_ALL       = (OPT_LOADSTORE),

        ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
        DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
    };
};

} // namespace spv
| 85 | + | ||
| 86 | +#include <functional> | ||
| 87 | +#include <cstdint> | ||
| 88 | +#include <unordered_map> | ||
| 89 | +#include <unordered_set> | ||
| 90 | +#include <map> | ||
| 91 | +#include <set> | ||
| 92 | +#include <cassert> | ||
| 93 | + | ||
| 94 | +#include "spirv.hpp" | ||
| 95 | + | ||
namespace spv {

// Sentinel "no result ID" value; ID 0 is never a valid SPIR-V result ID.
static inline constexpr Id NoResult = 0;

// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
class GLSLANG_EXPORT spirvbin_t : public spirvbin_base_t
{
public:
    spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
    { }

    virtual ~spirvbin_t() { }

    // remap on an existing binary in memory
    void remap(std::vector<std::uint32_t>& spv, const std::vector<std::string>& whiteListStrings,
               std::uint32_t opts = DO_EVERYTHING);

    // remap on an existing binary in memory - legacy interface without white list
    void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);

    // Type for error/log handler functions
    typedef std::function<void(const std::string&)> errorfn_t;
    typedef std::function<void(const std::string&)> logfn_t;

    // Register error/log handling functions (can be lambda fn / functor / etc)
    // NOTE: these are static, so handlers are shared by ALL spirvbin_t
    // instances in the process.
    static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
    static void registerLogHandler(logfn_t handler) { logHandler = handler; }

protected:
    // This can be overridden to provide other message behavior if needed
    virtual void msg(int minVerbosity, int indent, const std::string& txt) const;

private:
    // Local to global, or global to local ID map
    typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
    typedef std::unordered_set<spv::Id> idset_t;
    typedef std::unordered_map<spv::Id, int> blockmap_t;

    void remap(std::uint32_t opts = DO_EVERYTHING);

    // Map of names to IDs
    typedef std::unordered_map<std::string, spv::Id> namemap_t;

    typedef std::uint32_t spirword_t;

    // Half-open [begin, end) word range within the binary.
    typedef std::pair<unsigned, unsigned> range_t;
    typedef std::function<void(spv::Id&)> idfn_t;
    typedef std::function<bool(spv::Op, unsigned start)> instfn_t;

    // Special Values for ID map:
    static const spv::Id unmapped; // unchanged from default value
    static const spv::Id unused; // unused ID
    static const int header_size; // SPIR header = 5 words

    class id_iterator_t;

    // For mapping type entries between different shaders
    typedef std::vector<spirword_t> typeentry_t;
    typedef std::map<spv::Id, typeentry_t> globaltypes_t;

    // A set that preserves position order, and a reverse map
    typedef std::set<int> posmap_t;
    typedef std::unordered_map<spv::Id, int> posmap_rev_t;

    // Maps an ID to the size of its base type, if known.
    typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;

    // handle error: latches the error flag and forwards to the registered handler
    void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }

    // Opcode classification helpers.
    bool isConstOp(spv::Op opCode) const;
    bool isTypeOp(spv::Op opCode) const;
    bool isStripOp(spv::Op opCode) const;
    bool isFlowCtrl(spv::Op opCode) const;
    // Operand ranges (word offsets within an instruction) for literals,
    // type references, and constant references of the given opcode.
    range_t literalRange(spv::Op opCode) const;
    range_t typeRange(spv::Op opCode) const;
    range_t constRange(spv::Op opCode) const;
    unsigned typeSizeInWords(spv::Id id) const;
    unsigned idTypeSizeInWords(spv::Id id) const;

    bool isStripOp(spv::Op opCode, unsigned start) const;

    // Raw word accessors into the binary ('word' is an index into spv).
    spv::Id& asId(unsigned word) { return spv[word]; }
    const spv::Id& asId(unsigned word) const { return spv[word]; }
    spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
    std::uint32_t asOpCodeHash(unsigned word);
    spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
    unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
    // Result ID of a type or constant instruction starting at 'word'
    // (type ops carry their result in word+1, const ops in word+2).
    spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
    unsigned idPos(spv::Id id) const;

    // Decode the word count / opcode packed into an instruction's first word.
    static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
    static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }

    // Header access & set methods
    spirword_t magic() const { return spv[0]; } // return magic number
    spirword_t bound() const { return spv[3]; } // return Id bound from header
    spirword_t bound(spirword_t b) { return spv[3] = b; }
    spirword_t genmagic() const { return spv[2]; } // generator magic
    spirword_t genmagic(spirword_t m) { return spv[2] = m; }
    spirword_t schemaNum() const { return spv[4]; } // schema number from header

    // Mapping fns: get
    spv::Id localId(spv::Id id) const { return idMapL[id]; }

    // Mapping fns: set
    inline spv::Id localId(spv::Id id, spv::Id newId);
    void countIds(spv::Id id);

    // Return next unused new local ID.
    // NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
    // which std::vector<bool> doesn't have.
    inline spv::Id nextUnusedId(spv::Id id);

    void buildLocalMaps();
    std::string literalString(unsigned word) const; // Return literal as a std::string
    // Word count of a nul-terminated literal string (4 chars per word, rounded up).
    int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }

    bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
    bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
    bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
    bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
    bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }

    // bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
    // spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
    std::uint32_t hashType(unsigned typeStart) const;

    // Walk instructions in [begin, end), calling instfn_t per instruction and
    // idfn_t per ID operand; end == 0 means "to end of binary".
    spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
    int processInstruction(unsigned word, instfn_t, idfn_t);

    void validate() const;
    void mapTypeConst();
    void mapFnBodies();
    void optLoadStore();
    void dceFuncs();
    void dceVars();
    void dceTypes();
    void mapNames();
    void foldIds(); // fold IDs to smallest space
    void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
    void offsetIds(); // create relative offset IDs

    void applyMap(); // remap per local name map
    void mapRemainder(); // map any IDs we haven't touched yet
    void stripDebug(); // strip all debug info
    void stripDeadRefs(); // strips debug info for now-dead references after DCE
    void strip(); // remove debug symbols

    std::vector<spirword_t> spv; // SPIR words

    std::vector<std::string> stripWhiteList;

    namemap_t nameMap; // ID names from OpName

    // Since we want to also do binary ops, we can't use std::vector<bool>. we could use
    // boost::dynamic_bitset, but we're trying to avoid a boost dependency.
    typedef std::uint64_t bits_t;
    std::vector<bits_t> mapped; // which new IDs have been mapped
    // NOTE(review): sizeof(bits_t) * 4 == 32, so only half of each 64-bit word
    // is ever used. The bit math below is internally consistent with this
    // value, so behavior is correct but storage is wasted; this matches
    // upstream glslang — confirm upstream before "fixing" to * 8.
    static const int mBits = sizeof(bits_t) * 4;

    bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
    void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
    void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
    size_t maxMappedId() const { return mapped.size() * mBits; }

    // Add a strip range for a given instruction starting at 'start'
    // Note: avoiding brace initializers to please older versions of MSVC.
    void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }

    // Function start and end. use unordered_map because we'll have
    // many fewer functions than IDs.
    std::unordered_map<spv::Id, range_t> fnPos;

    // Which functions are called, anywhere in the module, with a call count
    std::unordered_map<spv::Id, int> fnCalls;

    posmap_t typeConstPos; // word positions that define types & consts (ordered)
    posmap_rev_t idPosR; // reverse map from IDs to positions
    typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.

    std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs

    spv::Id entryPoint; // module entry point
    spv::Id largestNewId; // biggest new ID we have mapped anything to

    // Sections of the binary to strip, given as [begin,end)
    std::vector<range_t> stripRange;

    // processing options:
    std::uint32_t options;
    int verbose; // verbosity level

    // Error latch: this is set if the error handler is ever executed. It would be better to
    // use a try/catch block and throw, but that's not desired for certain environments, so
    // this is the alternative.
    mutable bool errorLatch;

    static errorfn_t errorHandler;
    static logfn_t logHandler;
};

} // namespace spv
| 299 | + | ||
| 300 | +#endif // SPIRVREMAPPER_H |
| 1 | +// | ||
| 2 | +// Copyright (C) 2014-2016 LunarG, Inc. | ||
| 3 | +// Copyright (C) 2018 Google, Inc. | ||
| 4 | +// | ||
| 5 | +// All rights reserved. | ||
| 6 | +// | ||
| 7 | +// Redistribution and use in source and binary forms, with or without | ||
| 8 | +// modification, are permitted provided that the following conditions | ||
| 9 | +// are met: | ||
| 10 | +// | ||
| 11 | +// Redistributions of source code must retain the above copyright | ||
| 12 | +// notice, this list of conditions and the following disclaimer. | ||
| 13 | +// | ||
| 14 | +// Redistributions in binary form must reproduce the above | ||
| 15 | +// copyright notice, this list of conditions and the following | ||
| 16 | +// disclaimer in the documentation and/or other materials provided | ||
| 17 | +// with the distribution. | ||
| 18 | +// | ||
| 19 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 20 | +// contributors may be used to endorse or promote products derived | ||
| 21 | +// from this software without specific prior written permission. | ||
| 22 | +// | ||
| 23 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 24 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 25 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 26 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 27 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 28 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 29 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 30 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 31 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 32 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 33 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 34 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 35 | + | ||
| 36 | +// | ||
| 37 | +// Call into SPIRV-Tools to disassemble, validate, and optimize. | ||
| 38 | +// | ||
| 39 | + | ||
| 40 | +#pragma once | ||
| 41 | +#ifndef GLSLANG_SPV_TOOLS_H | ||
| 42 | +#define GLSLANG_SPV_TOOLS_H | ||
| 43 | + | ||
| 44 | +#if ENABLE_OPT | ||
| 45 | +#include <vector> | ||
| 46 | +#include <ostream> | ||
| 47 | +#include <unordered_set> | ||
| 48 | +#include "spirv-tools/libspirv.h" | ||
| 49 | +#endif | ||
| 50 | + | ||
| 51 | +#include "glslang/MachineIndependent/Versions.h" | ||
| 52 | +#include "glslang/Include/visibility.h" | ||
| 53 | +#include "GlslangToSpv.h" | ||
| 54 | +#include "Logger.h" | ||
| 55 | + | ||
namespace glslang {

// Everything below is compiled only when glslang is built with ENABLE_OPT
// (SPIRV-Tools available); otherwise this header contributes nothing.
#if ENABLE_OPT

// Forward declaration; the full definition lives elsewhere in glslang.
class TIntermediate;

// Translate glslang's view of target versioning to what SPIRV-Tools uses.
GLSLANG_EXPORT spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger);
GLSLANG_EXPORT spv_target_env MapToSpirvToolsEnv(const glslang::TIntermediate& intermediate, spv::SpvBuildLogger* logger);

// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment.
GLSLANG_EXPORT void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);

// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment.
GLSLANG_EXPORT void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv,
                                          spv_target_env requested_context);

// Apply the SPIRV-Tools validator to generated SPIR-V.
GLSLANG_EXPORT void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
                                       spv::SpvBuildLogger*, bool prelegalization);

// Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process.
GLSLANG_EXPORT void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
                                        spv::SpvBuildLogger*, const SpvOptions*);

// Apply the SPIRV-Tools EliminateDeadInputComponents pass to generated SPIR-V. Put result in |spirv|.
GLSLANG_EXPORT void SpirvToolsEliminateDeadInputComponents(spv_target_env target_env, std::vector<unsigned int>& spirv,
                                                           spv::SpvBuildLogger*);

// Apply the SPIRV-Tools AnalyzeDeadOutputStores pass to generated SPIR-V. Put result in |live_locs|.
// Return true if the result is valid.
GLSLANG_EXPORT bool SpirvToolsAnalyzeDeadOutputStores(spv_target_env target_env, std::vector<unsigned int>& spirv,
                                                      std::unordered_set<uint32_t>* live_locs,
                                                      std::unordered_set<uint32_t>* live_builtins,
                                                      spv::SpvBuildLogger*);

// Apply the SPIRV-Tools EliminateDeadOutputStores and AggressiveDeadCodeElimination passes to generated SPIR-V using
// |live_locs|. Put result in |spirv|.
GLSLANG_EXPORT void SpirvToolsEliminateDeadOutputStores(spv_target_env target_env, std::vector<unsigned int>& spirv,
                                                        std::unordered_set<uint32_t>* live_locs,
                                                        std::unordered_set<uint32_t>* live_builtins,
                                                        spv::SpvBuildLogger*);

// Apply the SPIRV-Tools optimizer to strip debug info from SPIR-V. This is implicitly done by
// SpirvToolsTransform if spvOptions->stripDebugInfo is set, but can be called separately if
// optimization is disabled.
GLSLANG_EXPORT void SpirvToolsStripDebugInfo(const glslang::TIntermediate& intermediate,
                                             std::vector<unsigned int>& spirv, spv::SpvBuildLogger*);

#endif // ENABLE_OPT

} // end namespace glslang
| 108 | + | ||
| 109 | +#endif // GLSLANG_SPV_TOOLS_H |
| 1 | +// | ||
| 2 | +// Copyright (C) 2014-2015 LunarG, Inc. | ||
| 3 | +// | ||
| 4 | +// All rights reserved. | ||
| 5 | +// | ||
| 6 | +// Redistribution and use in source and binary forms, with or without | ||
| 7 | +// modification, are permitted provided that the following conditions | ||
| 8 | +// are met: | ||
| 9 | +// | ||
| 10 | +// Redistributions of source code must retain the above copyright | ||
| 11 | +// notice, this list of conditions and the following disclaimer. | ||
| 12 | +// | ||
| 13 | +// Redistributions in binary form must reproduce the above | ||
| 14 | +// copyright notice, this list of conditions and the following | ||
| 15 | +// disclaimer in the documentation and/or other materials provided | ||
| 16 | +// with the distribution. | ||
| 17 | +// | ||
| 18 | +// Neither the name of 3Dlabs Inc. Ltd. nor the names of its | ||
| 19 | +// contributors may be used to endorse or promote products derived | ||
| 20 | +// from this software without specific prior written permission. | ||
| 21 | +// | ||
| 22 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 23 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 24 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 25 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 26 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 27 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 28 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 29 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 30 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 31 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 32 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 33 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 34 | + | ||
| 35 | +// | ||
| 36 | +// Disassembler for SPIR-V. | ||
| 37 | +// | ||
| 38 | + | ||
| 39 | +#pragma once | ||
| 40 | +#ifndef disassembler_H | ||
| 41 | +#define disassembler_H | ||
| 42 | + | ||
| 43 | +#include <iostream> | ||
| 44 | +#include <vector> | ||
| 45 | + | ||
| 46 | +#include "glslang/Include/visibility.h" | ||
| 47 | + | ||
| 48 | +namespace spv { | ||
| 49 | + | ||
| 50 | + // disassemble with glslang custom disassembler | ||
| 51 | + GLSLANG_EXPORT void Disassemble(std::ostream& out, const std::vector<unsigned int>&); | ||
| 52 | + | ||
| 53 | +} // end namespace spv | ||
| 54 | + | ||
| 55 | +#endif // disassembler_H |
此 diff 太大无法显示。
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/glslang/build_info.h
0 → 100644
| 1 | +// Copyright (C) 2020 The Khronos Group Inc. | ||
| 2 | +// | ||
| 3 | +// All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Redistribution and use in source and binary forms, with or without | ||
| 6 | +// modification, are permitted provided that the following conditions | ||
| 7 | +// are met: | ||
| 8 | +// | ||
| 9 | +// Redistributions of source code must retain the above copyright | ||
| 10 | +// notice, this list of conditions and the following disclaimer. | ||
| 11 | +// | ||
| 12 | +// Redistributions in binary form must reproduce the above | ||
| 13 | +// copyright notice, this list of conditions and the following | ||
| 14 | +// disclaimer in the documentation and/or other materials provided | ||
| 15 | +// with the distribution. | ||
| 16 | +// | ||
| 17 | +// Neither the name of The Khronos Group Inc. nor the names of its | ||
| 18 | +// contributors may be used to endorse or promote products derived | ||
| 19 | +// from this software without specific prior written permission. | ||
| 20 | +// | ||
| 21 | +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
| 22 | +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
| 23 | +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | ||
| 24 | +// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | ||
| 25 | +// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
| 26 | +// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, | ||
| 27 | +// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 28 | +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER | ||
| 29 | +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | ||
| 30 | +// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN | ||
| 31 | +// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
| 32 | +// POSSIBILITY OF SUCH DAMAGE. | ||
| 33 | + | ||
| 34 | +#ifndef GLSLANG_BUILD_INFO | ||
| 35 | +#define GLSLANG_BUILD_INFO | ||
| 36 | + | ||
| 37 | +#define GLSLANG_VERSION_MAJOR 15 | ||
| 38 | +#define GLSLANG_VERSION_MINOR 1 | ||
| 39 | +#define GLSLANG_VERSION_PATCH 0 | ||
| 40 | +#define GLSLANG_VERSION_FLAVOR "" | ||
| 41 | + | ||
| 42 | +#define GLSLANG_VERSION_GREATER_THAN(major, minor, patch) \ | ||
| 43 | + ((GLSLANG_VERSION_MAJOR) > (major) || ((major) == GLSLANG_VERSION_MAJOR && \ | ||
| 44 | + ((GLSLANG_VERSION_MINOR) > (minor) || ((minor) == GLSLANG_VERSION_MINOR && \ | ||
| 45 | + (GLSLANG_VERSION_PATCH) > (patch))))) | ||
| 46 | + | ||
| 47 | +#define GLSLANG_VERSION_GREATER_OR_EQUAL_TO(major, minor, patch) \ | ||
| 48 | + ((GLSLANG_VERSION_MAJOR) > (major) || ((major) == GLSLANG_VERSION_MAJOR && \ | ||
| 49 | + ((GLSLANG_VERSION_MINOR) > (minor) || ((minor) == GLSLANG_VERSION_MINOR && \ | ||
| 50 | + (GLSLANG_VERSION_PATCH >= (patch)))))) | ||
| 51 | + | ||
| 52 | +#define GLSLANG_VERSION_LESS_THAN(major, minor, patch) \ | ||
| 53 | + ((GLSLANG_VERSION_MAJOR) < (major) || ((major) == GLSLANG_VERSION_MAJOR && \ | ||
| 54 | + ((GLSLANG_VERSION_MINOR) < (minor) || ((minor) == GLSLANG_VERSION_MINOR && \ | ||
| 55 | + (GLSLANG_VERSION_PATCH) < (patch))))) | ||
| 56 | + | ||
| 57 | +#define GLSLANG_VERSION_LESS_OR_EQUAL_TO(major, minor, patch) \ | ||
| 58 | + ((GLSLANG_VERSION_MAJOR) < (major) || ((major) == GLSLANG_VERSION_MAJOR && \ | ||
| 59 | + ((GLSLANG_VERSION_MINOR) < (minor) || ((minor) == GLSLANG_VERSION_MINOR && \ | ||
| 60 | + (GLSLANG_VERSION_PATCH <= (patch)))))) | ||
| 61 | + | ||
| 62 | +#endif // GLSLANG_BUILD_INFO |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/allocator.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_ALLOCATOR_H | ||
| 16 | +#define NCNN_ALLOCATOR_H | ||
| 17 | + | ||
| 18 | +#ifdef _WIN32 | ||
| 19 | +#define WIN32_LEAN_AND_MEAN | ||
| 20 | +#include <windows.h> | ||
| 21 | +#endif | ||
| 22 | + | ||
| 23 | +#include "platform.h" | ||
| 24 | + | ||
| 25 | +#include <stdlib.h> | ||
| 26 | + | ||
| 27 | +#if NCNN_PLATFORM_API | ||
| 28 | +#if __ANDROID_API__ >= 26 | ||
| 29 | +#include <android/hardware_buffer.h> | ||
| 30 | +#endif // __ANDROID_API__ >= 26 | ||
| 31 | +#endif // NCNN_PLATFORM_API | ||
| 32 | + | ||
| 33 | +namespace ncnn { | ||
| 34 | + | ||
| 35 | +// the alignment of all the allocated buffers | ||
| 36 | +#if NCNN_AVX512 | ||
| 37 | +#define NCNN_MALLOC_ALIGN 64 | ||
| 38 | +#elif NCNN_AVX | ||
| 39 | +#define NCNN_MALLOC_ALIGN 32 | ||
| 40 | +#else | ||
| 41 | +#define NCNN_MALLOC_ALIGN 16 | ||
| 42 | +#endif | ||
| 43 | + | ||
| 44 | +// we have some optimized kernels that may overread buffer a bit in loop | ||
| 45 | +// it is common to interleave next-loop data load with arithmetic instructions | ||
| 46 | +// allocating more bytes keeps us safe from SEGV_ACCERR failure | ||
| 47 | +#define NCNN_MALLOC_OVERREAD 64 | ||
| 48 | + | ||
| 49 | +// Aligns a pointer to the specified number of bytes | ||
| 50 | +// ptr Aligned pointer | ||
| 51 | +// n Alignment size that must be a power of two | ||
| 52 | +template<typename _Tp> | ||
| 53 | +static NCNN_FORCEINLINE _Tp* alignPtr(_Tp* ptr, int n = (int)sizeof(_Tp)) | ||
| 54 | +{ | ||
| 55 | + return (_Tp*)(((size_t)ptr + n - 1) & -n); | ||
| 56 | +} | ||
| 57 | + | ||
| 58 | +// Aligns a buffer size to the specified number of bytes | ||
| 59 | +// The function returns the minimum number that is greater or equal to sz and is divisible by n | ||
| 60 | +// sz Buffer size to align | ||
| 61 | +// n Alignment size that must be a power of two | ||
| 62 | +static NCNN_FORCEINLINE size_t alignSize(size_t sz, int n) | ||
| 63 | +{ | ||
| 64 | + return (sz + n - 1) & -n; | ||
| 65 | +} | ||
| 66 | + | ||
| 67 | +static NCNN_FORCEINLINE void* fastMalloc(size_t size) | ||
| 68 | +{ | ||
| 69 | +#if _MSC_VER | ||
| 70 | + return _aligned_malloc(size, NCNN_MALLOC_ALIGN); | ||
| 71 | +#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) | ||
| 72 | + void* ptr = 0; | ||
| 73 | + if (posix_memalign(&ptr, NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD)) | ||
| 74 | + ptr = 0; | ||
| 75 | + return ptr; | ||
| 76 | +#elif __ANDROID__ && __ANDROID_API__ < 17 | ||
| 77 | + return memalign(NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD); | ||
| 78 | +#else | ||
| 79 | + unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + NCNN_MALLOC_ALIGN + NCNN_MALLOC_OVERREAD); | ||
| 80 | + if (!udata) | ||
| 81 | + return 0; | ||
| 82 | + unsigned char** adata = alignPtr((unsigned char**)udata + 1, NCNN_MALLOC_ALIGN); | ||
| 83 | + adata[-1] = udata; | ||
| 84 | + return adata; | ||
| 85 | +#endif | ||
| 86 | +} | ||
| 87 | + | ||
| 88 | +static NCNN_FORCEINLINE void fastFree(void* ptr) | ||
| 89 | +{ | ||
| 90 | + if (ptr) | ||
| 91 | + { | ||
| 92 | +#if _MSC_VER | ||
| 93 | + _aligned_free(ptr); | ||
| 94 | +#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17) | ||
| 95 | + free(ptr); | ||
| 96 | +#elif __ANDROID__ && __ANDROID_API__ < 17 | ||
| 97 | + free(ptr); | ||
| 98 | +#else | ||
| 99 | + unsigned char* udata = ((unsigned char**)ptr)[-1]; | ||
| 100 | + free(udata); | ||
| 101 | +#endif | ||
| 102 | + } | ||
| 103 | +} | ||
| 104 | + | ||
| 105 | +#if NCNN_THREADS | ||
| 106 | +// exchange-add operation for atomic operations on reference counters | ||
| 107 | +#if defined __riscv && !defined __riscv_atomic | ||
| 108 | +// riscv target without A extension | ||
| 109 | +static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta) | ||
| 110 | +{ | ||
| 111 | + int tmp = *addr; | ||
| 112 | + *addr += delta; | ||
| 113 | + return tmp; | ||
| 114 | +} | ||
| 115 | +#elif defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32) | ||
| 116 | +// atomic increment on the linux version of the Intel(tm) compiler | ||
| 117 | +#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta) | ||
| 118 | +#elif defined __GNUC__ | ||
| 119 | +#if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__) | ||
| 120 | +#ifdef __ATOMIC_ACQ_REL | ||
| 121 | +#define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL) | ||
| 122 | +#else | ||
| 123 | +#define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4) | ||
| 124 | +#endif | ||
| 125 | +#else | ||
| 126 | +#if defined __ATOMIC_ACQ_REL && !defined __clang__ | ||
| 127 | +// version for gcc >= 4.7 | ||
| 128 | +#define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL) | ||
| 129 | +#else | ||
| 130 | +#define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta)) | ||
| 131 | +#endif | ||
| 132 | +#endif | ||
| 133 | +#elif defined _MSC_VER && !defined RC_INVOKED | ||
| 134 | +#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta) | ||
| 135 | +#else | ||
| 136 | +// thread-unsafe branch | ||
| 137 | +static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta) | ||
| 138 | +{ | ||
| 139 | + int tmp = *addr; | ||
| 140 | + *addr += delta; | ||
| 141 | + return tmp; | ||
| 142 | +} | ||
| 143 | +#endif | ||
| 144 | +#else // NCNN_THREADS | ||
| 145 | +static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta) | ||
| 146 | +{ | ||
| 147 | + int tmp = *addr; | ||
| 148 | + *addr += delta; | ||
| 149 | + return tmp; | ||
| 150 | +} | ||
| 151 | +#endif // NCNN_THREADS | ||
| 152 | + | ||
| 153 | +class NCNN_EXPORT Allocator | ||
| 154 | +{ | ||
| 155 | +public: | ||
| 156 | + virtual ~Allocator(); | ||
| 157 | + virtual void* fastMalloc(size_t size) = 0; | ||
| 158 | + virtual void fastFree(void* ptr) = 0; | ||
| 159 | +}; | ||
| 160 | + | ||
| 161 | +class PoolAllocatorPrivate; | ||
| 162 | +class NCNN_EXPORT PoolAllocator : public Allocator | ||
| 163 | +{ | ||
| 164 | +public: | ||
| 165 | + PoolAllocator(); | ||
| 166 | + ~PoolAllocator(); | ||
| 167 | + | ||
| 168 | + // ratio range 0 ~ 1 | ||
| 169 | + // default cr = 0 | ||
| 170 | + void set_size_compare_ratio(float scr); | ||
| 171 | + | ||
| 172 | + // budget drop threshold | ||
| 173 | + // default threshold = 10 | ||
| 174 | + void set_size_drop_threshold(size_t); | ||
| 175 | + | ||
| 176 | + // release all budgets immediately | ||
| 177 | + void clear(); | ||
| 178 | + | ||
| 179 | + virtual void* fastMalloc(size_t size); | ||
| 180 | + virtual void fastFree(void* ptr); | ||
| 181 | + | ||
| 182 | +private: | ||
| 183 | + PoolAllocator(const PoolAllocator&); | ||
| 184 | + PoolAllocator& operator=(const PoolAllocator&); | ||
| 185 | + | ||
| 186 | +private: | ||
| 187 | + PoolAllocatorPrivate* const d; | ||
| 188 | +}; | ||
| 189 | + | ||
| 190 | +class UnlockedPoolAllocatorPrivate; | ||
| 191 | +class NCNN_EXPORT UnlockedPoolAllocator : public Allocator | ||
| 192 | +{ | ||
| 193 | +public: | ||
| 194 | + UnlockedPoolAllocator(); | ||
| 195 | + ~UnlockedPoolAllocator(); | ||
| 196 | + | ||
| 197 | + // ratio range 0 ~ 1 | ||
| 198 | + // default cr = 0 | ||
| 199 | + void set_size_compare_ratio(float scr); | ||
| 200 | + | ||
| 201 | + // budget drop threshold | ||
| 202 | + // default threshold = 10 | ||
| 203 | + void set_size_drop_threshold(size_t); | ||
| 204 | + | ||
| 205 | + // release all budgets immediately | ||
| 206 | + void clear(); | ||
| 207 | + | ||
| 208 | + virtual void* fastMalloc(size_t size); | ||
| 209 | + virtual void fastFree(void* ptr); | ||
| 210 | + | ||
| 211 | +private: | ||
| 212 | + UnlockedPoolAllocator(const UnlockedPoolAllocator&); | ||
| 213 | + UnlockedPoolAllocator& operator=(const UnlockedPoolAllocator&); | ||
| 214 | + | ||
| 215 | +private: | ||
| 216 | + UnlockedPoolAllocatorPrivate* const d; | ||
| 217 | +}; | ||
| 218 | + | ||
| 219 | +#if NCNN_VULKAN | ||
| 220 | + | ||
| 221 | +class VulkanDevice; | ||
| 222 | + | ||
| 223 | +class NCNN_EXPORT VkBufferMemory | ||
| 224 | +{ | ||
| 225 | +public: | ||
| 226 | + VkBuffer buffer; | ||
| 227 | + | ||
| 228 | + // the base offset assigned by allocator | ||
| 229 | + size_t offset; | ||
| 230 | + size_t capacity; | ||
| 231 | + | ||
| 232 | + VkDeviceMemory memory; | ||
| 233 | + void* mapped_ptr; | ||
| 234 | + | ||
| 235 | + // buffer state, modified by command functions internally | ||
| 236 | + mutable VkAccessFlags access_flags; | ||
| 237 | + mutable VkPipelineStageFlags stage_flags; | ||
| 238 | + | ||
| 239 | + // initialize and modified by mat | ||
| 240 | + int refcount; | ||
| 241 | +}; | ||
| 242 | + | ||
| 243 | +class NCNN_EXPORT VkImageMemory | ||
| 244 | +{ | ||
| 245 | +public: | ||
| 246 | + VkImage image; | ||
| 247 | + VkImageView imageview; | ||
| 248 | + | ||
| 249 | + // underlying info assigned by allocator | ||
| 250 | + int width; | ||
| 251 | + int height; | ||
| 252 | + int depth; | ||
| 253 | + VkFormat format; | ||
| 254 | + | ||
| 255 | + VkDeviceMemory memory; | ||
| 256 | + void* mapped_ptr; | ||
| 257 | + | ||
| 258 | + // the base offset assigned by allocator | ||
| 259 | + size_t bind_offset; | ||
| 260 | + size_t bind_capacity; | ||
| 261 | + | ||
| 262 | + // image state, modified by command functions internally | ||
| 263 | + mutable VkAccessFlags access_flags; | ||
| 264 | + mutable VkImageLayout image_layout; | ||
| 265 | + mutable VkPipelineStageFlags stage_flags; | ||
| 266 | + | ||
| 267 | + // in-execution state, modified by command functions internally | ||
| 268 | + mutable int command_refcount; | ||
| 269 | + | ||
| 270 | + // initialize and modified by mat | ||
| 271 | + int refcount; | ||
| 272 | +}; | ||
| 273 | + | ||
| 274 | +class NCNN_EXPORT VkAllocator | ||
| 275 | +{ | ||
| 276 | +public: | ||
| 277 | + explicit VkAllocator(const VulkanDevice* _vkdev); | ||
| 278 | + virtual ~VkAllocator(); | ||
| 279 | + | ||
| 280 | + virtual void clear(); | ||
| 281 | + | ||
| 282 | + virtual VkBufferMemory* fastMalloc(size_t size) = 0; | ||
| 283 | + virtual void fastFree(VkBufferMemory* ptr) = 0; | ||
| 284 | + virtual int flush(VkBufferMemory* ptr); | ||
| 285 | + virtual int invalidate(VkBufferMemory* ptr); | ||
| 286 | + | ||
| 287 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack) = 0; | ||
| 288 | + virtual void fastFree(VkImageMemory* ptr) = 0; | ||
| 289 | + | ||
| 290 | +public: | ||
| 291 | + const VulkanDevice* vkdev; | ||
| 292 | + uint32_t buffer_memory_type_index; | ||
| 293 | + uint32_t image_memory_type_index; | ||
| 294 | + uint32_t reserved_type_index; | ||
| 295 | + bool mappable; | ||
| 296 | + bool coherent; | ||
| 297 | + | ||
| 298 | +protected: | ||
| 299 | + VkBuffer create_buffer(size_t size, VkBufferUsageFlags usage); | ||
| 300 | + VkDeviceMemory allocate_memory(size_t size, uint32_t memory_type_index); | ||
| 301 | + VkDeviceMemory allocate_dedicated_memory(size_t size, uint32_t memory_type_index, VkImage image, VkBuffer buffer); | ||
| 302 | + | ||
| 303 | + VkImage create_image(int width, int height, int depth, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage); | ||
| 304 | + VkImageView create_imageview(VkImage image, VkFormat format); | ||
| 305 | +}; | ||
| 306 | + | ||
| 307 | +class VkBlobAllocatorPrivate; | ||
| 308 | +class NCNN_EXPORT VkBlobAllocator : public VkAllocator | ||
| 309 | +{ | ||
| 310 | +public: | ||
| 311 | + explicit VkBlobAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 16 * 1024 * 1024); // 16M | ||
| 312 | + virtual ~VkBlobAllocator(); | ||
| 313 | + | ||
| 314 | +public: | ||
| 315 | + // release all budgets immediately | ||
| 316 | + virtual void clear(); | ||
| 317 | + | ||
| 318 | + virtual VkBufferMemory* fastMalloc(size_t size); | ||
| 319 | + virtual void fastFree(VkBufferMemory* ptr); | ||
| 320 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); | ||
| 321 | + virtual void fastFree(VkImageMemory* ptr); | ||
| 322 | + | ||
| 323 | +private: | ||
| 324 | + VkBlobAllocator(const VkBlobAllocator&); | ||
| 325 | + VkBlobAllocator& operator=(const VkBlobAllocator&); | ||
| 326 | + | ||
| 327 | +private: | ||
| 328 | + VkBlobAllocatorPrivate* const d; | ||
| 329 | +}; | ||
| 330 | + | ||
| 331 | +class VkWeightAllocatorPrivate; | ||
| 332 | +class NCNN_EXPORT VkWeightAllocator : public VkAllocator | ||
| 333 | +{ | ||
| 334 | +public: | ||
| 335 | + explicit VkWeightAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 8 * 1024 * 1024); // 8M | ||
| 336 | + virtual ~VkWeightAllocator(); | ||
| 337 | + | ||
| 338 | +public: | ||
| 339 | + // release all blocks immediately | ||
| 340 | + virtual void clear(); | ||
| 341 | + | ||
| 342 | +public: | ||
| 343 | + virtual VkBufferMemory* fastMalloc(size_t size); | ||
| 344 | + virtual void fastFree(VkBufferMemory* ptr); | ||
| 345 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); | ||
| 346 | + virtual void fastFree(VkImageMemory* ptr); | ||
| 347 | + | ||
| 348 | +private: | ||
| 349 | + VkWeightAllocator(const VkWeightAllocator&); | ||
| 350 | + VkWeightAllocator& operator=(const VkWeightAllocator&); | ||
| 351 | + | ||
| 352 | +private: | ||
| 353 | + VkWeightAllocatorPrivate* const d; | ||
| 354 | +}; | ||
| 355 | + | ||
| 356 | +class VkStagingAllocatorPrivate; | ||
| 357 | +class NCNN_EXPORT VkStagingAllocator : public VkAllocator | ||
| 358 | +{ | ||
| 359 | +public: | ||
| 360 | + explicit VkStagingAllocator(const VulkanDevice* vkdev); | ||
| 361 | + virtual ~VkStagingAllocator(); | ||
| 362 | + | ||
| 363 | +public: | ||
| 364 | + // ratio range 0 ~ 1 | ||
| 365 | + // default cr = 0.75 | ||
| 366 | + void set_size_compare_ratio(float scr); | ||
| 367 | + | ||
| 368 | + // release all budgets immediately | ||
| 369 | + virtual void clear(); | ||
| 370 | + | ||
| 371 | + virtual VkBufferMemory* fastMalloc(size_t size); | ||
| 372 | + virtual void fastFree(VkBufferMemory* ptr); | ||
| 373 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); | ||
| 374 | + virtual void fastFree(VkImageMemory* ptr); | ||
| 375 | + | ||
| 376 | +private: | ||
| 377 | + VkStagingAllocator(const VkStagingAllocator&); | ||
| 378 | + VkStagingAllocator& operator=(const VkStagingAllocator&); | ||
| 379 | + | ||
| 380 | +private: | ||
| 381 | + VkStagingAllocatorPrivate* const d; | ||
| 382 | +}; | ||
| 383 | + | ||
| 384 | +class VkWeightStagingAllocatorPrivate; | ||
| 385 | +class NCNN_EXPORT VkWeightStagingAllocator : public VkAllocator | ||
| 386 | +{ | ||
| 387 | +public: | ||
| 388 | + explicit VkWeightStagingAllocator(const VulkanDevice* vkdev); | ||
| 389 | + virtual ~VkWeightStagingAllocator(); | ||
| 390 | + | ||
| 391 | +public: | ||
| 392 | + virtual VkBufferMemory* fastMalloc(size_t size); | ||
| 393 | + virtual void fastFree(VkBufferMemory* ptr); | ||
| 394 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); | ||
| 395 | + virtual void fastFree(VkImageMemory* ptr); | ||
| 396 | + | ||
| 397 | +private: | ||
| 398 | + VkWeightStagingAllocator(const VkWeightStagingAllocator&); | ||
| 399 | + VkWeightStagingAllocator& operator=(const VkWeightStagingAllocator&); | ||
| 400 | + | ||
| 401 | +private: | ||
| 402 | + VkWeightStagingAllocatorPrivate* const d; | ||
| 403 | +}; | ||
| 404 | + | ||
| 405 | +#if NCNN_PLATFORM_API | ||
| 406 | +#if __ANDROID_API__ >= 26 | ||
| 407 | +class NCNN_EXPORT VkAndroidHardwareBufferImageAllocator : public VkAllocator | ||
| 408 | +{ | ||
| 409 | +public: | ||
| 410 | + VkAndroidHardwareBufferImageAllocator(const VulkanDevice* _vkdev, AHardwareBuffer* _hb); | ||
| 411 | + virtual ~VkAndroidHardwareBufferImageAllocator(); | ||
| 412 | + | ||
| 413 | +public: | ||
| 414 | + virtual VkBufferMemory* fastMalloc(size_t size); | ||
| 415 | + virtual void fastFree(VkBufferMemory* ptr); | ||
| 416 | + virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack); | ||
| 417 | + virtual void fastFree(VkImageMemory* ptr); | ||
| 418 | + | ||
| 419 | +private: | ||
| 420 | + VkAndroidHardwareBufferImageAllocator(const VkAndroidHardwareBufferImageAllocator&); | ||
| 421 | + VkAndroidHardwareBufferImageAllocator& operator=(const VkAndroidHardwareBufferImageAllocator&); | ||
| 422 | + | ||
| 423 | +public: | ||
| 424 | + int init(); | ||
| 425 | + | ||
| 426 | + int width() const; | ||
| 427 | + int height() const; | ||
| 428 | + uint64_t external_format() const; | ||
| 429 | + | ||
| 430 | +public: | ||
| 431 | + AHardwareBuffer* hb; | ||
| 432 | + AHardwareBuffer_Desc bufferDesc; | ||
| 433 | + VkAndroidHardwareBufferFormatPropertiesANDROID bufferFormatProperties; | ||
| 434 | + VkAndroidHardwareBufferPropertiesANDROID bufferProperties; | ||
| 435 | + VkSamplerYcbcrConversionKHR samplerYcbcrConversion; | ||
| 436 | +}; | ||
| 437 | +#endif // __ANDROID_API__ >= 26 | ||
| 438 | +#endif // NCNN_PLATFORM_API | ||
| 439 | + | ||
| 440 | +#endif // NCNN_VULKAN | ||
| 441 | + | ||
| 442 | +} // namespace ncnn | ||
| 443 | + | ||
| 444 | +#endif // NCNN_ALLOCATOR_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/benchmark.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_BENCHMARK_H | ||
| 16 | +#define NCNN_BENCHMARK_H | ||
| 17 | + | ||
| 18 | +#include "layer.h" | ||
| 19 | +#include "mat.h" | ||
| 20 | +#include "platform.h" | ||
| 21 | + | ||
| 22 | +namespace ncnn { | ||
| 23 | + | ||
| 24 | +// get now timestamp in ms | ||
| 25 | +NCNN_EXPORT double get_current_time(); | ||
| 26 | + | ||
| 27 | +// sleep milliseconds | ||
| 28 | +NCNN_EXPORT void sleep(unsigned long long int milliseconds = 1000); | ||
| 29 | + | ||
| 30 | +#if NCNN_BENCHMARK | ||
| 31 | + | ||
| 32 | +NCNN_EXPORT void benchmark(const Layer* layer, double start, double end); | ||
| 33 | +NCNN_EXPORT void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end); | ||
| 34 | + | ||
| 35 | +#endif // NCNN_BENCHMARK | ||
| 36 | + | ||
| 37 | +} // namespace ncnn | ||
| 38 | + | ||
| 39 | +#endif // NCNN_BENCHMARK_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/blob.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_BLOB_H | ||
| 16 | +#define NCNN_BLOB_H | ||
| 17 | + | ||
| 18 | +#include "mat.h" | ||
| 19 | +#include "platform.h" | ||
| 20 | + | ||
| 21 | +namespace ncnn { | ||
| 22 | + | ||
| 23 | +class NCNN_EXPORT Blob | ||
| 24 | +{ | ||
| 25 | +public: | ||
| 26 | + // empty | ||
| 27 | + Blob(); | ||
| 28 | + | ||
| 29 | +public: | ||
| 30 | +#if NCNN_STRING | ||
| 31 | + // blob name | ||
| 32 | + std::string name; | ||
| 33 | +#endif // NCNN_STRING | ||
| 34 | + // layer index which produce this blob as output | ||
| 35 | + int producer; | ||
| 36 | + // layer index which need this blob as input | ||
| 37 | + int consumer; | ||
| 38 | + // shape hint | ||
| 39 | + Mat shape; | ||
| 40 | +}; | ||
| 41 | + | ||
| 42 | +} // namespace ncnn | ||
| 43 | + | ||
| 44 | +#endif // NCNN_BLOB_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/c_api.h
0 → 100644
| 1 | +/* Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | + * | ||
| 3 | + * Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | + * | ||
| 5 | + * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | + * in compliance with the License. You may obtain a copy of the License at | ||
| 7 | + * | ||
| 8 | + * https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | + * | ||
| 10 | + * Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | + * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | + * CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | + * specific language governing permissions and limitations under the License. | ||
| 14 | + */ | ||
| 15 | + | ||
| 16 | +#ifndef NCNN_C_API_H | ||
| 17 | +#define NCNN_C_API_H | ||
| 18 | + | ||
| 19 | +#include "platform.h" | ||
| 20 | + | ||
| 21 | +#if NCNN_C_API | ||
| 22 | + | ||
| 23 | +#include <stddef.h> | ||
| 24 | + | ||
| 25 | +#ifdef __cplusplus | ||
| 26 | +extern "C" { | ||
| 27 | +#endif | ||
| 28 | + | ||
| 29 | +NCNN_EXPORT const char* ncnn_version(void); | ||
| 30 | + | ||
| 31 | +/* allocator api */ | ||
| 32 | +typedef struct __ncnn_allocator_t* ncnn_allocator_t; | ||
| 33 | +struct NCNN_EXPORT __ncnn_allocator_t | ||
| 34 | +{ | ||
| 35 | + void* pthis; | ||
| 36 | + | ||
| 37 | + void* (*fast_malloc)(ncnn_allocator_t allocator, size_t size); | ||
| 38 | + void (*fast_free)(ncnn_allocator_t allocator, void* ptr); | ||
| 39 | +}; | ||
| 40 | + | ||
| 41 | +NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_pool_allocator(void); | ||
| 42 | +NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_unlocked_pool_allocator(void); | ||
| 43 | +NCNN_EXPORT void ncnn_allocator_destroy(ncnn_allocator_t allocator); | ||
| 44 | + | ||
| 45 | +/* option api */ | ||
| 46 | +typedef struct __ncnn_option_t* ncnn_option_t; | ||
| 47 | + | ||
| 48 | +NCNN_EXPORT ncnn_option_t ncnn_option_create(void); | ||
| 49 | +NCNN_EXPORT void ncnn_option_destroy(ncnn_option_t opt); | ||
| 50 | + | ||
| 51 | +NCNN_EXPORT int ncnn_option_get_num_threads(const ncnn_option_t opt); | ||
| 52 | +NCNN_EXPORT void ncnn_option_set_num_threads(ncnn_option_t opt, int num_threads); | ||
| 53 | + | ||
| 54 | +NCNN_EXPORT int ncnn_option_get_use_local_pool_allocator(const ncnn_option_t opt); | ||
| 55 | +NCNN_EXPORT void ncnn_option_set_use_local_pool_allocator(ncnn_option_t opt, int use_local_pool_allocator); | ||
| 56 | + | ||
| 57 | +NCNN_EXPORT void ncnn_option_set_blob_allocator(ncnn_option_t opt, ncnn_allocator_t allocator); | ||
| 58 | +NCNN_EXPORT void ncnn_option_set_workspace_allocator(ncnn_option_t opt, ncnn_allocator_t allocator); | ||
| 59 | + | ||
| 60 | +NCNN_EXPORT int ncnn_option_get_use_vulkan_compute(const ncnn_option_t opt); | ||
| 61 | +NCNN_EXPORT void ncnn_option_set_use_vulkan_compute(ncnn_option_t opt, int use_vulkan_compute); | ||
| 62 | + | ||
| 63 | +/* mat api */ | ||
| 64 | +typedef struct __ncnn_mat_t* ncnn_mat_t; | ||
| 65 | + | ||
| 66 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create(void); | ||
| 67 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d(int w, ncnn_allocator_t allocator); | ||
| 68 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d(int w, int h, ncnn_allocator_t allocator); | ||
| 69 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d(int w, int h, int c, ncnn_allocator_t allocator); | ||
| 70 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d(int w, int h, int d, int c, ncnn_allocator_t allocator); | ||
| 71 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d(int w, void* data, ncnn_allocator_t allocator); | ||
| 72 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d(int w, int h, void* data, ncnn_allocator_t allocator); | ||
| 73 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d(int w, int h, int c, void* data, ncnn_allocator_t allocator); | ||
| 74 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d(int w, int h, int d, int c, void* data, ncnn_allocator_t allocator); | ||
| 75 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d_elem(int w, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 76 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d_elem(int w, int h, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 77 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d_elem(int w, int h, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 78 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d_elem(int w, int h, int d, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 79 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d_elem(int w, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 80 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d_elem(int w, int h, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 81 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d_elem(int w, int h, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 82 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d_elem(int w, int h, int d, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator); | ||
| 83 | +NCNN_EXPORT void ncnn_mat_destroy(ncnn_mat_t mat); | ||
| 84 | + | ||
| 85 | +NCNN_EXPORT void ncnn_mat_fill_float(ncnn_mat_t mat, float v); | ||
| 86 | + | ||
| 87 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_clone(const ncnn_mat_t mat, ncnn_allocator_t allocator); | ||
| 88 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_1d(const ncnn_mat_t mat, int w, ncnn_allocator_t allocator); | ||
| 89 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_2d(const ncnn_mat_t mat, int w, int h, ncnn_allocator_t allocator); | ||
| 90 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_3d(const ncnn_mat_t mat, int w, int h, int c, ncnn_allocator_t allocator); | ||
| 91 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_4d(const ncnn_mat_t mat, int w, int h, int d, int c, ncnn_allocator_t allocator); | ||
| 92 | + | ||
| 93 | +NCNN_EXPORT int ncnn_mat_get_dims(const ncnn_mat_t mat); | ||
| 94 | +NCNN_EXPORT int ncnn_mat_get_w(const ncnn_mat_t mat); | ||
| 95 | +NCNN_EXPORT int ncnn_mat_get_h(const ncnn_mat_t mat); | ||
| 96 | +NCNN_EXPORT int ncnn_mat_get_d(const ncnn_mat_t mat); | ||
| 97 | +NCNN_EXPORT int ncnn_mat_get_c(const ncnn_mat_t mat); | ||
| 98 | +NCNN_EXPORT size_t ncnn_mat_get_elemsize(const ncnn_mat_t mat); | ||
| 99 | +NCNN_EXPORT int ncnn_mat_get_elempack(const ncnn_mat_t mat); | ||
| 100 | +NCNN_EXPORT size_t ncnn_mat_get_cstep(const ncnn_mat_t mat); | ||
| 101 | +NCNN_EXPORT void* ncnn_mat_get_data(const ncnn_mat_t mat); | ||
| 102 | + | ||
| 103 | +NCNN_EXPORT void* ncnn_mat_get_channel_data(const ncnn_mat_t mat, int c); | ||
| 104 | + | ||
| 105 | +#if NCNN_PIXEL | ||
| 106 | + | ||
| 107 | +/* mat pixel api */ | ||
| 108 | +#define NCNN_MAT_PIXEL_RGB 1 | ||
| 109 | +#define NCNN_MAT_PIXEL_BGR 2 | ||
| 110 | +#define NCNN_MAT_PIXEL_GRAY 3 | ||
| 111 | +#define NCNN_MAT_PIXEL_RGBA 4 | ||
| 112 | +#define NCNN_MAT_PIXEL_BGRA 5 | ||
| 113 | +#define NCNN_MAT_PIXEL_X2Y(X, Y) (X | (Y << 16)) | ||
| 114 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels(const unsigned char* pixels, int type, int w, int h, int stride, ncnn_allocator_t allocator); | ||
| 115 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int stride, int target_width, int target_height, ncnn_allocator_t allocator); | ||
| 116 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, ncnn_allocator_t allocator); | ||
| 117 | +NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, int target_width, int target_height, ncnn_allocator_t allocator); | ||
| 118 | +NCNN_EXPORT void ncnn_mat_to_pixels(const ncnn_mat_t mat, unsigned char* pixels, int type, int stride); | ||
| 119 | +NCNN_EXPORT void ncnn_mat_to_pixels_resize(const ncnn_mat_t mat, unsigned char* pixels, int type, int target_width, int target_height, int target_stride); | ||
| 120 | + | ||
| 121 | +#endif /* NCNN_PIXEL */ | ||
| 122 | + | ||
| 123 | +NCNN_EXPORT void ncnn_mat_substract_mean_normalize(ncnn_mat_t mat, const float* mean_vals, const float* norm_vals); | ||
| 124 | + | ||
| 125 | +NCNN_EXPORT void ncnn_convert_packing(const ncnn_mat_t src, ncnn_mat_t* dst, int elempack, const ncnn_option_t opt); | ||
| 126 | +NCNN_EXPORT void ncnn_flatten(const ncnn_mat_t src, ncnn_mat_t* dst, const ncnn_option_t opt); | ||
| 127 | + | ||
| 128 | +/* blob api */ | ||
| 129 | +typedef struct __ncnn_blob_t* ncnn_blob_t; | ||
| 130 | + | ||
| 131 | +#if NCNN_STRING | ||
| 132 | +NCNN_EXPORT const char* ncnn_blob_get_name(const ncnn_blob_t blob); | ||
| 133 | +#endif /* NCNN_STRING */ | ||
| 134 | + | ||
| 135 | +NCNN_EXPORT int ncnn_blob_get_producer(const ncnn_blob_t blob); | ||
| 136 | +NCNN_EXPORT int ncnn_blob_get_consumer(const ncnn_blob_t blob); | ||
| 137 | + | ||
| 138 | +NCNN_EXPORT void ncnn_blob_get_shape(const ncnn_blob_t blob, int* dims, int* w, int* h, int* c); | ||
| 139 | + | ||
| 140 | +/* paramdict api */ | ||
| 141 | +typedef struct __ncnn_paramdict_t* ncnn_paramdict_t; | ||
| 142 | + | ||
| 143 | +NCNN_EXPORT ncnn_paramdict_t ncnn_paramdict_create(void); | ||
| 144 | +NCNN_EXPORT void ncnn_paramdict_destroy(ncnn_paramdict_t pd); | ||
| 145 | + | ||
| 146 | +NCNN_EXPORT int ncnn_paramdict_get_type(const ncnn_paramdict_t pd, int id); | ||
| 147 | + | ||
| 148 | +NCNN_EXPORT int ncnn_paramdict_get_int(const ncnn_paramdict_t pd, int id, int def); | ||
| 149 | +NCNN_EXPORT float ncnn_paramdict_get_float(const ncnn_paramdict_t pd, int id, float def); | ||
| 150 | +NCNN_EXPORT ncnn_mat_t ncnn_paramdict_get_array(const ncnn_paramdict_t pd, int id, const ncnn_mat_t def); | ||
| 151 | + | ||
| 152 | +NCNN_EXPORT void ncnn_paramdict_set_int(ncnn_paramdict_t pd, int id, int i); | ||
| 153 | +NCNN_EXPORT void ncnn_paramdict_set_float(ncnn_paramdict_t pd, int id, float f); | ||
| 154 | +NCNN_EXPORT void ncnn_paramdict_set_array(ncnn_paramdict_t pd, int id, const ncnn_mat_t v); | ||
| 155 | + | ||
| 156 | +/* datareader api */ | ||
| 157 | +typedef struct __ncnn_datareader_t* ncnn_datareader_t; | ||
| 158 | +struct NCNN_EXPORT __ncnn_datareader_t | ||
| 159 | +{ | ||
| 160 | + void* pthis; | ||
| 161 | + | ||
| 162 | +#if NCNN_STRING | ||
| 163 | + int (*scan)(ncnn_datareader_t dr, const char* format, void* p); | ||
| 164 | +#endif /* NCNN_STRING */ | ||
| 165 | + size_t (*read)(ncnn_datareader_t dr, void* buf, size_t size); | ||
| 166 | +}; | ||
| 167 | + | ||
| 168 | +NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create(void); | ||
| 169 | +#if NCNN_STDIO | ||
| 170 | +NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_stdio(FILE* fp); | ||
| 171 | +#endif /* NCNN_STDIO */ | ||
| 172 | +NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_memory(const unsigned char** mem); | ||
| 173 | +NCNN_EXPORT void ncnn_datareader_destroy(ncnn_datareader_t dr); | ||
| 174 | + | ||
| 175 | +/* modelbin api */ | ||
| 176 | +typedef struct __ncnn_modelbin_t* ncnn_modelbin_t; | ||
| 177 | +struct NCNN_EXPORT __ncnn_modelbin_t | ||
| 178 | +{ | ||
| 179 | + void* pthis; | ||
| 180 | + | ||
| 181 | + ncnn_mat_t (*load_1d)(const ncnn_modelbin_t mb, int w, int type); | ||
| 182 | + ncnn_mat_t (*load_2d)(const ncnn_modelbin_t mb, int w, int h, int type); | ||
| 183 | + ncnn_mat_t (*load_3d)(const ncnn_modelbin_t mb, int w, int h, int c, int type); | ||
| 184 | +}; | ||
| 185 | + | ||
| 186 | +NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_datareader(const ncnn_datareader_t dr); | ||
| 187 | +NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_mat_array(const ncnn_mat_t* weights, int n); | ||
| 188 | +NCNN_EXPORT void ncnn_modelbin_destroy(ncnn_modelbin_t mb); | ||
| 189 | + | ||
| 190 | +/* layer api */ | ||
| 191 | +typedef struct __ncnn_layer_t* ncnn_layer_t; | ||
| 192 | +struct NCNN_EXPORT __ncnn_layer_t | ||
| 193 | +{ | ||
| 194 | + void* pthis; | ||
| 195 | + | ||
| 196 | + int (*load_param)(ncnn_layer_t layer, const ncnn_paramdict_t pd); | ||
| 197 | + int (*load_model)(ncnn_layer_t layer, const ncnn_modelbin_t mb); | ||
| 198 | + | ||
| 199 | + int (*create_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt); | ||
| 200 | + int (*destroy_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt); | ||
| 201 | + | ||
| 202 | + int (*forward_1)(const ncnn_layer_t layer, const ncnn_mat_t bottom_blob, ncnn_mat_t* top_blob, const ncnn_option_t opt); | ||
| 203 | + int (*forward_n)(const ncnn_layer_t layer, const ncnn_mat_t* bottom_blobs, int n, ncnn_mat_t* top_blobs, int n2, const ncnn_option_t opt); | ||
| 204 | + | ||
| 205 | + int (*forward_inplace_1)(const ncnn_layer_t layer, ncnn_mat_t bottom_top_blob, const ncnn_option_t opt); | ||
| 206 | + int (*forward_inplace_n)(const ncnn_layer_t layer, ncnn_mat_t* bottom_top_blobs, int n, const ncnn_option_t opt); | ||
| 207 | +}; | ||
| 208 | + | ||
| 209 | +NCNN_EXPORT ncnn_layer_t ncnn_layer_create(void); | ||
| 210 | +NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_typeindex(int typeindex); | ||
| 211 | +#if NCNN_STRING | ||
| 212 | +NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_type(const char* type); | ||
| 213 | +NCNN_EXPORT int ncnn_layer_type_to_index(const char* type); | ||
| 214 | +#endif /* NCNN_STRING */ | ||
| 215 | +NCNN_EXPORT void ncnn_layer_destroy(ncnn_layer_t layer); | ||
| 216 | + | ||
| 217 | +#if NCNN_STRING | ||
| 218 | +NCNN_EXPORT const char* ncnn_layer_get_name(const ncnn_layer_t layer); | ||
| 219 | +#endif /* NCNN_STRING */ | ||
| 220 | + | ||
| 221 | +NCNN_EXPORT int ncnn_layer_get_typeindex(const ncnn_layer_t layer); | ||
| 222 | +#if NCNN_STRING | ||
| 223 | +NCNN_EXPORT const char* ncnn_layer_get_type(const ncnn_layer_t layer); | ||
| 224 | +#endif /* NCNN_STRING */ | ||
| 225 | + | ||
| 226 | +NCNN_EXPORT int ncnn_layer_get_one_blob_only(const ncnn_layer_t layer); | ||
| 227 | +NCNN_EXPORT int ncnn_layer_get_support_inplace(const ncnn_layer_t layer); | ||
| 228 | +NCNN_EXPORT int ncnn_layer_get_support_vulkan(const ncnn_layer_t layer); | ||
| 229 | +NCNN_EXPORT int ncnn_layer_get_support_packing(const ncnn_layer_t layer); | ||
| 230 | +NCNN_EXPORT int ncnn_layer_get_support_bf16_storage(const ncnn_layer_t layer); | ||
| 231 | +NCNN_EXPORT int ncnn_layer_get_support_fp16_storage(const ncnn_layer_t layer); | ||
| 232 | +NCNN_EXPORT int ncnn_layer_get_support_image_storage(const ncnn_layer_t layer); | ||
| 233 | + | ||
| 234 | +NCNN_EXPORT void ncnn_layer_set_one_blob_only(ncnn_layer_t layer, int enable); | ||
| 235 | +NCNN_EXPORT void ncnn_layer_set_support_inplace(ncnn_layer_t layer, int enable); | ||
| 236 | +NCNN_EXPORT void ncnn_layer_set_support_vulkan(ncnn_layer_t layer, int enable); | ||
| 237 | +NCNN_EXPORT void ncnn_layer_set_support_packing(ncnn_layer_t layer, int enable); | ||
| 238 | +NCNN_EXPORT void ncnn_layer_set_support_bf16_storage(ncnn_layer_t layer, int enable); | ||
| 239 | +NCNN_EXPORT void ncnn_layer_set_support_fp16_storage(ncnn_layer_t layer, int enable); | ||
| 240 | +NCNN_EXPORT void ncnn_layer_set_support_image_storage(ncnn_layer_t layer, int enable); | ||
| 241 | + | ||
| 242 | +NCNN_EXPORT int ncnn_layer_get_bottom_count(const ncnn_layer_t layer); | ||
| 243 | +NCNN_EXPORT int ncnn_layer_get_bottom(const ncnn_layer_t layer, int i); | ||
| 244 | +NCNN_EXPORT int ncnn_layer_get_top_count(const ncnn_layer_t layer); | ||
| 245 | +NCNN_EXPORT int ncnn_layer_get_top(const ncnn_layer_t layer, int i); | ||
| 246 | + | ||
| 247 | +NCNN_EXPORT void ncnn_blob_get_bottom_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c); | ||
| 248 | +NCNN_EXPORT void ncnn_blob_get_top_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c); | ||
| 249 | + | ||
| 250 | +/* layer factory function */ | ||
| 251 | +typedef ncnn_layer_t (*ncnn_layer_creator_t)(void* userdata); | ||
| 252 | +typedef void (*ncnn_layer_destroyer_t)(ncnn_layer_t layer, void* userdata); | ||
| 253 | + | ||
| 254 | +typedef struct __ncnn_net_custom_layer_factory_t* ncnn_net_custom_layer_factory_t; | ||
| 255 | +struct __ncnn_net_custom_layer_factory_t | ||
| 256 | +{ | ||
| 257 | + ncnn_layer_creator_t creator; | ||
| 258 | + ncnn_layer_destroyer_t destroyer; | ||
| 259 | + void* userdata; | ||
| 260 | + ncnn_net_custom_layer_factory_t next; | ||
| 261 | +}; | ||
| 262 | + | ||
| 263 | +/* net api */ | ||
| 264 | +typedef struct __ncnn_net_t* ncnn_net_t; | ||
| 265 | +struct __ncnn_net_t | ||
| 266 | +{ | ||
| 267 | + void* pthis; | ||
| 268 | + | ||
| 269 | + ncnn_net_custom_layer_factory_t custom_layer_factory; | ||
| 270 | +}; | ||
| 271 | + | ||
| 272 | +NCNN_EXPORT ncnn_net_t ncnn_net_create(void); | ||
| 273 | +NCNN_EXPORT void ncnn_net_destroy(ncnn_net_t net); | ||
| 274 | + | ||
| 275 | +NCNN_EXPORT ncnn_option_t ncnn_net_get_option(ncnn_net_t net); | ||
| 276 | +NCNN_EXPORT void ncnn_net_set_option(ncnn_net_t net, ncnn_option_t opt); | ||
| 277 | + | ||
| 278 | +#if NCNN_VULKAN | ||
| 279 | +NCNN_EXPORT void ncnn_net_set_vulkan_device(ncnn_net_t net, int device_index); | ||
| 280 | +#endif | ||
| 281 | + | ||
| 282 | +#if NCNN_STRING | ||
| 283 | +NCNN_EXPORT void ncnn_net_register_custom_layer_by_type(ncnn_net_t net, const char* type, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata); | ||
| 284 | +#endif /* NCNN_STRING */ | ||
| 285 | +NCNN_EXPORT void ncnn_net_register_custom_layer_by_typeindex(ncnn_net_t net, int typeindex, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata); | ||
| 286 | + | ||
| 287 | +#if NCNN_STDIO | ||
| 288 | +#if NCNN_STRING | ||
| 289 | +NCNN_EXPORT int ncnn_net_load_param(ncnn_net_t net, const char* path); | ||
| 290 | +#endif /* NCNN_STRING */ | ||
| 291 | +NCNN_EXPORT int ncnn_net_load_param_bin(ncnn_net_t net, const char* path); | ||
| 292 | +NCNN_EXPORT int ncnn_net_load_model(ncnn_net_t net, const char* path); | ||
| 293 | +#endif /* NCNN_STDIO */ | ||
| 294 | + | ||
| 295 | +#if NCNN_STDIO | ||
| 296 | +#if NCNN_STRING | ||
| 297 | +NCNN_EXPORT int ncnn_net_load_param_memory(ncnn_net_t net, const char* mem); | ||
| 298 | +#endif /* NCNN_STRING */ | ||
| 299 | +#endif /* NCNN_STDIO */ | ||
| 300 | +NCNN_EXPORT int ncnn_net_load_param_bin_memory(ncnn_net_t net, const unsigned char* mem); | ||
| 301 | +NCNN_EXPORT int ncnn_net_load_model_memory(ncnn_net_t net, const unsigned char* mem); | ||
| 302 | + | ||
| 303 | +#if NCNN_STRING | ||
| 304 | +NCNN_EXPORT int ncnn_net_load_param_datareader(ncnn_net_t net, const ncnn_datareader_t dr); | ||
| 305 | +#endif /* NCNN_STRING */ | ||
| 306 | +NCNN_EXPORT int ncnn_net_load_param_bin_datareader(ncnn_net_t net, const ncnn_datareader_t dr); | ||
| 307 | +NCNN_EXPORT int ncnn_net_load_model_datareader(ncnn_net_t net, const ncnn_datareader_t dr); | ||
| 308 | + | ||
| 309 | +NCNN_EXPORT void ncnn_net_clear(ncnn_net_t net); | ||
| 310 | + | ||
| 311 | +NCNN_EXPORT int ncnn_net_get_input_count(const ncnn_net_t net); | ||
| 312 | +NCNN_EXPORT int ncnn_net_get_output_count(const ncnn_net_t net); | ||
| 313 | +#if NCNN_STRING | ||
| 314 | +NCNN_EXPORT const char* ncnn_net_get_input_name(const ncnn_net_t net, int i); | ||
| 315 | +NCNN_EXPORT const char* ncnn_net_get_output_name(const ncnn_net_t net, int i); | ||
| 316 | +#endif /* NCNN_STRING */ | ||
| 317 | +NCNN_EXPORT int ncnn_net_get_input_index(const ncnn_net_t net, int i); | ||
| 318 | +NCNN_EXPORT int ncnn_net_get_output_index(const ncnn_net_t net, int i); | ||
| 319 | + | ||
| 320 | +/* extractor api */ | ||
| 321 | +typedef struct __ncnn_extractor_t* ncnn_extractor_t; | ||
| 322 | + | ||
| 323 | +NCNN_EXPORT ncnn_extractor_t ncnn_extractor_create(ncnn_net_t net); | ||
| 324 | +NCNN_EXPORT void ncnn_extractor_destroy(ncnn_extractor_t ex); | ||
| 325 | + | ||
| 326 | +NCNN_EXPORT void ncnn_extractor_set_option(ncnn_extractor_t ex, const ncnn_option_t opt); | ||
| 327 | + | ||
| 328 | +#if NCNN_STRING | ||
| 329 | +NCNN_EXPORT int ncnn_extractor_input(ncnn_extractor_t ex, const char* name, const ncnn_mat_t mat); | ||
| 330 | +NCNN_EXPORT int ncnn_extractor_extract(ncnn_extractor_t ex, const char* name, ncnn_mat_t* mat); | ||
| 331 | +#endif /* NCNN_STRING */ | ||
| 332 | +NCNN_EXPORT int ncnn_extractor_input_index(ncnn_extractor_t ex, int index, const ncnn_mat_t mat); | ||
| 333 | +NCNN_EXPORT int ncnn_extractor_extract_index(ncnn_extractor_t ex, int index, ncnn_mat_t* mat); | ||
| 334 | + | ||
| 335 | +/* mat process api */ | ||
| 336 | +#define NCNN_BORDER_CONSTANT 0 | ||
| 337 | +#define NCNN_BORDER_REPLICATE 1 | ||
| 338 | +#define NCNN_BORDER_REFLECT 2 | ||
| 339 | +#define NCNN_BORDER_TRANSPARENT -233 | ||
| 340 | +NCNN_EXPORT void ncnn_copy_make_border(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int type, float v, const ncnn_option_t opt); | ||
| 341 | +NCNN_EXPORT void ncnn_copy_make_border_3d(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int front, int behind, int type, float v, const ncnn_option_t opt); | ||
| 342 | +NCNN_EXPORT void ncnn_copy_cut_border(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, const ncnn_option_t opt); | ||
| 343 | +NCNN_EXPORT void ncnn_copy_cut_border_3d(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int front, int behind, const ncnn_option_t opt); | ||
| 344 | + | ||
| 345 | +#if NCNN_PIXEL_DRAWING | ||
| 346 | +/* mat pixel drawing api */ | ||
| 347 | +NCNN_EXPORT void ncnn_draw_rectangle_c1(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); | ||
| 348 | +NCNN_EXPORT void ncnn_draw_rectangle_c2(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); | ||
| 349 | +NCNN_EXPORT void ncnn_draw_rectangle_c3(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); | ||
| 350 | +NCNN_EXPORT void ncnn_draw_rectangle_c4(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness); | ||
| 351 | + | ||
| 352 | +NCNN_EXPORT void ncnn_draw_text_c1(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); | ||
| 353 | +NCNN_EXPORT void ncnn_draw_text_c2(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); | ||
| 354 | +NCNN_EXPORT void ncnn_draw_text_c3(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); | ||
| 355 | +NCNN_EXPORT void ncnn_draw_text_c4(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color); | ||
| 356 | + | ||
| 357 | +NCNN_EXPORT void ncnn_draw_circle_c1(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); | ||
| 358 | +NCNN_EXPORT void ncnn_draw_circle_c2(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); | ||
| 359 | +NCNN_EXPORT void ncnn_draw_circle_c3(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); | ||
| 360 | +NCNN_EXPORT void ncnn_draw_circle_c4(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness); | ||
| 361 | + | ||
| 362 | +NCNN_EXPORT void ncnn_draw_line_c1(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); | ||
| 363 | +NCNN_EXPORT void ncnn_draw_line_c2(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); | ||
| 364 | +NCNN_EXPORT void ncnn_draw_line_c3(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); | ||
| 365 | +NCNN_EXPORT void ncnn_draw_line_c4(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness); | ||
| 366 | +#endif /* NCNN_PIXEL_DRAWING */ | ||
| 367 | + | ||
| 368 | +#ifdef __cplusplus | ||
| 369 | +} /* extern "C" */ | ||
| 370 | +#endif | ||
| 371 | + | ||
| 372 | +#endif /* NCNN_C_API */ | ||
| 373 | + | ||
| 374 | +#endif /* NCNN_C_API_H */ |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/command.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_COMMAND_H | ||
| 16 | +#define NCNN_COMMAND_H | ||
| 17 | + | ||
| 18 | +#include "platform.h" | ||
| 19 | + | ||
| 20 | +#if NCNN_VULKAN | ||
| 21 | + | ||
| 22 | +#include "mat.h" | ||
| 23 | + | ||
| 24 | +namespace ncnn { | ||
| 25 | + | ||
| 26 | +class Pipeline; | ||
| 27 | +#if NCNN_PLATFORM_API | ||
| 28 | +#if __ANDROID_API__ >= 26 | ||
| 29 | +class ImportAndroidHardwareBufferPipeline; | ||
| 30 | +#endif // __ANDROID_API__ >= 26 | ||
| 31 | +#endif // NCNN_PLATFORM_API | ||
| 32 | +class VkComputePrivate; | ||
| 33 | +class NCNN_EXPORT VkCompute | ||
| 34 | +{ | ||
| 35 | +public: | ||
| 36 | + explicit VkCompute(const VulkanDevice* vkdev); | ||
| 37 | + virtual ~VkCompute(); | ||
| 38 | + | ||
| 39 | +public: | ||
| 40 | + void record_upload(const Mat& src, VkMat& dst, const Option& opt); | ||
| 41 | + | ||
| 42 | + void record_upload(const Mat& src, VkImageMat& dst, const Option& opt); | ||
| 43 | + | ||
| 44 | + void record_download(const VkMat& src, Mat& dst, const Option& opt); | ||
| 45 | + | ||
| 46 | + void record_download(const VkImageMat& src, Mat& dst, const Option& opt); | ||
| 47 | + | ||
| 48 | + void record_buffer_to_image(const VkMat& src, VkImageMat& dst, const Option& opt); | ||
| 49 | + | ||
| 50 | + void record_image_to_buffer(const VkImageMat& src, VkMat& dst, const Option& opt); | ||
| 51 | + | ||
| 52 | + void record_clone(const Mat& src, VkMat& dst, const Option& opt); | ||
| 53 | + | ||
| 54 | + void record_clone(const Mat& src, VkImageMat& dst, const Option& opt); | ||
| 55 | + | ||
| 56 | + void record_clone(const VkMat& src, Mat& dst, const Option& opt); | ||
| 57 | + | ||
| 58 | + void record_clone(const VkImageMat& src, Mat& dst, const Option& opt); | ||
| 59 | + | ||
| 60 | + void record_clone(const VkMat& src, VkMat& dst, const Option& opt); | ||
| 61 | + | ||
| 62 | + void record_clone(const VkImageMat& src, VkImageMat& dst, const Option& opt); | ||
| 63 | + | ||
| 64 | + void record_clone(const VkMat& src, VkImageMat& dst, const Option& opt); | ||
| 65 | + | ||
| 66 | + void record_clone(const VkImageMat& src, VkMat& dst, const Option& opt); | ||
| 67 | + | ||
| 68 | + void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher); | ||
| 69 | + | ||
| 70 | + void record_pipeline(const Pipeline* pipeline, const std::vector<VkImageMat>& bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher); | ||
| 71 | + | ||
| 72 | + void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher); | ||
| 73 | + void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher); | ||
| 74 | + void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const Mat& dispatcher); | ||
| 75 | + | ||
| 76 | +#if NCNN_BENCHMARK | ||
| 77 | + void record_write_timestamp(uint32_t query); | ||
| 78 | +#endif // NCNN_BENCHMARK | ||
| 79 | + | ||
| 80 | +#if NCNN_PLATFORM_API | ||
| 81 | +#if __ANDROID_API__ >= 26 | ||
| 82 | + void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkMat& dst); | ||
| 83 | + | ||
| 84 | + void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkImageMat& dst); | ||
| 85 | +#endif // __ANDROID_API__ >= 26 | ||
| 86 | +#endif // NCNN_PLATFORM_API | ||
| 87 | + | ||
| 88 | + int submit_and_wait(); | ||
| 89 | + | ||
| 90 | + int reset(); | ||
| 91 | + | ||
| 92 | +#if NCNN_BENCHMARK | ||
| 93 | + int create_query_pool(uint32_t query_count); | ||
| 94 | + | ||
| 95 | + int get_query_pool_results(uint32_t first_query, uint32_t query_count, std::vector<uint64_t>& results); | ||
| 96 | +#endif // NCNN_BENCHMARK | ||
| 97 | + | ||
| 98 | +protected: | ||
| 99 | + const VulkanDevice* vkdev; | ||
| 100 | + | ||
| 101 | + void barrier_readwrite(const VkMat& binding); | ||
| 102 | + void barrier_readwrite(const VkImageMat& binding); | ||
| 103 | + void barrier_readonly(const VkImageMat& binding); | ||
| 104 | + | ||
| 105 | +private: | ||
| 106 | + VkComputePrivate* const d; | ||
| 107 | +}; | ||
| 108 | + | ||
| 109 | +class VkTransferPrivate; | ||
| 110 | +class NCNN_EXPORT VkTransfer | ||
| 111 | +{ | ||
| 112 | +public: | ||
| 113 | + explicit VkTransfer(const VulkanDevice* vkdev); | ||
| 114 | + virtual ~VkTransfer(); | ||
| 115 | + | ||
| 116 | +public: | ||
| 117 | + void record_upload(const Mat& src, VkMat& dst, const Option& opt, bool flatten = true); | ||
| 118 | + | ||
| 119 | + void record_upload(const Mat& src, VkImageMat& dst, const Option& opt); | ||
| 120 | + | ||
| 121 | + int submit_and_wait(); | ||
| 122 | + | ||
| 123 | +protected: | ||
| 124 | + const VulkanDevice* vkdev; | ||
| 125 | + | ||
| 126 | +private: | ||
| 127 | + VkTransferPrivate* const d; | ||
| 128 | +}; | ||
| 129 | + | ||
| 130 | +} // namespace ncnn | ||
| 131 | + | ||
| 132 | +#endif // NCNN_VULKAN | ||
| 133 | + | ||
| 134 | +#endif // NCNN_COMMAND_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/cpu.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_CPU_H | ||
| 16 | +#define NCNN_CPU_H | ||
| 17 | + | ||
| 18 | +#include <stddef.h> | ||
| 19 | + | ||
| 20 | +#if defined _WIN32 | ||
| 21 | +#define WIN32_LEAN_AND_MEAN | ||
| 22 | +#include <windows.h> | ||
| 23 | +#endif | ||
| 24 | +#if defined __ANDROID__ || defined __linux__ | ||
| 25 | +#include <sched.h> // cpu_set_t | ||
| 26 | +#endif | ||
| 27 | + | ||
| 28 | +#include "platform.h" | ||
| 29 | + | ||
| 30 | +namespace ncnn { | ||
| 31 | + | ||
| 32 | +class NCNN_EXPORT CpuSet | ||
| 33 | +{ | ||
| 34 | +public: | ||
| 35 | + CpuSet(); | ||
| 36 | + void enable(int cpu); | ||
| 37 | + void disable(int cpu); | ||
| 38 | + void disable_all(); | ||
| 39 | + bool is_enabled(int cpu) const; | ||
| 40 | + int num_enabled() const; | ||
| 41 | + | ||
| 42 | +public: | ||
| 43 | +#if defined _WIN32 | ||
| 44 | + ULONG_PTR mask; | ||
| 45 | +#endif | ||
| 46 | +#if defined __ANDROID__ || defined __linux__ | ||
| 47 | + cpu_set_t cpu_set; | ||
| 48 | +#endif | ||
| 49 | +#if __APPLE__ | ||
| 50 | + unsigned int policy; | ||
| 51 | +#endif | ||
| 52 | +}; | ||
| 53 | + | ||
| 54 | +// test optional cpu features | ||
| 55 | +// edsp = armv7 edsp | ||
| 56 | +NCNN_EXPORT int cpu_support_arm_edsp(); | ||
| 57 | +// neon = armv7 neon or aarch64 asimd | ||
| 58 | +NCNN_EXPORT int cpu_support_arm_neon(); | ||
| 59 | +// vfpv4 = armv7 fp16 + fma | ||
| 60 | +NCNN_EXPORT int cpu_support_arm_vfpv4(); | ||
| 61 | +// asimdhp = aarch64 asimd half precision | ||
| 62 | +NCNN_EXPORT int cpu_support_arm_asimdhp(); | ||
| 63 | +// cpuid = aarch64 cpuid info | ||
| 64 | +NCNN_EXPORT int cpu_support_arm_cpuid(); | ||
| 65 | +// asimddp = aarch64 asimd dot product | ||
| 66 | +NCNN_EXPORT int cpu_support_arm_asimddp(); | ||
| 67 | +// asimdfhm = aarch64 asimd fhm | ||
| 68 | +NCNN_EXPORT int cpu_support_arm_asimdfhm(); | ||
| 69 | +// bf16 = aarch64 bf16 | ||
| 70 | +NCNN_EXPORT int cpu_support_arm_bf16(); | ||
| 71 | +// i8mm = aarch64 i8mm | ||
| 72 | +NCNN_EXPORT int cpu_support_arm_i8mm(); | ||
| 73 | +// sve = aarch64 sve | ||
| 74 | +NCNN_EXPORT int cpu_support_arm_sve(); | ||
| 75 | +// sve2 = aarch64 sve2 | ||
| 76 | +NCNN_EXPORT int cpu_support_arm_sve2(); | ||
| 77 | +// svebf16 = aarch64 svebf16 | ||
| 78 | +NCNN_EXPORT int cpu_support_arm_svebf16(); | ||
| 79 | +// svei8mm = aarch64 svei8mm | ||
| 80 | +NCNN_EXPORT int cpu_support_arm_svei8mm(); | ||
| 81 | +// svef32mm = aarch64 svef32mm | ||
| 82 | +NCNN_EXPORT int cpu_support_arm_svef32mm(); | ||
| 83 | + | ||
| 84 | +// avx = x86 avx | ||
| 85 | +NCNN_EXPORT int cpu_support_x86_avx(); | ||
| 86 | +// fma = x86 fma | ||
| 87 | +NCNN_EXPORT int cpu_support_x86_fma(); | ||
| 88 | +// xop = x86 xop | ||
| 89 | +NCNN_EXPORT int cpu_support_x86_xop(); | ||
| 90 | +// f16c = x86 f16c | ||
| 91 | +NCNN_EXPORT int cpu_support_x86_f16c(); | ||
| 92 | +// avx2 = x86 avx2 + fma + f16c | ||
| 93 | +NCNN_EXPORT int cpu_support_x86_avx2(); | ||
| 94 | +// avx_vnni = x86 avx vnni | ||
| 95 | +NCNN_EXPORT int cpu_support_x86_avx_vnni(); | ||
| 96 | +// avx_vnni_int8 = x86 avx vnni int8 | ||
| 97 | +NCNN_EXPORT int cpu_support_x86_avx_vnni_int8(); | ||
| 98 | +// avx_vnni_int16 = x86 avx vnni int16 | ||
| 99 | +NCNN_EXPORT int cpu_support_x86_avx_vnni_int16(); | ||
| 100 | +// avx_ne_convert = x86 avx ne convert | ||
| 101 | +NCNN_EXPORT int cpu_support_x86_avx_ne_convert(); | ||
| 102 | +// avx512 = x86 avx512f + avx512cd + avx512bw + avx512dq + avx512vl | ||
| 103 | +NCNN_EXPORT int cpu_support_x86_avx512(); | ||
| 104 | +// avx512_vnni = x86 avx512 vnni | ||
| 105 | +NCNN_EXPORT int cpu_support_x86_avx512_vnni(); | ||
| 106 | +// avx512_bf16 = x86 avx512 bf16 | ||
| 107 | +NCNN_EXPORT int cpu_support_x86_avx512_bf16(); | ||
| 108 | +// avx512_fp16 = x86 avx512 fp16 | ||
| 109 | +NCNN_EXPORT int cpu_support_x86_avx512_fp16(); | ||
| 110 | + | ||
| 111 | +// lsx = loongarch lsx | ||
| 112 | +NCNN_EXPORT int cpu_support_loongarch_lsx(); | ||
| 113 | +// lasx = loongarch lasx | ||
| 114 | +NCNN_EXPORT int cpu_support_loongarch_lasx(); | ||
| 115 | + | ||
| 116 | +// msa = mips msa | ||
| 117 | +NCNN_EXPORT int cpu_support_mips_msa(); | ||
| 118 | +// mmi = loongson mmi | ||
| 119 | +NCNN_EXPORT int cpu_support_loongson_mmi(); | ||
| 120 | + | ||
| 121 | +// v = riscv vector | ||
| 122 | +NCNN_EXPORT int cpu_support_riscv_v(); | ||
| 123 | +// zfh = riscv half-precision float | ||
| 124 | +NCNN_EXPORT int cpu_support_riscv_zfh(); | ||
| 125 | +// zvfh = riscv vector half-precision float | ||
| 126 | +NCNN_EXPORT int cpu_support_riscv_zvfh(); | ||
| 127 | +// xtheadvector = riscv xtheadvector | ||
| 128 | +NCNN_EXPORT int cpu_support_riscv_xtheadvector(); | ||
| 129 | +// vlenb = riscv vector length in bytes | ||
| 130 | +NCNN_EXPORT int cpu_riscv_vlenb(); | ||
| 131 | + | ||
| 132 | +// cpu info | ||
| 133 | +NCNN_EXPORT int get_cpu_count(); | ||
| 134 | +NCNN_EXPORT int get_little_cpu_count(); | ||
| 135 | +NCNN_EXPORT int get_big_cpu_count(); | ||
| 136 | + | ||
| 137 | +NCNN_EXPORT int get_physical_cpu_count(); | ||
| 138 | +NCNN_EXPORT int get_physical_little_cpu_count(); | ||
| 139 | +NCNN_EXPORT int get_physical_big_cpu_count(); | ||
| 140 | + | ||
| 141 | +// cpu l2 varies from 64k to 1M, but l3 can be zero | ||
| 142 | +NCNN_EXPORT int get_cpu_level2_cache_size(); | ||
| 143 | +NCNN_EXPORT int get_cpu_level3_cache_size(); | ||
| 144 | + | ||
| 145 | +// bind all threads on little clusters if powersave enabled | ||
| 146 | +// affects HMP arch cpu like ARM big.LITTLE | ||
| 147 | +// only implemented on android at the moment | ||
| 148 | +// switching powersave is expensive and not thread-safe | ||
| 149 | +// 0 = all cores enabled (default) | ||
| 150 | +// 1 = only little clusters enabled | ||
| 151 | +// 2 = only big clusters enabled | ||
| 152 | +// return 0 if success for setter function | ||
| 153 | +NCNN_EXPORT int get_cpu_powersave(); | ||
| 154 | +NCNN_EXPORT int set_cpu_powersave(int powersave); | ||
| 155 | + | ||
| 156 | +// convenient wrapper | ||
| 157 | +NCNN_EXPORT const CpuSet& get_cpu_thread_affinity_mask(int powersave); | ||
| 158 | + | ||
| 159 | +// set explicit thread affinity | ||
| 160 | +NCNN_EXPORT int set_cpu_thread_affinity(const CpuSet& thread_affinity_mask); | ||
| 161 | + | ||
| 162 | +// runtime thread affinity info | ||
| 163 | +NCNN_EXPORT int is_current_thread_running_on_a53_a55(); | ||
| 164 | + | ||
| 165 | +// misc function wrapper for openmp routines | ||
| 166 | +NCNN_EXPORT int get_omp_num_threads(); | ||
| 167 | +NCNN_EXPORT void set_omp_num_threads(int num_threads); | ||
| 168 | + | ||
| 169 | +NCNN_EXPORT int get_omp_dynamic(); | ||
| 170 | +NCNN_EXPORT void set_omp_dynamic(int dynamic); | ||
| 171 | + | ||
| 172 | +NCNN_EXPORT int get_omp_thread_num(); | ||
| 173 | + | ||
| 174 | +NCNN_EXPORT int get_kmp_blocktime(); | ||
| 175 | +NCNN_EXPORT void set_kmp_blocktime(int time_ms); | ||
| 176 | + | ||
| 177 | +// need to flush denormals on Intel Chipset. | ||
| 178 | +// Other architectures such as ARM can be added as needed. | ||
| 179 | +// 0 = DAZ OFF, FTZ OFF | ||
| 180 | +// 1 = DAZ ON , FTZ OFF | ||
| 181 | +// 2 = DAZ OFF, FTZ ON | ||
| 182 | +// 3 = DAZ ON, FTZ ON | ||
| 183 | +NCNN_EXPORT int get_flush_denormals(); | ||
| 184 | +NCNN_EXPORT int set_flush_denormals(int flush_denormals); | ||
| 185 | + | ||
| 186 | +} // namespace ncnn | ||
| 187 | + | ||
| 188 | +#endif // NCNN_CPU_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/datareader.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_DATAREADER_H | ||
| 16 | +#define NCNN_DATAREADER_H | ||
| 17 | + | ||
| 18 | +#include "platform.h" | ||
| 19 | +#if NCNN_STDIO | ||
| 20 | +#include <stdio.h> | ||
| 21 | +#endif | ||
| 22 | + | ||
| 23 | +#if NCNN_PLATFORM_API | ||
| 24 | +#if __ANDROID_API__ >= 9 | ||
| 25 | +#include <android/asset_manager.h> | ||
| 26 | +#endif | ||
| 27 | +#endif // NCNN_PLATFORM_API | ||
| 28 | + | ||
| 29 | +namespace ncnn { | ||
| 30 | + | ||
| 31 | +// data read wrapper | ||
| | +// Base interface for reading ncnn param/model data from an arbitrary | ||
| | +// source; the concrete readers declared below (stdio FILE, memory | ||
| | +// buffer, Android asset) derive from it and override what they support. | ||
| 32 | +class NCNN_EXPORT DataReader | ||
| 33 | +{ | ||
| 34 | +public: | ||
| 35 | + DataReader(); | ||
| 36 | + virtual ~DataReader(); | ||
| 37 | + | ||
| 38 | +#if NCNN_STRING | ||
| 39 | + // parse plain param text | ||
| 40 | + // return 1 if scan success | ||
| 41 | + virtual int scan(const char* format, void* p) const; | ||
| 42 | +#endif // NCNN_STRING | ||
| 43 | + | ||
| 44 | + // read binary param and model data | ||
| 45 | + // return bytes read | ||
| 46 | + virtual size_t read(void* buf, size_t size) const; | ||
| 47 | + | ||
| 48 | + // get model data reference | ||
| 49 | + // return bytes referenced | ||
| | + // NOTE(review): presumably enables zero-copy loading when the source is | ||
| | + // memory-resident (only DataReaderFromMemory overrides it below) — | ||
| | + // confirm in the implementation | ||
| 50 | + virtual size_t reference(size_t size, const void** buf) const; | ||
| 51 | +}; | ||
| 52 | + | ||
| 53 | +#if NCNN_STDIO | ||
| 54 | +class DataReaderFromStdioPrivate; | ||
| | +// DataReader backed by a caller-supplied stdio FILE*. | ||
| | +// NOTE(review): no close/ownership member is declared, so the FILE* | ||
| | +// appears to remain owned by the caller — confirm in the implementation. | ||
| 55 | +class NCNN_EXPORT DataReaderFromStdio : public DataReader | ||
| 56 | +{ | ||
| 57 | +public: | ||
| 58 | + explicit DataReaderFromStdio(FILE* fp); | ||
| 59 | + virtual ~DataReaderFromStdio(); | ||
| 60 | + | ||
| 61 | +#if NCNN_STRING | ||
| 62 | + virtual int scan(const char* format, void* p) const; | ||
| 63 | +#endif // NCNN_STRING | ||
| 64 | + virtual size_t read(void* buf, size_t size) const; | ||
| 65 | + | ||
| | + // non-copyable | ||
| 66 | +private: | ||
| 67 | + DataReaderFromStdio(const DataReaderFromStdio&); | ||
| 68 | + DataReaderFromStdio& operator=(const DataReaderFromStdio&); | ||
| 69 | + | ||
| | + // pimpl | ||
| 70 | +private: | ||
| 71 | + DataReaderFromStdioPrivate* const d; | ||
| 72 | +}; | ||
| 73 | +#endif // NCNN_STDIO | ||
| 74 | + | ||
| 75 | +class DataReaderFromMemoryPrivate; | ||
| | +// DataReader backed by an in-memory buffer. | ||
| | +// NOTE(review): the constructor takes the caller's pointer by reference, | ||
| | +// so it presumably advances that pointer as data is consumed and the | ||
| | +// pointer must outlive this reader — confirm in the implementation. | ||
| 76 | +class NCNN_EXPORT DataReaderFromMemory : public DataReader | ||
| 77 | +{ | ||
| 78 | +public: | ||
| 79 | + explicit DataReaderFromMemory(const unsigned char*& mem); | ||
| 80 | + virtual ~DataReaderFromMemory(); | ||
| 81 | + | ||
| 82 | +#if NCNN_STRING | ||
| 83 | + virtual int scan(const char* format, void* p) const; | ||
| 84 | +#endif // NCNN_STRING | ||
| 85 | + virtual size_t read(void* buf, size_t size) const; | ||
| | + // NOTE(review): overridden here, presumably to expose the underlying | ||
| | + // buffer without copying — see DataReader::reference | ||
| 86 | + virtual size_t reference(size_t size, const void** buf) const; | ||
| 87 | + | ||
| | + // non-copyable | ||
| 88 | +private: | ||
| 89 | + DataReaderFromMemory(const DataReaderFromMemory&); | ||
| 90 | + DataReaderFromMemory& operator=(const DataReaderFromMemory&); | ||
| 91 | + | ||
| | + // pimpl | ||
| 92 | +private: | ||
| 93 | + DataReaderFromMemoryPrivate* const d; | ||
| 94 | +}; | ||
| 95 | + | ||
| 96 | +#if NCNN_PLATFORM_API | ||
| 97 | +#if __ANDROID_API__ >= 9 | ||
| 98 | +class DataReaderFromAndroidAssetPrivate; | ||
| | +// DataReader backed by an Android NDK AAsset (compiled only for | ||
| | +// __ANDROID_API__ >= 9, see the surrounding guards). | ||
| | +// NOTE(review): no close/ownership member is declared, so the AAsset | ||
| | +// appears to remain owned by the caller — confirm in the implementation. | ||
| 99 | +class NCNN_EXPORT DataReaderFromAndroidAsset : public DataReader | ||
| 100 | +{ | ||
| 101 | +public: | ||
| 102 | + explicit DataReaderFromAndroidAsset(AAsset* asset); | ||
| 103 | + virtual ~DataReaderFromAndroidAsset(); | ||
| 104 | + | ||
| 105 | +#if NCNN_STRING | ||
| 106 | + virtual int scan(const char* format, void* p) const; | ||
| 107 | +#endif // NCNN_STRING | ||
| 108 | + virtual size_t read(void* buf, size_t size) const; | ||
| 109 | + | ||
| | + // non-copyable | ||
| 110 | +private: | ||
| 111 | + DataReaderFromAndroidAsset(const DataReaderFromAndroidAsset&); | ||
| 112 | + DataReaderFromAndroidAsset& operator=(const DataReaderFromAndroidAsset&); | ||
| 113 | + | ||
| | + // pimpl | ||
| 114 | +private: | ||
| 115 | + DataReaderFromAndroidAssetPrivate* const d; | ||
| 116 | +}; | ||
| 117 | +#endif // __ANDROID_API__ >= 9 | ||
| 118 | +#endif // NCNN_PLATFORM_API | ||
| 119 | + | ||
| 120 | +} // namespace ncnn | ||
| 121 | + | ||
| 122 | +#endif // NCNN_DATAREADER_H |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/expression.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2025 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#include "mat.h" | ||
| 16 | + | ||
| 17 | +namespace ncnn { | ||
| 18 | + | ||
| 19 | +// count how many blobs are referenced inside expression | ||
| 20 | +NCNN_EXPORT int count_expression_blobs(const std::string& expr); | ||
| 21 | + | ||
| 22 | +// resolve reshape shape from expression and input blobs | ||
| 23 | +// resolve slice indices(starts, ends) from expression and input blobs | ||
| 24 | +// see docs/developer-guide/expression.md | ||
| 25 | +// return 0 if success | ||
| 26 | +NCNN_EXPORT int eval_list_expression(const std::string& expr, const std::vector<Mat>& blobs, std::vector<int>& outlist); | ||
| 27 | + | ||
| 28 | +} // namespace ncnn |
livekit-android-sdk/src/main/jni/ncnn-20250503-android-vulkan/arm64-v8a/include/ncnn/gpu.h
0 → 100644
| 1 | +// Tencent is pleased to support the open source community by making ncnn available. | ||
| 2 | +// | ||
| 3 | +// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. | ||
| 4 | +// | ||
| 5 | +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except | ||
| 6 | +// in compliance with the License. You may obtain a copy of the License at | ||
| 7 | +// | ||
| 8 | +// https://opensource.org/licenses/BSD-3-Clause | ||
| 9 | +// | ||
| 10 | +// Unless required by applicable law or agreed to in writing, software distributed | ||
| 11 | +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR | ||
| 12 | +// CONDITIONS OF ANY KIND, either express or implied. See the License for the | ||
| 13 | +// specific language governing permissions and limitations under the License. | ||
| 14 | + | ||
| 15 | +#ifndef NCNN_GPU_H | ||
| 16 | +#define NCNN_GPU_H | ||
| 17 | + | ||
| 18 | +#include "platform.h" | ||
| 19 | + | ||
| 20 | +#if NCNN_VULKAN | ||
| 21 | + | ||
| 22 | +#include "mat.h" | ||
| 23 | + | ||
| 24 | +namespace ncnn { | ||
| 25 | + | ||
| 26 | +// instance | ||
| 27 | + | ||
| 28 | +// Create VkInstance and initialize GPU-related global state. | ||
| 29 | +// Creates the VkInstance object, checks the extension attributes supported by the Vulkan instance, | ||
| 30 | +// sets up Vulkan validation layers (if ENABLE_VALIDATION_LAYER is enabled), | ||
| 31 | +// and iterates over all supported physical devices, etc. | ||
| 32 | +NCNN_EXPORT int create_gpu_instance(const char* driver_path = 0); | ||
| 33 | + | ||
| 34 | +// Get global VkInstance variable | ||
| 35 | +// Must be called after create_gpu_instance() and before destroy_gpu_instance() | ||
| 36 | +NCNN_EXPORT VkInstance get_gpu_instance(); | ||
| 37 | + | ||
| 38 | +// Destroy VkInstance object and free the memory of the associated object | ||
| 39 | +// Usually called in the destructor of the main program exit | ||
| 40 | +// The function will internally ensure that all vulkan devices are idle before proceeding with destruction. | ||
| 41 | +NCNN_EXPORT void destroy_gpu_instance(); | ||
| 42 | + | ||
| 43 | +// vulkan core | ||
| 44 | +extern PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; | ||
| 45 | +extern PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; | ||
| 46 | +extern PFN_vkAllocateMemory vkAllocateMemory; | ||
| 47 | +extern PFN_vkBeginCommandBuffer vkBeginCommandBuffer; | ||
| 48 | +extern PFN_vkBindBufferMemory vkBindBufferMemory; | ||
| 49 | +extern PFN_vkBindImageMemory vkBindImageMemory; | ||
| 50 | +extern PFN_vkCmdBeginQuery vkCmdBeginQuery; | ||
| 51 | +extern PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; | ||
| 52 | +extern PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; | ||
| 53 | +extern PFN_vkCmdBindPipeline vkCmdBindPipeline; | ||
| 54 | +extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; | ||
| 55 | +extern PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; | ||
| 56 | +extern PFN_vkCmdCopyImage vkCmdCopyImage; | ||
| 57 | +extern PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; | ||
| 58 | +extern PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults; | ||
| 59 | +extern PFN_vkCmdDispatch vkCmdDispatch; | ||
| 60 | +extern PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect; | ||
| 61 | +extern PFN_vkCmdEndQuery vkCmdEndQuery; | ||
| 62 | +extern PFN_vkCmdExecuteCommands vkCmdExecuteCommands; | ||
| 63 | +extern PFN_vkCmdFillBuffer vkCmdFillBuffer; | ||
| 64 | +extern PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; | ||
| 65 | +extern PFN_vkCmdPushConstants vkCmdPushConstants; | ||
| 66 | +extern PFN_vkCmdResetQueryPool vkCmdResetQueryPool; | ||
| 67 | +extern PFN_vkCmdResolveImage vkCmdResolveImage; | ||
| 68 | +extern PFN_vkCmdUpdateBuffer vkCmdUpdateBuffer; | ||
| 69 | +extern PFN_vkCmdWriteTimestamp vkCmdWriteTimestamp; | ||
| 70 | +extern PFN_vkCreateBuffer vkCreateBuffer; | ||
| 71 | +extern PFN_vkCreateBufferView vkCreateBufferView; | ||
| 72 | +extern PFN_vkCreateCommandPool vkCreateCommandPool; | ||
| 73 | +extern PFN_vkCreateComputePipelines vkCreateComputePipelines; | ||
| 74 | +extern PFN_vkCreateDescriptorPool vkCreateDescriptorPool; | ||
| 75 | +extern PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; | ||
| 76 | +extern PFN_vkCreateDevice vkCreateDevice; | ||
| 77 | +extern PFN_vkCreateFence vkCreateFence; | ||
| 78 | +extern PFN_vkCreateImage vkCreateImage; | ||
| 79 | +extern PFN_vkCreateImageView vkCreateImageView; | ||
| 80 | +extern PFN_vkCreatePipelineCache vkCreatePipelineCache; | ||
| 81 | +extern PFN_vkCreatePipelineLayout vkCreatePipelineLayout; | ||
| 82 | +extern PFN_vkCreateQueryPool vkCreateQueryPool; | ||
| 83 | +extern PFN_vkCreateSampler vkCreateSampler; | ||
| 84 | +extern PFN_vkCreateSemaphore vkCreateSemaphore; | ||
| 85 | +extern PFN_vkCreateShaderModule vkCreateShaderModule; | ||
| 86 | +extern PFN_vkDestroyBuffer vkDestroyBuffer; | ||
| 87 | +extern PFN_vkDestroyBufferView vkDestroyBufferView; | ||
| 88 | +extern PFN_vkDestroyCommandPool vkDestroyCommandPool; | ||
| 89 | +extern PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; | ||
| 90 | +extern PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; | ||
| 91 | +extern PFN_vkDestroyDevice vkDestroyDevice; | ||
| 92 | +extern PFN_vkDestroyFence vkDestroyFence; | ||
| 93 | +extern PFN_vkDestroyImage vkDestroyImage; | ||
| 94 | +extern PFN_vkDestroyImageView vkDestroyImageView; | ||
| 95 | +extern PFN_vkDestroyInstance vkDestroyInstance; | ||
| 96 | +extern PFN_vkDestroyPipeline vkDestroyPipeline; | ||
| 97 | +extern PFN_vkDestroyPipelineCache vkDestroyPipelineCache; | ||
| 98 | +extern PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; | ||
| 99 | +extern PFN_vkDestroyQueryPool vkDestroyQueryPool; | ||
| 100 | +extern PFN_vkDestroySampler vkDestroySampler; | ||
| 101 | +extern PFN_vkDestroySemaphore vkDestroySemaphore; | ||
| 102 | +extern PFN_vkDestroyShaderModule vkDestroyShaderModule; | ||
| 103 | +extern PFN_vkDeviceWaitIdle vkDeviceWaitIdle; | ||
| 104 | +extern PFN_vkEndCommandBuffer vkEndCommandBuffer; | ||
| 105 | +extern PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; | ||
| 106 | +extern PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerProperties; | ||
| 107 | +extern PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; | ||
| 108 | +extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; | ||
| 109 | +extern PFN_vkFreeCommandBuffers vkFreeCommandBuffers; | ||
| 110 | +extern PFN_vkFreeDescriptorSets vkFreeDescriptorSets; | ||
| 111 | +extern PFN_vkFreeMemory vkFreeMemory; | ||
| 112 | +extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; | ||
| 113 | +extern PFN_vkGetDeviceMemoryCommitment vkGetDeviceMemoryCommitment; | ||
| 114 | +extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; | ||
| 115 | +extern PFN_vkGetDeviceQueue vkGetDeviceQueue; | ||
| 116 | +extern PFN_vkGetFenceStatus vkGetFenceStatus; | ||
| 117 | +extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; | ||
| 118 | +extern PFN_vkGetImageSubresourceLayout vkGetImageSubresourceLayout; | ||
| 119 | +extern PFN_vkGetPhysicalDeviceFeatures vkGetPhysicalDeviceFeatures; | ||
| 120 | +extern PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; | ||
| 121 | +extern PFN_vkGetPhysicalDeviceImageFormatProperties vkGetPhysicalDeviceImageFormatProperties; | ||
| 122 | +extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; | ||
| 123 | +extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; | ||
| 124 | +extern PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; | ||
| 125 | +extern PFN_vkGetPipelineCacheData vkGetPipelineCacheData; | ||
| 126 | +extern PFN_vkGetQueryPoolResults vkGetQueryPoolResults; | ||
| 127 | +extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; | ||
| 128 | +extern PFN_vkMapMemory vkMapMemory; | ||
| 129 | +extern PFN_vkMergePipelineCaches vkMergePipelineCaches; | ||
| 130 | +extern PFN_vkQueueSubmit vkQueueSubmit; | ||
| 131 | +extern PFN_vkQueueWaitIdle vkQueueWaitIdle; | ||
| 132 | +extern PFN_vkResetCommandBuffer vkResetCommandBuffer; | ||
| 133 | +extern PFN_vkResetCommandPool vkResetCommandPool; | ||
| 134 | +extern PFN_vkResetDescriptorPool vkResetDescriptorPool; | ||
| 135 | +extern PFN_vkResetFences vkResetFences; | ||
| 136 | +extern PFN_vkUnmapMemory vkUnmapMemory; | ||
| 137 | +extern PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; | ||
| 138 | +extern PFN_vkWaitForFences vkWaitForFences; | ||
| 139 | + | ||
| 140 | +// instance extension capability | ||
| 141 | +extern int support_VK_KHR_external_memory_capabilities; | ||
| 142 | +extern int support_VK_KHR_get_physical_device_properties2; | ||
| 143 | +extern int support_VK_KHR_get_surface_capabilities2; | ||
| 144 | +extern int support_VK_KHR_surface; | ||
| 145 | +extern int support_VK_EXT_debug_utils; | ||
| 146 | +extern int support_VK_EXT_validation_features; | ||
| 147 | +extern int support_VK_EXT_validation_flags; | ||
| 148 | +#if __ANDROID_API__ >= 26 | ||
| 149 | +extern int support_VK_KHR_android_surface; | ||
| 150 | +#endif // __ANDROID_API__ >= 26 | ||
| 151 | + | ||
| 152 | +// VK_KHR_cooperative_matrix | ||
| 153 | +extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR; | ||
| 154 | + | ||
| 155 | +// VK_KHR_external_memory_capabilities | ||
| 156 | +extern PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR vkGetPhysicalDeviceExternalBufferPropertiesKHR; | ||
| 157 | + | ||
| 158 | +// VK_KHR_get_physical_device_properties2 | ||
| 159 | +extern PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; | ||
| 160 | +extern PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; | ||
| 161 | +extern PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR; | ||
| 162 | +extern PFN_vkGetPhysicalDeviceImageFormatProperties2KHR vkGetPhysicalDeviceImageFormatProperties2KHR; | ||
| 163 | +extern PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR vkGetPhysicalDeviceQueueFamilyProperties2KHR; | ||
| 164 | +extern PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR; | ||
| 165 | + | ||
| 166 | +// VK_KHR_get_surface_capabilities2 | ||
| 167 | +extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR vkGetPhysicalDeviceSurfaceCapabilities2KHR; | ||
| 168 | +extern PFN_vkGetPhysicalDeviceSurfaceFormats2KHR vkGetPhysicalDeviceSurfaceFormats2KHR; | ||
| 169 | + | ||
| 170 | +// VK_KHR_surface | ||
| 171 | +extern PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; | ||
| 172 | +extern PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; | ||
| 173 | +extern PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; | ||
| 174 | +extern PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; | ||
| 175 | +extern PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; | ||
| 176 | + | ||
| 177 | +#if __ANDROID_API__ >= 26 | ||
| 178 | +// VK_KHR_android_surface | ||
| 179 | +extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR; | ||
| 180 | +#endif // __ANDROID_API__ >= 26 | ||
| 181 | + | ||
| 182 | +// VK_NV_cooperative_matrix | ||
| 183 | +extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV; | ||
| 184 | + | ||
| 185 | +// get info | ||
| 186 | +NCNN_EXPORT int get_gpu_count(); | ||
| 187 | +NCNN_EXPORT int get_default_gpu_index(); | ||
| 188 | + | ||
| 189 | +class GpuInfoPrivate; | ||
| | +// Read-only description of one Vulkan physical device: its features, | ||
| | +// hardware limits, queue families, subgroup capabilities and extension | ||
| | +// support. Instances are filled in by create_gpu_instance() (declared a | ||
| | +// friend below) and looked up with get_gpu_info(). | ||
| 190 | +class NCNN_EXPORT GpuInfo | ||
| 191 | +{ | ||
| 192 | +public: | ||
| 193 | + explicit GpuInfo(); | ||
| 194 | + virtual ~GpuInfo(); | ||
| 195 | + | ||
| 196 | + int device_index() const; | ||
| 197 | + | ||
| 198 | + // vulkan physical device | ||
| 199 | + VkPhysicalDevice physicalDevice() const; | ||
| 200 | + VkPhysicalDevice physical_device() const; // api compatibility | ||
| 201 | + | ||
| 202 | + // features | ||
| 203 | + const VkPhysicalDeviceFeatures& physicalDevicefeatures() const; | ||
| 204 | + | ||
| 205 | + // properties | ||
| 206 | + const VkPhysicalDeviceProperties& physicalDeviceProperties() const; | ||
| 207 | + | ||
| 208 | + // memory properties | ||
| 209 | + const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const; | ||
| 210 | + const VkPhysicalDeviceMemoryProperties& physical_device_memory_properties() const; // api compatibility | ||
| 211 | + | ||
| 212 | + // extension properties | ||
| 213 | + const std::vector<VkExtensionProperties>& deviceExtensionProperties() const; | ||
| 214 | + | ||
| 215 | + // info | ||
| 216 | + uint32_t api_version() const; | ||
| 217 | + uint32_t driver_version() const; | ||
| 218 | + uint32_t vendor_id() const; | ||
| 219 | + uint32_t device_id() const; | ||
| 220 | + const char* device_name() const; | ||
| | + // NOTE(review): pointer to the driver-reported pipeline cache UUID | ||
| | + // bytes (VK_UUID_SIZE long per the Vulkan spec) — confirm length | ||
| 221 | + uint8_t* pipeline_cache_uuid() const; | ||
| 222 | + | ||
| 223 | + // driver properties | ||
| 224 | + uint32_t driver_id() const; | ||
| 225 | + const char* driver_name() const; | ||
| 226 | + | ||
| 227 | + // 0 = discrete gpu | ||
| 228 | + // 1 = integrated gpu | ||
| 229 | + // 2 = virtual gpu | ||
| 230 | + // 3 = cpu | ||
| 231 | + int type() const; | ||
| 232 | + | ||
| 233 | + // hardware limit | ||
| 234 | + uint32_t max_shared_memory_size() const; | ||
| 235 | + uint32_t max_workgroup_count_x() const; | ||
| 236 | + uint32_t max_workgroup_count_y() const; | ||
| 237 | + uint32_t max_workgroup_count_z() const; | ||
| 238 | + uint32_t max_workgroup_invocations() const; | ||
| 239 | + uint32_t max_workgroup_size_x() const; | ||
| 240 | + uint32_t max_workgroup_size_y() const; | ||
| 241 | + uint32_t max_workgroup_size_z() const; | ||
| 242 | + size_t memory_map_alignment() const; | ||
| 243 | + size_t buffer_offset_alignment() const; | ||
| 244 | + size_t non_coherent_atom_size() const; | ||
| 245 | + size_t buffer_image_granularity() const; | ||
| 246 | + uint32_t max_image_dimension_1d() const; | ||
| 247 | + uint32_t max_image_dimension_2d() const; | ||
| 248 | + uint32_t max_image_dimension_3d() const; | ||
| 249 | + float timestamp_period() const; | ||
| 250 | + | ||
| 251 | + // runtime | ||
| 252 | + uint32_t compute_queue_family_index() const; | ||
| 253 | + uint32_t graphics_queue_family_index() const; | ||
| 254 | + uint32_t transfer_queue_family_index() const; | ||
| 255 | + | ||
| 256 | + uint32_t compute_queue_count() const; | ||
| 257 | + uint32_t graphics_queue_count() const; | ||
| 258 | + uint32_t transfer_queue_count() const; | ||
| 259 | + | ||
| 260 | + // property | ||
| 261 | + bool unified_compute_transfer_queue() const; | ||
| 262 | + | ||
| 263 | + // subgroup | ||
| 264 | + uint32_t subgroup_size() const; | ||
| 265 | + uint32_t min_subgroup_size() const; | ||
| 266 | + uint32_t max_subgroup_size() const; | ||
| 267 | + uint32_t max_compute_workgroup_subgroups() const; | ||
| 268 | + bool support_subgroup_size_control() const; | ||
| 269 | + bool support_compute_full_subgroups() const; | ||
| 270 | + uint32_t support_subgroup_ops() const; | ||
| 271 | + | ||
| 272 | + // bug is not feature | ||
| 273 | + bool bug_storage_buffer_no_l1() const; | ||
| 274 | + bool bug_corrupted_online_pipeline_cache() const; | ||
| 275 | + bool bug_buffer_image_load_zero() const; | ||
| 276 | + | ||
| 277 | + // but sometimes bug is a feature | ||
| 278 | + bool bug_implicit_fp16_arithmetic() const; | ||
| 279 | + | ||
| 280 | + // fp16 and int8 feature | ||
| 281 | + bool support_fp16_packed() const; | ||
| 282 | + bool support_fp16_storage() const; | ||
| 283 | + bool support_fp16_uniform() const; | ||
| 284 | + bool support_fp16_arithmetic() const; | ||
| 285 | + bool support_int8_packed() const; | ||
| 286 | + bool support_int8_storage() const; | ||
| 287 | + bool support_int8_uniform() const; | ||
| 288 | + bool support_int8_arithmetic() const; | ||
| 289 | + | ||
| 290 | + // r16f format in storage image | ||
| 291 | + bool support_fp16_image() const; | ||
| 292 | + | ||
| 293 | + // ycbcr conversion feature | ||
| 294 | + bool support_ycbcr_conversion() const; | ||
| 295 | + | ||
| 296 | + // cooperative matrix feature | ||
| 297 | + bool support_cooperative_matrix() const; | ||
| 298 | + bool support_cooperative_matrix_8_8_16() const; | ||
| 299 | + bool support_cooperative_matrix_16_8_8() const; | ||
| 300 | + bool support_cooperative_matrix_16_8_16() const; | ||
| 301 | + bool support_cooperative_matrix_16_16_16() const; | ||
| 302 | + | ||
| 303 | + // extension capability | ||
| 304 | + int support_VK_KHR_8bit_storage() const; | ||
| 305 | + int support_VK_KHR_16bit_storage() const; | ||
| 306 | + int support_VK_KHR_bind_memory2() const; | ||
| 307 | + int support_VK_KHR_buffer_device_address() const; | ||
| 308 | + int support_VK_KHR_create_renderpass2() const; | ||
| 309 | + int support_VK_KHR_cooperative_matrix() const; | ||
| 310 | + int support_VK_KHR_dedicated_allocation() const; | ||
| 311 | + int support_VK_KHR_descriptor_update_template() const; | ||
| 312 | + int support_VK_KHR_driver_properties() const; | ||
| 313 | + int support_VK_KHR_external_memory() const; | ||
| 314 | + int support_VK_KHR_get_memory_requirements2() const; | ||
| 315 | + int support_VK_KHR_maintenance1() const; | ||
| 316 | + int support_VK_KHR_maintenance2() const; | ||
| 317 | + int support_VK_KHR_maintenance3() const; | ||
| 318 | + int support_VK_KHR_multiview() const; | ||
| 319 | + int support_VK_KHR_portability_subset() const; | ||
| 320 | + int support_VK_KHR_push_descriptor() const; | ||
| 321 | + int support_VK_KHR_sampler_ycbcr_conversion() const; | ||
| 322 | + int support_VK_KHR_shader_float16_int8() const; | ||
| 323 | + int support_VK_KHR_shader_float_controls() const; | ||
| 324 | + int support_VK_KHR_shader_non_semantic_info() const; | ||
| 325 | + int support_VK_KHR_shader_subgroup_extended_types() const; | ||
| 326 | + int support_VK_KHR_shader_subgroup_rotate() const; | ||
| 327 | + int support_VK_KHR_storage_buffer_storage_class() const; | ||
| 328 | + int support_VK_KHR_swapchain() const; | ||
| 329 | + int support_VK_KHR_zero_initialize_workgroup_memory() const; | ||
| 330 | + int support_VK_EXT_buffer_device_address() const; | ||
| 331 | + int support_VK_EXT_descriptor_indexing() const; | ||
| 332 | + int support_VK_EXT_memory_budget() const; | ||
| 333 | + int support_VK_EXT_memory_priority() const; | ||
| 334 | + int support_VK_EXT_queue_family_foreign() const; | ||
| 335 | + int support_VK_EXT_shader_atomic_float() const; | ||
| 336 | + int support_VK_EXT_shader_atomic_float2() const; | ||
| 337 | + int support_VK_EXT_subgroup_size_control() const; | ||
| 338 | + int support_VK_AMD_device_coherent_memory() const; | ||
| 339 | +#if __ANDROID_API__ >= 26 | ||
| 340 | + int support_VK_ANDROID_external_memory_android_hardware_buffer() const; | ||
| 341 | +#endif // __ANDROID_API__ >= 26 | ||
| 342 | + int support_VK_NV_cooperative_matrix() const; | ||
| 343 | + | ||
| 344 | + // extension features | ||
| 345 | + const void* queryExtensionFeatures() const; | ||
| 346 | + const VkPhysicalDevice8BitStorageFeaturesKHR& query8BitStorageFeatures() const; | ||
| 347 | + const VkPhysicalDevice16BitStorageFeaturesKHR& query16BitStorageFeatures() const; | ||
| 348 | + const VkPhysicalDeviceFloat16Int8FeaturesKHR& queryFloat16Int8Features() const; | ||
| 349 | + const VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR& querySamplerYcbcrConversionFeatures() const; | ||
| 350 | + const VkPhysicalDeviceCooperativeMatrixFeaturesKHR& queryCooperativeMatrixFeatures() const; | ||
| 351 | + const VkPhysicalDeviceCooperativeMatrixFeaturesNV& queryCooperativeMatrixFeaturesNV() const; | ||
| 352 | + const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& querySubgroupSizeControlFeatures() const; | ||
| 353 | + const VkPhysicalDeviceShaderSubgroupRotateFeaturesKHR& queryShaderSubgroupRotateFeatures() const; | ||
| 354 | + const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT& queryShaderAtomicFloatFeatures() const; | ||
| 355 | + const VkPhysicalDeviceShaderAtomicFloat2FeaturesEXT& queryShaderAtomicFloat2Features() const; | ||
| 356 | + | ||
| 357 | + // extension properties | ||
| 358 | + const void* queryDeviceProperties() const; | ||
| 359 | + const VkPhysicalDeviceSubgroupProperties& querySubgroupProperties() const; | ||
| 360 | + const VkPhysicalDeviceDriverPropertiesKHR& queryDriverProperties() const; | ||
| 361 | + const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& querySubgroupSizeControlProperties() const; | ||
| 362 | + | ||
| | + // non-copyable | ||
| 363 | +private: | ||
| 364 | + GpuInfo(const GpuInfo&); | ||
| 365 | + GpuInfo& operator=(const GpuInfo&); | ||
| 366 | + | ||
| | + // pimpl; fields are written by create_gpu_instance() | ||
| 367 | +private: | ||
| 368 | + friend int create_gpu_instance(const char* driver_path); | ||
| 369 | + GpuInfoPrivate* const d; | ||
| 370 | +}; | ||
| 371 | + | ||
| 372 | +NCNN_EXPORT const GpuInfo& get_gpu_info(int device_index = get_default_gpu_index()); | ||
| 373 | + | ||
| 374 | +class VkAllocator; | ||
| 375 | +class VkCompute; | ||
| 376 | +class Option; | ||
| 377 | +class PipelineCache; | ||
| 378 | +class VulkanDevicePrivate; | ||
// Wrapper around one logical Vulkan device: shader-module creation,
// compute-pipeline helpers, queue and allocator pooling, and resolved
// extension entry points. Non-copyable (copy ops are declared private,
// undefined); implementation state lives behind the pimpl `d`.
// Instances are normally obtained via get_gpu_device(), not constructed directly.
class NCNN_EXPORT VulkanDevice
{
public:
    // Creates the logical device for the physical gpu at device_index
    // (defaults to the automatically selected gpu).
    VulkanDevice(int device_index = get_default_gpu_index());
    ~VulkanDevice();

    // Capability/property info of the underlying physical device.
    const GpuInfo& info;

    // Raw VkDevice handle for direct Vulkan API calls.
    VkDevice vkdevice() const;
    // Whether device creation succeeded; check before using the device.
    bool is_valid() const;

    // Builds a VkShaderModule from SPIR-V words (spv_data_size is in bytes).
    VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size) const;

    // with fixed workgroup size
    // Variant that bakes the given local workgroup dimensions into the module.
    VkShaderModule compile_shader_module(const uint32_t* spv_data, size_t spv_data_size, uint32_t local_size_x, uint32_t local_size_y, uint32_t local_size_z) const;

    // helper for creating pipeline
    // Each helper writes the created handle through its out-parameter.
    // binding_types entries follow the ShaderInfo::binding_types encoding.
    int create_descriptorset_layout(int binding_count, const int* binding_types, VkDescriptorSetLayout* descriptorset_layout) const;
    int create_pipeline_layout(int push_constant_count, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout* pipeline_layout) const;
    int create_pipeline(VkShaderModule shader_module, VkPipelineLayout pipeline_layout, const std::vector<vk_specialization_type>& specializations, uint32_t subgroup_size, VkPipeline* pipeline) const;
    int create_descriptor_update_template(int binding_count, const int* binding_types, VkDescriptorSetLayout descriptorset_layout, VkPipelineLayout pipeline_layout, VkDescriptorUpdateTemplateKHR* descriptor_update_template) const;

    // Selects a memory type index matching memory_type_bits, requiring the
    // `required` property flags and scoring by `preferred` / `preferred_not`.
    uint32_t find_memory_index(uint32_t memory_type_bits, VkFlags required, VkFlags preferred, VkFlags preferred_not) const;
    // True if the memory type at this index is host-mappable.
    bool is_mappable(uint32_t memory_type_index) const;
    // True if the memory type at this index is host-coherent.
    bool is_coherent(uint32_t memory_type_index) const;

    // Queue pooling: acquire_queue hands out a queue from the given family;
    // reclaim_queue must return the same queue when the caller is done.
    VkQueue acquire_queue(uint32_t queue_family_index) const;
    void reclaim_queue(uint32_t queue_family_index, VkQueue queue) const;

    // allocator on this device
    // Pooled device-local ("blob") allocator; pair acquire with reclaim.
    VkAllocator* acquire_blob_allocator() const;
    void reclaim_blob_allocator(VkAllocator* allocator) const;

    // Pooled host-visible staging allocator; pair acquire with reclaim.
    VkAllocator* acquire_staging_allocator() const;
    void reclaim_staging_allocator(VkAllocator* allocator) const;

    // immutable sampler for texelfetch
    const VkSampler* immutable_texelfetch_sampler() const;

    // dummy buffer image
    // Placeholder resources used to satisfy unused shader bindings.
    VkMat get_dummy_buffer() const;
    VkImageMat get_dummy_image() const;
    VkImageMat get_dummy_image_readonly() const;

    // pipeline cache on this device
    const PipelineCache* get_pipeline_cache() const;

    // test image allocation
    // True if an image of the given shape can be allocated as storage image.
    bool shape_support_image_storage(const Mat& shape) const;

    // current gpu heap memory budget in MB
    uint32_t get_heap_budget() const;

    // utility operator
    // Repacks src into dst with dst_elempack, recording work into cmd;
    // overloads cover every buffer/image source/destination combination.
    void convert_packing(const VkMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
    void convert_packing(const VkImageMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
    void convert_packing(const VkMat& src, VkImageMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;
    void convert_packing(const VkImageMat& src, VkMat& dst, int dst_elempack, VkCompute& cmd, const Option& opt) const;

    // Extension entry points resolved at device creation; NOTE(review):
    // presumably null when the extension is unavailable — confirm in the
    // implementation before calling unconditionally.

    // VK_KHR_bind_memory2
    PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;

    // VK_KHR_buffer_device_address
    PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
    PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
    PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;

    // VK_KHR_descriptor_update_template
    PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
    PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
    PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR;

    // VK_KHR_get_memory_requirements2
    PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;

    // VK_KHR_maintenance1
    PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;

    // VK_KHR_maintenance3
    PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;

    // VK_KHR_push_descriptor
    PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
    PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;

    // VK_KHR_sampler_ycbcr_conversion
    PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
    PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;

    // VK_KHR_swapchain
    PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR;
    PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
    PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
    PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR;
    PFN_vkQueuePresentKHR vkQueuePresentKHR;

    // VK_EXT_buffer_device_address
    PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;

#if __ANDROID_API__ >= 26
    // VK_ANDROID_external_memory_android_hardware_buffer
    PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
    PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
#endif // __ANDROID_API__ >= 26

protected:
    // device extension
    // Resolves the extension function pointers above.
    int init_device_extension();

private:
    // non-copyable: declared but not defined
    VulkanDevice(const VulkanDevice&);
    VulkanDevice& operator=(const VulkanDevice&);

private:
    // pimpl: all mutable state lives in VulkanDevicePrivate
    VulkanDevicePrivate* const d;
};
| 497 | + | ||
// Returns the VulkanDevice for the physical gpu at device_index
// (defaults to the automatically selected gpu).
NCNN_EXPORT VulkanDevice* get_gpu_device(int device_index = get_default_gpu_index());

// online spirv compilation
// Compile shader source to SPIR-V words under the given Option; overloads
// accept a null-terminated string, an explicit-length buffer, or the index
// of a built-in shader. Each returns an int status code.
NCNN_EXPORT int compile_spirv_module(const char* comp_string, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(const char* comp_data, int comp_data_size, const Option& opt, std::vector<uint32_t>& spirv);
NCNN_EXPORT int compile_spirv_module(int shader_type_index, const Option& opt, std::vector<uint32_t>& spirv);
| 505 | +// info from spirv | ||
// info from spirv
// Interface layout of a compute shader as recovered from its SPIR-V by
// resolve_shader_info(): counts of specialization constants, descriptor
// bindings and push constants, plus the type of each binding.
class NCNN_EXPORT ShaderInfo
{
public:
    int specialization_count; // number of specialization constants
    int binding_count;        // number of descriptor bindings actually used
    int push_constant_count;  // number of push-constant members

    // per-binding descriptor type, indexed by binding slot:
    // 0 = null
    // 1 = storage buffer
    // 2 = storage image
    // 3 = combined image sampler
    int binding_types[16]; // 16 is large enough I think ...

    // padding reserved for future fields without breaking layout
    int reserved_0;
    int reserved_1;
    int reserved_2;
    int reserved_3;
};
| 524 | + | ||
// Parses a SPIR-V module (spv_data_size in bytes) and fills shader_info
// with its specialization/binding/push-constant layout; returns an int status.
NCNN_EXPORT int resolve_shader_info(const uint32_t* spv_data, size_t spv_data_size, ShaderInfo& shader_info);
| 526 | + | ||
| 527 | +} // namespace ncnn | ||
| 528 | + | ||
| 529 | +#endif // NCNN_VULKAN | ||
| 530 | + | ||
| 531 | +#endif // NCNN_GPU_H |