xuning

init

Showing 31 changed files with 4861 additions and 0 deletions.

Too many changes to display; for performance, only 31 of 31+ files are shown.

*.iml
.kotlin
.gradle
**/build/
xcuserdata
!src/**/build/
local.properties
.idea
.DS_Store
captures
.externalNativeBuild
.cxx
*.xcodeproj/*
!*.xcodeproj/project.pbxproj
!*.xcodeproj/xcshareddata/
!*.xcodeproj/project.xcworkspace/
!*.xcworkspace/contents.xcworkspacedata
**/xcshareddata/WorkspaceSettings.xcsettings
/opencvsdk/
... ...
This is a Kotlin Multiplatform project targeting Android, iOS, Web, Desktop.
* `/composeApp` is for code that will be shared across your Compose Multiplatform applications.
It contains several subfolders:
- `commonMain` is for code that’s common for all targets.
- Other folders are for Kotlin code that will be compiled for only the platform indicated in the folder name.
For example, if you want to use Apple’s CoreCrypto for the iOS part of your Kotlin app,
`iosMain` would be the right folder for such calls (see the `expect`/`actual` sketch after this list).
* `/iosApp` contains iOS applications. Even if you’re sharing your UI with Compose Multiplatform,
you need this entry point for your iOS app. This is also where you should add SwiftUI code for your project.
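As a minimal illustration of how these source sets fit together, here is a sketch spread across three hypothetical files (the function and file names are made up for this sketch and are not part of the generated project):

```kotlin
// commonMain/kotlin/Platform.kt: shared code sees only the expect declaration.
expect fun platformName(): String

fun greetingMessage(): String = "Running on ${platformName()}"

// androidMain/kotlin/Platform.android.kt: Android-specific implementation.
actual fun platformName(): String = "Android API ${android.os.Build.VERSION.SDK_INT}"

// iosMain/kotlin/Platform.ios.kt: iOS-specific implementation; calls into Apple
// frameworks (CoreCrypto, UIKit, ...) would also live in this source set.
actual fun platformName(): String = "iOS"
```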
Learn more about [Kotlin Multiplatform](https://www.jetbrains.com/help/kotlin-multiplatform-dev/get-started.html),
[Compose Multiplatform](https://github.com/JetBrains/compose-multiplatform/#compose-multiplatform),
and [Kotlin/Wasm](https://kotl.in/wasm/).
We would appreciate your feedback on Compose/Web and Kotlin/Wasm in the public Slack channel [#compose-web](https://slack-chats.kotlinlang.org/c/compose-web).
If you face any issues, please report them on [YouTrack](https://youtrack.jetbrains.com/newIssue?project=CMP).
You can open the web application by running the `:composeApp:wasmJsBrowserDevelopmentRun` Gradle task.
\ No newline at end of file
... ...
plugins {
// this is necessary to avoid the plugins being loaded multiple times
// in each subproject's classloader
alias(libs.plugins.androidApplication) apply false
alias(libs.plugins.androidLibrary) apply false
alias(libs.plugins.composeHotReload) apply false
alias(libs.plugins.composeMultiplatform) apply false
alias(libs.plugins.composeCompiler) apply false
alias(libs.plugins.kotlinMultiplatform) apply false
}
\ No newline at end of file
... ...
import org.jetbrains.compose.desktop.application.dsl.TargetFormat
import org.jetbrains.kotlin.gradle.ExperimentalKotlinGradlePluginApi
import org.jetbrains.kotlin.gradle.ExperimentalWasmDsl
import org.jetbrains.kotlin.gradle.dsl.JvmTarget
import org.jetbrains.kotlin.gradle.targets.js.webpack.KotlinWebpackConfig
plugins {
alias(libs.plugins.kotlinMultiplatform)
alias(libs.plugins.androidApplication)
alias(libs.plugins.composeMultiplatform)
alias(libs.plugins.composeCompiler)
alias(libs.plugins.composeHotReload)
kotlin("plugin.serialization") version "2.2.0"
}
kotlin {
val ktorVersion = "3.2.2"
androidTarget {
@OptIn(ExperimentalKotlinGradlePluginApi::class)
compilerOptions {
jvmTarget.set(JvmTarget.JVM_11)
}
}
listOf(
iosX64(),
iosArm64(),
iosSimulatorArm64()
).forEach { iosTarget ->
iosTarget.binaries.framework {
baseName = "ComposeApp"
isStatic = true
}
}
jvm("desktop")
@OptIn(ExperimentalWasmDsl::class)
wasmJs {
outputModuleName.set("composeApp")
browser {
val rootDirPath = project.rootDir.path
val projectDirPath = project.projectDir.path
commonWebpackConfig {
outputFileName = "composeApp.js"
devServer = (devServer ?: KotlinWebpackConfig.DevServer()).apply {
static = (static ?: mutableListOf()).apply {
// Serve sources to debug inside browser
add(rootDirPath)
add(projectDirPath)
}
}
}
}
binaries.executable()
}
sourceSets {
val desktopMain by getting
androidMain.dependencies {
implementation(compose.preview)
implementation(libs.androidx.activity.compose)
implementation("io.ktor:ktor-client-android:$ktorVersion")
implementation("androidx.lifecycle:lifecycle-viewmodel-ktx:2.6.2")
implementation("androidx.lifecycle:lifecycle-runtime-compose:2.6.2")
implementation("androidx.lifecycle:lifecycle-viewmodel-compose:2.6.2")
implementation("org.opencv:opencv:4.12.0")
}
commonMain.dependencies {
implementation(compose.runtime)
implementation(compose.foundation)
implementation(compose.material3)
implementation(compose.ui)
implementation(compose.components.resources)
implementation(compose.components.uiToolingPreview)
implementation(libs.androidx.lifecycle.viewmodel)
implementation(libs.androidx.lifecycle.runtimeCompose)
implementation("org.jetbrains.kotlinx:kotlinx-datetime:0.6.2")
implementation("org.jetbrains.kotlinx:kotlinx-coroutines-core:1.10.2")
implementation("io.ktor:ktor-client-core:$ktorVersion")
implementation("io.ktor:ktor-client-content-negotiation:$ktorVersion")
implementation("io.ktor:ktor-serialization-kotlinx-json:$ktorVersion")
implementation("io.ktor:ktor-client-cio:$ktorVersion")
}
commonTest.dependencies {
implementation(libs.kotlin.test)
}
desktopMain.dependencies {
implementation(compose.desktop.currentOs)
implementation(libs.kotlinx.coroutinesSwing)
}
iosMain.dependencies {
implementation("io.ktor:ktor-client-darwin:$ktorVersion")
}
}
}
android {
namespace = "org.example.project"
compileSdk = libs.versions.android.compileSdk.get().toInt()
defaultConfig {
applicationId = "org.example.project"
minSdk = libs.versions.android.minSdk.get().toInt()
targetSdk = libs.versions.android.targetSdk.get().toInt()
versionCode = 1
versionName = "1.0"
externalNativeBuild {
cmake {
arguments += "-DANDROID_SUPPORT_FLEXIBLE_PAGE_SIZES=ON"
}
}
}
sourceSets {
getByName("main") {
jniLibs.srcDirs("src/androidMain/jniLibs")
}
}
packaging {
resources {
excludes += "/META-INF/{AL2.0,LGPL2.1}"
}
}
buildTypes {
getByName("release") {
isMinifyEnabled = false
}
}
compileOptions {
sourceCompatibility = JavaVersion.VERSION_11
targetCompatibility = JavaVersion.VERSION_11
}
externalNativeBuild {
cmake {
path = file("src/androidMain/jni/CMakeLists.txt")
}
}
}
dependencies {
debugImplementation(compose.uiTooling)
}
compose.desktop {
application {
mainClass = "org.example.project.MainKt"
nativeDistributions {
targetFormats(TargetFormat.Dmg, TargetFormat.Msi, TargetFormat.Deb)
packageName = "org.example.project"
packageVersion = "1.0.0"
}
}
}
... ...
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
<uses-permission android:name="android.permission.INTERNET"/>
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera2.full" />
<uses-feature android:name="android.hardware.camera" android:required="false" />
<application
android:allowBackup="true"
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
android:roundIcon="@mipmap/ic_launcher_round"
android:supportsRtl="true"
android:theme="@android:style/Theme.Material.Light.NoActionBar">
<activity
android:exported="true"
android:name=".MainActivity">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>
\ No newline at end of file
... ...
7767517
321 415
Input in0 0 1 in0
Split splitncnn_0 1 6 in0 1 2 3 4 5 6
Input in1 0 1 in1
Split splitncnn_1 1 7 in1 8 9 10 11 12 13 14
Input in2 0 1 in2
Split splitncnn_2 1 3 in2 16 17 18
Input in3 0 1 in3
Split splitncnn_3 1 3 in3 20 21 22
Input in4 0 1 in4
Split splitncnn_4 1 3 in4 24 25 26
Input in5 0 1 in5
Split splitncnn_5 1 3 in5 28 29 30
MemoryData pnnx_fold_mean0.1 0 1 31 0=1 1=1 2=3
MemoryData pnnx_fold_std0.1 0 1 32 0=1 1=1 2=3
BinaryOp sub_0 2 1 14 31 33 0=1
BinaryOp div_1 2 1 33 32 34 0=3
Convolution conv_12 1 1 34 35 0=16 1=3 11=3 12=1 13=2 14=1 2=1 3=2 4=1 5=1 6=432
HardSwish hswish_87 1 1 35 36 0=1.666667e-01 1=5.000000e-01
Split splitncnn_6 1 2 36 37 38
ConvolutionDepthWise convdwrelu_0 1 1 38 39 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=144 7=16 9=1
Convolution conv_13 1 1 39 40 0=16 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=256
BinaryOp add_2 2 1 40 37 41 0=0
Split splitncnn_7 1 2 41 42 43
Convolution convrelu_0 1 1 43 44 0=64 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1024 9=1
ConvolutionDepthWise convdwrelu_1 1 1 44 45 0=64 1=3 11=3 12=1 13=2 14=1 2=1 3=2 4=1 5=1 6=576 7=64 9=1
Convolution conv_15 1 1 45 46 0=24 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1536
Split splitncnn_8 1 2 46 47 48
Convolution convrelu_1 1 1 48 49 0=72 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1728 9=1
ConvolutionDepthWise convdwrelu_2 1 1 49 50 0=72 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=648 7=72 9=1
Convolution conv_17 1 1 50 51 0=24 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1728
BinaryOp add_3 2 1 51 47 52 0=0
Split splitncnn_9 1 2 52 53 54
Convolution convrelu_2 1 1 54 55 0=72 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1728 9=1
ConvolutionDepthWise convdwrelu_3 1 1 55 56 0=72 1=5 11=5 12=1 13=2 14=2 2=1 3=2 4=2 5=1 6=1800 7=72 9=1
Split splitncnn_10 1 2 56 57 58
Pooling gap_0 1 1 58 59 0=1 4=1
Convolution convrelu_3 1 1 59 60 0=24 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1728 9=1
Convolution conv_20 1 1 60 61 0=72 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1728
HardSigmoid hsigmoid_79 1 1 61 62 0=1.666667e-01 1=5.000000e-01
Reshape reshape_147 1 1 62 63 0=1 1=1 2=-1
BinaryOp mul_4 2 1 63 57 64 0=2
Convolution conv_21 1 1 64 65 0=40 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=2880
Split splitncnn_11 1 2 65 66 67
Convolution convrelu_4 1 1 67 68 0=120 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=4800 9=1
ConvolutionDepthWise convdwrelu_4 1 1 68 69 0=120 1=5 11=5 12=1 13=1 14=2 2=1 3=1 4=2 5=1 6=3000 7=120 9=1
Split splitncnn_12 1 2 69 70 71
Pooling gap_1 1 1 71 72 0=1 4=1
Convolution convrelu_5 1 1 72 73 0=32 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=3840 9=1
Convolution conv_24 1 1 73 74 0=120 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=3840
HardSigmoid hsigmoid_80 1 1 74 75 0=1.666667e-01 1=5.000000e-01
Reshape reshape_148 1 1 75 76 0=1 1=1 2=-1
BinaryOp mul_5 2 1 76 70 77 0=2
Convolution conv_25 1 1 77 78 0=40 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=4800
BinaryOp add_6 2 1 78 66 79 0=0
Split splitncnn_13 1 2 79 80 81
Convolution convrelu_6 1 1 81 82 0=120 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=4800 9=1
ConvolutionDepthWise convdwrelu_5 1 1 82 83 0=120 1=5 11=5 12=1 13=1 14=2 2=1 3=1 4=2 5=1 6=3000 7=120 9=1
Split splitncnn_14 1 2 83 84 85
Pooling gap_2 1 1 85 86 0=1 4=1
Convolution convrelu_7 1 1 86 87 0=32 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=3840 9=1
Convolution conv_28 1 1 87 88 0=120 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=3840
HardSigmoid hsigmoid_81 1 1 88 89 0=1.666667e-01 1=5.000000e-01
Reshape reshape_149 1 1 89 90 0=1 1=1 2=-1
BinaryOp mul_7 2 1 90 84 91 0=2
Convolution conv_29 1 1 91 92 0=40 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=4800
BinaryOp add_8 2 1 92 80 93 0=0
Split splitncnn_15 1 2 93 94 95
Convolution conv_30 1 1 95 96 0=240 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=9600
HardSwish hswish_88 1 1 96 97 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_165 1 1 97 98 0=240 1=3 11=3 12=1 13=2 14=1 2=1 3=2 4=1 5=1 6=2160 7=240
HardSwish hswish_89 1 1 98 99 0=1.666667e-01 1=5.000000e-01
Convolution conv_31 1 1 99 100 0=80 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=19200
Split splitncnn_16 1 2 100 101 102
Convolution conv_32 1 1 102 103 0=200 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16000
HardSwish hswish_90 1 1 103 104 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_166 1 1 104 105 0=200 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=1800 7=200
HardSwish hswish_91 1 1 105 106 0=1.666667e-01 1=5.000000e-01
Convolution conv_33 1 1 106 107 0=80 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16000
BinaryOp add_9 2 1 107 101 108 0=0
Split splitncnn_17 1 2 108 109 110
Convolution conv_34 1 1 110 111 0=184 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=14720
HardSwish hswish_92 1 1 111 112 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_167 1 1 112 113 0=184 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=1656 7=184
HardSwish hswish_93 1 1 113 114 0=1.666667e-01 1=5.000000e-01
Convolution conv_35 1 1 114 115 0=80 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=14720
BinaryOp add_10 2 1 115 109 116 0=0
Split splitncnn_18 1 2 116 117 118
Convolution conv_36 1 1 118 119 0=184 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=14720
HardSwish hswish_94 1 1 119 120 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_168 1 1 120 121 0=184 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=1656 7=184
HardSwish hswish_95 1 1 121 122 0=1.666667e-01 1=5.000000e-01
Convolution conv_37 1 1 122 123 0=80 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=14720
BinaryOp add_11 2 1 123 117 124 0=0
Convolution conv_38 1 1 124 125 0=480 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=38400
HardSwish hswish_96 1 1 125 126 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_169 1 1 126 127 0=480 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=4320 7=480
HardSwish hswish_97 1 1 127 128 0=1.666667e-01 1=5.000000e-01
Split splitncnn_19 1 2 128 129 130
Pooling gap_3 1 1 130 131 0=1 4=1
Convolution convrelu_8 1 1 131 132 0=120 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=57600 9=1
Convolution conv_40 1 1 132 133 0=480 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=57600
HardSigmoid hsigmoid_82 1 1 133 134 0=1.666667e-01 1=5.000000e-01
Reshape reshape_150 1 1 134 135 0=1 1=1 2=-1
BinaryOp mul_12 2 1 135 129 136 0=2
Convolution conv_41 1 1 136 137 0=112 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=53760
Split splitncnn_20 1 2 137 138 139
Convolution conv_42 1 1 139 140 0=672 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=75264
HardSwish hswish_98 1 1 140 141 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_170 1 1 141 142 0=672 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=6048 7=672
HardSwish hswish_99 1 1 142 143 0=1.666667e-01 1=5.000000e-01
Split splitncnn_21 1 2 143 144 145
Pooling gap_4 1 1 145 146 0=1 4=1
Convolution convrelu_9 1 1 146 147 0=168 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=112896 9=1
Convolution conv_44 1 1 147 148 0=672 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=112896
HardSigmoid hsigmoid_83 1 1 148 149 0=1.666667e-01 1=5.000000e-01
Reshape reshape_151 1 1 149 150 0=1 1=1 2=-1
BinaryOp mul_13 2 1 150 144 151 0=2
Convolution conv_45 1 1 151 152 0=112 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=75264
BinaryOp add_14 2 1 152 138 153 0=0
Convolution conv_46 1 1 153 154 0=672 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=75264
HardSwish hswish_100 1 1 154 155 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_171 1 1 155 156 0=672 1=5 11=5 12=2 13=1 14=4 2=2 3=1 4=4 5=1 6=16800 7=672
HardSwish hswish_101 1 1 156 157 0=1.666667e-01 1=5.000000e-01
Split splitncnn_22 1 2 157 158 159
Pooling gap_5 1 1 159 160 0=1 4=1
Convolution convrelu_10 1 1 160 161 0=168 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=112896 9=1
Convolution conv_48 1 1 161 162 0=672 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=112896
HardSigmoid hsigmoid_84 1 1 162 163 0=1.666667e-01 1=5.000000e-01
Reshape reshape_152 1 1 163 164 0=1 1=1 2=-1
BinaryOp mul_15 2 1 164 158 165 0=2
Convolution conv_49 1 1 165 166 0=160 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=107520
Split splitncnn_23 1 2 166 167 168
Convolution conv_50 1 1 168 169 0=960 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=153600
HardSwish hswish_102 1 1 169 170 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_172 1 1 170 171 0=960 1=5 11=5 12=2 13=1 14=4 2=2 3=1 4=4 5=1 6=24000 7=960
HardSwish hswish_103 1 1 171 172 0=1.666667e-01 1=5.000000e-01
Split splitncnn_24 1 2 172 173 174
Pooling gap_6 1 1 174 175 0=1 4=1
Convolution convrelu_11 1 1 175 176 0=240 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=230400 9=1
Convolution conv_52 1 1 176 177 0=960 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=230400
HardSigmoid hsigmoid_85 1 1 177 178 0=1.666667e-01 1=5.000000e-01
Reshape reshape_153 1 1 178 179 0=1 1=1 2=-1
BinaryOp mul_16 2 1 179 173 180 0=2
Convolution conv_53 1 1 180 181 0=160 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=153600
BinaryOp add_17 2 1 181 167 182 0=0
Split splitncnn_25 1 2 182 183 184
Convolution conv_54 1 1 184 185 0=960 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=153600
HardSwish hswish_104 1 1 185 186 0=1.666667e-01 1=5.000000e-01
ConvolutionDepthWise convdw_173 1 1 186 187 0=960 1=5 11=5 12=2 13=1 14=4 2=2 3=1 4=4 5=1 6=24000 7=960
HardSwish hswish_105 1 1 187 188 0=1.666667e-01 1=5.000000e-01
Split splitncnn_26 1 2 188 189 190
Pooling gap_7 1 1 190 191 0=1 4=1
Convolution convrelu_12 1 1 191 192 0=240 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=230400 9=1
Convolution conv_56 1 1 192 193 0=960 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=230400
HardSigmoid hsigmoid_86 1 1 193 194 0=1.666667e-01 1=5.000000e-01
Reshape reshape_154 1 1 194 195 0=1 1=1 2=-1
BinaryOp mul_18 2 1 195 189 196 0=2
Convolution conv_57 1 1 196 197 0=160 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=153600
BinaryOp add_19 2 1 197 183 198 0=0
Convolution conv_58 1 1 198 199 0=960 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=153600
HardSwish hswish_106 1 1 199 200 0=1.666667e-01 1=5.000000e-01
Split splitncnn_27 1 2 200 201 202
Convolution convrelu_13 1 1 202 203 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=122880 9=1
Pooling gap_8 1 1 201 204 0=1 4=1
Convolution convsigmoid_21 1 1 204 205 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=0 6=122880 9=4
Reshape reshape_155 1 1 205 206 0=1 1=1 2=-1
BinaryOp mul_20 2 1 203 206 207 0=2
Pooling avgpool2d_9 1 1 12 208 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_28 1 3 208 209 210 211
Pooling avgpool2d_10 1 1 211 212 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_29 1 3 212 213 214 215
Pooling avgpool2d_11 1 1 215 216 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_30 1 2 216 217 218
Slice split_0 1 2 207 219 220 -23300=2,64,-233 1=0
Split splitncnn_31 1 2 220 221 222
Concat cat_0 2 1 221 28 223 0=0
Convolution convsigmoid_22 1 1 223 224 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=147456 9=4
Slice split_1 1 2 224 225 226 -23300=2,64,-233 1=0
Split splitncnn_32 1 2 226 227 228
BinaryOp mul_21 2 1 225 29 229 0=2
Concat cat_1 2 1 222 229 230 0=0
Convolution conv_62 1 1 230 231 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=73728
TanH tanh_139 1 1 231 232
BinaryOp mul_22 2 1 227 232 233 0=2
BinaryOp sub_23 1 1 228 234 0=7 1=1 2=1.000000e+00
BinaryOp mul_24 2 1 234 30 235 0=2
BinaryOp add_25 2 1 235 233 236 0=0
Split splitncnn_33 1 2 236 237 out10
Concat cat_2 2 1 219 237 239 0=0
Interp upsample_143 1 1 239 240 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_0 2 1 240 218 241 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_3 3 1 241 94 217 242 0=0
Convolution convrelu_14 1 1 242 243 0=80 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=123120 9=1
Slice split_2 1 2 243 244 245 -23300=2,40,-233 1=0
Split splitncnn_34 1 2 245 246 247
Concat cat_4 2 1 246 24 248 0=0
Convolution convsigmoid_23 1 1 248 249 0=80 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=57600 9=4
Slice split_3 1 2 249 250 251 -23300=2,40,-233 1=0
Split splitncnn_35 1 2 251 252 253
BinaryOp mul_26 2 1 250 25 254 0=2
Concat cat_5 2 1 247 254 255 0=0
Convolution conv_65 1 1 255 256 0=40 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=28800
TanH tanh_140 1 1 256 257
BinaryOp mul_27 2 1 252 257 258 0=2
BinaryOp sub_28 1 1 253 259 0=7 1=1 2=1.000000e+00
BinaryOp mul_29 2 1 259 26 260 0=2
BinaryOp add_30 2 1 260 258 261 0=0
Split splitncnn_36 1 2 261 262 out9
Concat cat_6 2 1 244 262 264 0=0
Interp upsample_144 1 1 264 265 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_1 2 1 265 214 266 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_7 3 1 266 53 213 267 0=0
Convolution convrelu_15 1 1 267 268 0=40 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=38520 9=1
Slice split_4 1 2 268 269 270 -23300=2,20,-233 1=0
Split splitncnn_37 1 2 270 271 272
Concat cat_8 2 1 271 20 273 0=0
Convolution convsigmoid_24 1 1 273 274 0=40 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=14400 9=4
Slice split_5 1 2 274 275 276 -23300=2,20,-233 1=0
Split splitncnn_38 1 2 276 277 278
BinaryOp mul_31 2 1 275 21 279 0=2
Concat cat_9 2 1 272 279 280 0=0
Convolution conv_68 1 1 280 281 0=20 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=7200
TanH tanh_141 1 1 281 282
BinaryOp mul_32 2 1 277 282 283 0=2
BinaryOp sub_33 1 1 278 284 0=7 1=1 2=1.000000e+00
BinaryOp mul_34 2 1 284 22 285 0=2
BinaryOp add_35 2 1 285 283 286 0=0
Split splitncnn_39 1 2 286 287 out8
Concat cat_10 2 1 269 287 289 0=0
Interp upsample_145 1 1 289 290 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_2 2 1 290 210 291 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_11 3 1 291 42 209 292 0=0
Convolution convrelu_16 1 1 292 293 0=32 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=16992 9=1
Slice split_6 1 2 293 294 295 -23300=2,16,-233 1=0
Split splitncnn_40 1 2 295 296 297
Concat cat_12 2 1 296 16 298 0=0
Convolution convsigmoid_25 1 1 298 299 0=32 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=9216 9=4
Slice split_7 1 2 299 300 301 -23300=2,16,-233 1=0
Split splitncnn_41 1 2 301 302 303
BinaryOp mul_36 2 1 300 17 304 0=2
Concat cat_13 2 1 297 304 305 0=0
Convolution conv_71 1 1 305 306 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=4608
TanH tanh_142 1 1 306 307
BinaryOp mul_37 2 1 302 307 308 0=2
BinaryOp sub_38 1 1 303 309 0=7 1=1 2=1.000000e+00
BinaryOp mul_39 2 1 309 18 310 0=2
BinaryOp add_40 2 1 310 308 311 0=0
Split splitncnn_42 1 2 311 312 out7
Concat cat_14 2 1 294 312 314 0=0
Interp upsample_146 1 1 314 315 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_3 2 1 315 11 316 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_15 2 1 316 8 317 0=0
Convolution convrelu_17 1 1 317 318 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=5040 9=1
Convolution convrelu_18 1 1 318 319 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=2304 9=1
Split splitncnn_43 1 3 319 320 321 322
Convolution conv_74 1 1 322 323 0=4 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=64
Slice split_8 1 2 323 324 325 -23300=2,3,1 1=0
Split splitncnn_45 1 2 325 326 out1
Split splitncnn_44 1 2 324 328 329
BinaryOp add_41 2 1 329 10 out0 0=0
Reduction mean_157 1 1 6 331 0=3 1=0 -23303=1,0 4=1 5=1
Concat cat_16 2 1 1 331 332 0=0
Split splitncnn_46 1 4 332 333 334 335 336
Reduction mean_158 1 1 13 337 0=3 1=0 -23303=1,0 4=1 5=1
Concat cat_17 2 1 9 337 338 0=0
Split splitncnn_47 1 5 338 339 340 341 342 343
Concat cat_18 2 1 328 326 344 0=0
Split splitncnn_48 1 3 344 345 346 347
ConvolutionDepthWise convdw_174 1 1 343 348 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
Split splitncnn_49 1 4 348 349 350 351 352
ConvolutionDepthWise convdw_175 1 1 347 353 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
Split splitncnn_50 1 2 353 354 355
BinaryOp mul_42 2 1 339 345 356 0=2
Split splitncnn_51 1 2 356 357 358
ConvolutionDepthWise convdw_176 1 1 358 359 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
BinaryOp mul_43 2 1 349 354 360 0=2
BinaryOp sub_44 2 1 359 360 361 0=1
BinaryOp mul_45 2 1 340 341 362 0=2
Split splitncnn_52 1 2 362 363 364
ConvolutionDepthWise convdw_177 1 1 364 365 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
BinaryOp mul_46 2 1 350 351 366 0=2
BinaryOp sub_47 2 1 365 366 367 0=1
Concat cat_19 3 1 361 367 320 368 0=0
Convolution convrelu_19 1 1 368 369 0=16 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=384 9=1
Convolution convrelu_20 1 1 369 370 0=16 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=256 9=1
Convolution conv_77 1 1 370 371 0=4 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=64
Split splitncnn_53 1 2 371 372 373
BinaryOp mul_48 2 1 373 352 374 0=2
BinaryOp sub_49 2 1 355 374 375 0=1
Interp F.upsample_89 2 1 372 4 376 0=2 5=1 6=0 9="1w,1h"
Interp F.upsample_90 2 1 375 5 377 0=2 5=1 6=0 9="1w,1h"
BinaryOp mul_50 2 1 376 333 378 0=2
BinaryOp add_51 2 1 378 377 379 0=0
Slice split_9 1 2 379 380 out3 -23300=2,3,1 1=0
BinaryOp add_52 2 1 380 2 out2 0=0
ConvolutionDepthWise convdw_178 1 1 342 383 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_179 1 1 383 384 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
Split splitncnn_54 1 4 384 385 386 387 388
ConvolutionDepthWise convdw_180 1 1 346 389 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_181 1 1 389 390 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
Split splitncnn_55 1 2 390 391 392
ConvolutionDepthWise convdw_182 1 1 357 393 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_183 1 1 393 394 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
ConvolutionDepthWise convdw_184 1 1 363 395 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_185 1 1 395 396 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
BinaryOp mul_53 2 1 385 386 397 0=2
BinaryOp sub_54 2 1 396 397 398 0=1
BinaryOp add_55 1 1 398 399 0=0 1=1 2=1.000000e-05
BinaryOp mul_56 2 1 387 391 400 0=2
BinaryOp sub_57 2 1 394 400 401 0=1
BinaryOp div_58 2 1 401 399 402 0=3 31=1
Split splitncnn_56 1 2 402 403 404
BinaryOp mul_59 2 1 404 388 405 0=2
BinaryOp sub_60 2 1 392 405 406 0=1
Interp F.upsample_91 2 1 403 335 407 0=2 5=1 6=0 9="1w,1h"
Interp F.upsample_92 2 1 406 336 408 0=2 5=1 6=0 9="1w,1h"
BinaryOp mul_61 2 1 407 334 409 0=2
BinaryOp add_62 2 1 409 408 410 0=0
Slice split_10 1 2 410 411 out5 -23300=2,3,1 1=0
BinaryOp add_63 2 1 411 3 out4 0=0
Convolution conv_78 1 1 321 out6 0=1 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16
... ...
7767517
275 367
Input in0 0 1 in0
Split splitncnn_0 1 6 in0 1 2 3 4 5 6
Input in1 0 1 in1
Split splitncnn_1 1 7 in1 8 9 10 11 12 13 14
Input in2 0 1 in2
Split splitncnn_2 1 3 in2 16 17 18
Input in3 0 1 in3
Split splitncnn_3 1 3 in3 20 21 22
Input in4 0 1 in4
Split splitncnn_4 1 3 in4 24 25 26
Input in5 0 1 in5
Split splitncnn_5 1 3 in5 28 29 30
Convolution convrelu_0 1 1 13 31 0=64 1=7 11=7 12=1 13=2 14=3 2=1 3=2 4=3 5=1 6=9408 9=1
Split splitncnn_6 1 2 31 32 33
Pooling maxpool2d_77 1 1 33 34 0=0 1=3 11=3 12=2 13=1 2=2 3=1 5=1
Split splitncnn_7 1 2 34 35 36
Convolution convrelu_1 1 1 36 37 0=64 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=4096 9=1
Convolution convrelu_2 1 1 37 38 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=36864 9=1
Convolution conv_7 1 1 38 39 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384
Convolution conv_8 1 1 35 40 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384
BinaryOp add_0 2 1 39 40 41 0=0
ReLU relu_81 1 1 41 42
Split splitncnn_8 1 2 42 43 44
Convolution convrelu_3 1 1 44 45 0=64 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384 9=1
Convolution convrelu_4 1 1 45 46 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=36864 9=1
Convolution conv_11 1 1 46 47 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384
BinaryOp add_1 2 1 47 43 48 0=0
ReLU relu_84 1 1 48 49
Split splitncnn_9 1 2 49 50 51
Convolution convrelu_5 1 1 51 52 0=64 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384 9=1
Convolution convrelu_6 1 1 52 53 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=36864 9=1
Convolution conv_14 1 1 53 54 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16384
BinaryOp add_2 2 1 54 50 55 0=0
ReLU relu_87 1 1 55 56
Split splitncnn_10 1 3 56 57 58 59
Convolution convrelu_7 1 1 59 60 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=32768 9=1
Convolution convrelu_8 1 1 60 61 0=128 1=3 11=3 12=1 13=2 14=1 2=1 3=2 4=1 5=1 6=147456 9=1
Convolution conv_17 1 1 61 62 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536
Convolution conv_18 1 1 58 63 0=512 1=1 11=1 12=1 13=2 14=0 2=1 3=2 4=0 5=1 6=131072
BinaryOp add_3 2 1 62 63 64 0=0
ReLU relu_90 1 1 64 65
Split splitncnn_11 1 2 65 66 67
Convolution convrelu_9 1 1 67 68 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536 9=1
Convolution convrelu_10 1 1 68 69 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=147456 9=1
Convolution conv_21 1 1 69 70 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536
BinaryOp add_4 2 1 70 66 71 0=0
ReLU relu_93 1 1 71 72
Split splitncnn_12 1 2 72 73 74
Convolution convrelu_11 1 1 74 75 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536 9=1
Convolution convrelu_12 1 1 75 76 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=147456 9=1
Convolution conv_24 1 1 76 77 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536
BinaryOp add_5 2 1 77 73 78 0=0
ReLU relu_96 1 1 78 79
Split splitncnn_13 1 2 79 80 81
Convolution convrelu_13 1 1 81 82 0=128 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536 9=1
Convolution convrelu_14 1 1 82 83 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=147456 9=1
Convolution conv_27 1 1 83 84 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=65536
BinaryOp add_6 2 1 84 80 85 0=0
ReLU relu_99 1 1 85 86
Split splitncnn_14 1 3 86 87 88 89
Convolution convrelu_15 1 1 89 90 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=131072 9=1
Convolution convrelu_16 1 1 90 91 0=256 1=3 11=3 12=1 13=2 14=1 2=1 3=2 4=1 5=1 6=589824 9=1
Convolution conv_30 1 1 91 92 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
Convolution conv_31 1 1 88 93 0=1024 1=1 11=1 12=1 13=2 14=0 2=1 3=2 4=0 5=1 6=524288
BinaryOp add_7 2 1 92 93 94 0=0
ReLU relu_102 1 1 94 95
Split splitncnn_15 1 2 95 96 97
Convolution convrelu_17 1 1 97 98 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144 9=1
Convolution convrelu_18 1 1 98 99 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=1
Convolution conv_34 1 1 99 100 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
BinaryOp add_8 2 1 100 96 101 0=0
ReLU relu_105 1 1 101 102
Split splitncnn_16 1 2 102 103 104
Convolution convrelu_19 1 1 104 105 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144 9=1
Convolution convrelu_20 1 1 105 106 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=1
Convolution conv_37 1 1 106 107 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
BinaryOp add_9 2 1 107 103 108 0=0
ReLU relu_108 1 1 108 109
Split splitncnn_17 1 2 109 110 111
Convolution convrelu_21 1 1 111 112 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144 9=1
Convolution convrelu_22 1 1 112 113 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=1
Convolution conv_40 1 1 113 114 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
BinaryOp add_10 2 1 114 110 115 0=0
ReLU relu_111 1 1 115 116
Split splitncnn_18 1 2 116 117 118
Convolution convrelu_23 1 1 118 119 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144 9=1
Convolution convrelu_24 1 1 119 120 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=1
Convolution conv_43 1 1 120 121 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
BinaryOp add_11 2 1 121 117 122 0=0
ReLU relu_114 1 1 122 123
Split splitncnn_19 1 2 123 124 125
Convolution convrelu_25 1 1 125 126 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144 9=1
Convolution convrelu_26 1 1 126 127 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=1
Convolution conv_46 1 1 127 128 0=1024 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=262144
BinaryOp add_12 2 1 128 124 129 0=0
ReLU relu_117 1 1 129 130
Split splitncnn_20 1 2 130 131 132
Convolution convrelu_27 1 1 132 133 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=524288 9=1
Convolution convrelu_28 1 1 133 134 0=512 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=2359296 9=1
Convolution conv_49 1 1 134 135 0=2048 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1048576
Convolution conv_50 1 1 131 136 0=2048 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=2097152
BinaryOp add_13 2 1 135 136 137 0=0
ReLU relu_120 1 1 137 138
Split splitncnn_21 1 2 138 139 140
Convolution convrelu_29 1 1 140 141 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1048576 9=1
Convolution convrelu_30 1 1 141 142 0=512 1=3 11=3 12=2 13=1 14=2 2=2 3=1 4=2 5=1 6=2359296 9=1
Convolution conv_53 1 1 142 143 0=2048 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1048576
BinaryOp add_14 2 1 143 139 144 0=0
ReLU relu_123 1 1 144 145
Split splitncnn_22 1 2 145 146 147
Convolution convrelu_31 1 1 147 148 0=512 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1048576 9=1
Convolution convrelu_32 1 1 148 149 0=512 1=3 11=3 12=2 13=1 14=2 2=2 3=1 4=2 5=1 6=2359296 9=1
Convolution conv_56 1 1 149 150 0=2048 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=1048576
BinaryOp add_15 2 1 150 146 151 0=0
ReLU relu_126 1 1 151 152
Split splitncnn_23 1 2 152 153 154
Convolution convrelu_33 1 1 154 155 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=524288 9=1
Pooling gap_0 1 1 153 156 0=1 4=1
Convolution convsigmoid_41 1 1 156 157 0=256 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=0 6=524288 9=4
Reshape reshape_148 1 1 157 158 0=1 1=1 2=-1
BinaryOp mul_16 2 1 155 158 159 0=2
Pooling avgpool2d_1 1 1 12 160 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_24 1 3 160 161 162 163
Pooling avgpool2d_2 1 1 163 164 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_25 1 3 164 165 166 167
Pooling avgpool2d_3 1 1 167 168 0=1 1=2 11=2 12=2 13=0 2=2 3=0 5=0 6=0
Split splitncnn_26 1 2 168 169 170
Slice split_0 1 2 159 171 172 -23300=2,128,-233 1=0
Split splitncnn_27 1 2 172 173 174
Concat cat_0 2 1 173 28 175 0=0
Convolution convsigmoid_42 1 1 175 176 0=256 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=589824 9=4
Slice split_1 1 2 176 177 178 -23300=2,128,-233 1=0
Split splitncnn_28 1 2 178 179 180
BinaryOp mul_17 2 1 177 29 181 0=2
Concat cat_1 2 1 174 181 182 0=0
Convolution conv_60 1 1 182 183 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=294912
TanH tanh_140 1 1 183 184
BinaryOp mul_18 2 1 179 184 185 0=2
BinaryOp sub_19 1 1 180 186 0=7 1=1 2=1.000000e+00
BinaryOp mul_20 2 1 186 30 187 0=2
BinaryOp add_21 2 1 187 185 188 0=0
Split splitncnn_29 1 2 188 189 out10
Concat cat_2 2 1 171 189 191 0=0
Interp upsample_144 1 1 191 192 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_0 2 1 192 170 193 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_3 3 1 193 87 169 194 0=0
Convolution convrelu_34 1 1 194 195 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=888192 9=1
Slice split_2 1 2 195 196 197 -23300=2,64,-233 1=0
Split splitncnn_30 1 2 197 198 199
Concat cat_4 2 1 198 24 200 0=0
Convolution convsigmoid_43 1 1 200 201 0=128 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=147456 9=4
Slice split_3 1 2 201 202 203 -23300=2,64,-233 1=0
Split splitncnn_31 1 2 203 204 205
BinaryOp mul_22 2 1 202 25 206 0=2
Concat cat_5 2 1 199 206 207 0=0
Convolution conv_63 1 1 207 208 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=73728
TanH tanh_141 1 1 208 209
BinaryOp mul_23 2 1 204 209 210 0=2
BinaryOp sub_24 1 1 205 211 0=7 1=1 2=1.000000e+00
BinaryOp mul_25 2 1 211 26 212 0=2
BinaryOp add_26 2 1 212 210 213 0=0
Split splitncnn_32 1 2 213 214 out9
Concat cat_6 2 1 196 214 216 0=0
Interp upsample_145 1 1 216 217 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_1 2 1 217 166 218 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_7 3 1 218 57 165 219 0=0
Convolution convrelu_35 1 1 219 220 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=222912 9=1
Slice split_4 1 2 220 221 222 -23300=2,32,-233 1=0
Split splitncnn_33 1 2 222 223 224
Concat cat_8 2 1 223 20 225 0=0
Convolution convsigmoid_44 1 1 225 226 0=64 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=36864 9=4
Slice split_5 1 2 226 227 228 -23300=2,32,-233 1=0
Split splitncnn_34 1 2 228 229 230
BinaryOp mul_27 2 1 227 21 231 0=2
Concat cat_9 2 1 224 231 232 0=0
Convolution conv_66 1 1 232 233 0=32 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=18432
TanH tanh_142 1 1 233 234
BinaryOp mul_28 2 1 229 234 235 0=2
BinaryOp sub_29 1 1 230 236 0=7 1=1 2=1.000000e+00
BinaryOp mul_30 2 1 236 22 237 0=2
BinaryOp add_31 2 1 237 235 238 0=0
Split splitncnn_35 1 2 238 239 out8
Concat cat_10 2 1 221 239 241 0=0
Interp upsample_146 1 1 241 242 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_2 2 1 242 162 243 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_11 3 1 243 32 161 244 0=0
Convolution convrelu_36 1 1 244 245 0=32 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=37728 9=1
Slice split_6 1 2 245 246 247 -23300=2,16,-233 1=0
Split splitncnn_36 1 2 247 248 249
Concat cat_12 2 1 248 16 250 0=0
Convolution convsigmoid_45 1 1 250 251 0=32 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=9216 9=4
Slice split_7 1 2 251 252 253 -23300=2,16,-233 1=0
Split splitncnn_37 1 2 253 254 255
BinaryOp mul_32 2 1 252 17 256 0=2
Concat cat_13 2 1 249 256 257 0=0
Convolution conv_69 1 1 257 258 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=4608
TanH tanh_143 1 1 258 259
BinaryOp mul_33 2 1 254 259 260 0=2
BinaryOp sub_34 1 1 255 261 0=7 1=1 2=1.000000e+00
BinaryOp mul_35 2 1 261 18 262 0=2
BinaryOp add_36 2 1 262 260 263 0=0
Split splitncnn_38 1 2 263 264 out7
Concat cat_14 2 1 246 264 266 0=0
Interp upsample_147 1 1 266 267 0=2 1=2.000000e+00 2=2.000000e+00 6=0
Crop slice3_3 2 1 267 11 268 19="0,0" 20="1h,1w" 21="1,2"
Concat cat_15 2 1 268 8 269 0=0
Convolution convrelu_37 1 1 269 270 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=5040 9=1
Convolution convrelu_38 1 1 270 271 0=16 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=1 6=2304 9=1
Split splitncnn_39 1 3 271 272 273 274
Convolution conv_72 1 1 274 275 0=4 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=64
Slice split_8 1 2 275 276 277 -23300=2,3,1 1=0
Split splitncnn_41 1 2 277 278 out1
Split splitncnn_40 1 2 276 280 281
BinaryOp add_37 2 1 281 10 out0 0=0
Reduction mean_149 1 1 6 283 0=3 1=0 -23303=1,0 4=1 5=1
Concat cat_16 2 1 1 283 284 0=0
Split splitncnn_42 1 4 284 285 286 287 288
Reduction mean_150 1 1 14 289 0=3 1=0 -23303=1,0 4=1 5=1
Concat cat_17 2 1 9 289 290 0=0
Split splitncnn_43 1 5 290 291 292 293 294 295
Concat cat_18 2 1 280 278 296 0=0
Split splitncnn_44 1 3 296 297 298 299
ConvolutionDepthWise convdw_151 1 1 295 300 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
Split splitncnn_45 1 4 300 301 302 303 304
ConvolutionDepthWise convdw_152 1 1 299 305 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
Split splitncnn_46 1 2 305 306 307
BinaryOp mul_38 2 1 291 297 308 0=2
Split splitncnn_47 1 2 308 309 310
ConvolutionDepthWise convdw_153 1 1 310 311 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
BinaryOp mul_39 2 1 301 306 312 0=2
BinaryOp sub_40 2 1 311 312 313 0=1
BinaryOp mul_41 2 1 292 293 314 0=2
Split splitncnn_48 1 2 314 315 316
ConvolutionDepthWise convdw_154 1 1 316 317 0=4 1=3 11=3 12=1 13=1 14=1 2=1 3=1 4=1 5=0 6=36 7=4
BinaryOp mul_42 2 1 302 303 318 0=2
BinaryOp sub_43 2 1 317 318 319 0=1
Concat cat_19 3 1 313 319 272 320 0=0
Convolution convrelu_39 1 1 320 321 0=16 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=384 9=1
Convolution convrelu_40 1 1 321 322 0=16 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=256 9=1
Convolution conv_75 1 1 322 323 0=4 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=64
Split splitncnn_49 1 2 323 324 325
BinaryOp mul_44 2 1 325 304 326 0=2
BinaryOp sub_45 2 1 307 326 327 0=1
Interp F.upsample_84 2 1 324 4 328 0=2 5=1 6=0 9="1w,1h"
Interp F.upsample_85 2 1 327 5 329 0=2 5=1 6=0 9="1w,1h"
BinaryOp mul_46 2 1 328 285 330 0=2
BinaryOp add_47 2 1 330 329 331 0=0
Slice split_9 1 2 331 332 out3 -23300=2,3,1 1=0
BinaryOp add_48 2 1 332 2 out2 0=0
ConvolutionDepthWise convdw_155 1 1 294 335 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_156 1 1 335 336 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
Split splitncnn_50 1 4 336 337 338 339 340
ConvolutionDepthWise convdw_157 1 1 298 341 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_158 1 1 341 342 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
Split splitncnn_51 1 2 342 343 344
ConvolutionDepthWise convdw_159 1 1 309 345 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_160 1 1 345 346 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
ConvolutionDepthWise convdw_161 1 1 315 347 0=4 1=3 11=1 12=1 13=1 14=0 2=1 3=1 4=1 5=0 6=12 7=4
ConvolutionDepthWise convdw_162 1 1 347 348 0=4 1=1 11=3 12=1 13=1 14=1 2=1 3=1 4=0 5=0 6=12 7=4
BinaryOp mul_49 2 1 337 338 349 0=2
BinaryOp sub_50 2 1 348 349 350 0=1
BinaryOp add_51 1 1 350 351 0=0 1=1 2=1.000000e-05
BinaryOp mul_52 2 1 339 343 352 0=2
BinaryOp sub_53 2 1 346 352 353 0=1
BinaryOp div_54 2 1 353 351 354 0=3 31=1
Split splitncnn_52 1 2 354 355 356
BinaryOp mul_55 2 1 356 340 357 0=2
BinaryOp sub_56 2 1 344 357 358 0=1
Interp F.upsample_86 2 1 355 287 359 0=2 5=1 6=0 9="1w,1h"
Interp F.upsample_87 2 1 358 288 360 0=2 5=1 6=0 9="1w,1h"
BinaryOp mul_57 2 1 359 286 361 0=2
BinaryOp add_58 2 1 361 360 362 0=0
Slice split_10 1 2 362 363 out5 -23300=2,3,1 1=0
BinaryOp add_59 2 1 363 3 out4 0=0
Convolution conv_76 1 1 273 out6 0=1 1=1 11=1 12=1 13=1 14=0 2=1 3=1 4=0 5=1 6=16
... ...
cmake_minimum_required(VERSION 3.10)
set(OpenCV_DIR ${CMAKE_SOURCE_DIR}/opencv-mobile-4.11.0-android/sdk/native/jni)
find_package(OpenCV REQUIRED core imgproc)
set(ncnn_DIR ${CMAKE_SOURCE_DIR}/ncnn-20250503-android-vulkan/${ANDROID_ABI}/lib/cmake/ncnn)
find_package(ncnn REQUIRED)
add_library(rvmncnn SHARED rvmncnn.cpp rvm.cpp ndkcamera.cpp)
target_link_libraries(rvmncnn ncnn ${OpenCV_LIBS} camera2ndk mediandk)
\ No newline at end of file
... ...
# Background Image Replacement Feature: Usage Guide
## Overview
This change allows the Robust Video Matting (RVM) background to be replaced with a specified image instead of a single solid color.
## Usage
### 1. Java/Kotlin layer
```java
// Set the background image
Bitmap backgroundBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.background);
RVMNcnn.setBackgroundImage(backgroundBitmap);
// Clear the background image and restore the default color
RVMNcnn.setBackgroundImage(null);
```
### 2. JNI interface
New JNI function:
- `Java_org_example_project_RVMNcnn_setBackgroundImage(JNIEnv* env, jobject thiz, jobject bitmap)`
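For reference, a minimal sketch of the Kotlin-side declaration this JNI symbol corresponds to. The package and object name are taken from the symbol above; the exact signature is an assumption, and the library name comes from the CMakeLists.txt shown later in this diff:

```kotlin
package org.example.project

import android.graphics.Bitmap

// Sketch of the Kotlin binding implied by the JNI symbol above. The real
// wrapper in the project may expose more methods or a different signature;
// this mirrors only setBackgroundImage().
object RVMNcnn {
    init {
        // "rvmncnn" is the shared library name declared in CMakeLists.txt.
        System.loadLibrary("rvmncnn")
    }

    // Pass null to clear the background image and restore the default color.
    external fun setBackgroundImage(bitmap: Bitmap?)
}
```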
### 3. C++ API
New methods on the RVM class:
- `void set_background_image(const cv::Mat& background)` - sets the background image
- `void clear_background_image()` - clears the background image and falls back to the default color
## Technical details
### Background image handling
- Accepts Bitmaps in RGBA_8888 and RGB_565 formats
- Automatically converted to OpenCV BGR format
- Images of any size are supported; they are scaled automatically to fit
- If no background image is set, the default color RGB(120, 255, 155) is used
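On the caller side, a hedged sketch of how a background Bitmap might be prepared before passing it in. The helper name is hypothetical; pre-scaling is optional, since the native layer converts and scales anyway:

```kotlin
import android.content.res.Resources
import android.graphics.Bitmap
import android.graphics.BitmapFactory

// Hypothetical caller-side helper (not part of the change above): decode a drawable,
// force ARGB_8888, and roughly match the input video resolution before calling
// RVMNcnn.setBackgroundImage(). This only keeps the one-time native conversion cheap.
fun prepareBackground(res: Resources, resId: Int, targetW: Int, targetH: Int): Bitmap {
    val decoded = BitmapFactory.decodeResource(res, resId)
    val argb = if (decoded.config == Bitmap.Config.ARGB_8888) decoded
               else decoded.copy(Bitmap.Config.ARGB_8888, /* isMutable = */ false)
    return Bitmap.createScaledBitmap(argb, targetW, targetH, /* filter = */ true)
}
```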
### Blending algorithm
The standard alpha-blending formula is applied per pixel:
```
result = foreground * alpha + background * (1 - alpha)
```
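For illustration only (the actual blend runs per pixel in the native C++ layer over the matte produced by RVM), the same formula expressed in Kotlin for one 8-bit channel:

```kotlin
// Illustrative only: fg and bg are 8-bit channel values, alpha is the matte value in [0, 1].
fun blendChannel(fg: Int, bg: Int, alpha: Float): Int =
    (fg * alpha + bg * (1f - alpha)).toInt().coerceIn(0, 255)

// A full RGB pixel is the same formula applied channel by channel.
fun blendPixel(fg: IntArray, bg: IntArray, alpha: Float): IntArray =
    IntArray(3) { c -> blendChannel(fg[c], bg[c], alpha) }
```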
### Performance considerations
- The background image is format-converted and scaled once, at the time it is set
- Pixel sampling happens in real time while each frame is rendered
- For best performance, use a background image whose resolution is close to that of the input video
## Notes
1. The background image should be an 8-bit RGB or RGBA image
2. The image does not need to match the input video size exactly; it is adapted automatically
3. Passing null restores the default background color
4. The background image is released automatically when the RVM instance is destroyed
\ No newline at end of file
... ...
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _RESOURCE_LIMITS_INCLUDED_
#define _RESOURCE_LIMITS_INCLUDED_
struct TLimits {
bool nonInductiveForLoops;
bool whileLoops;
bool doWhileLoops;
bool generalUniformIndexing;
bool generalAttributeMatrixVectorIndexing;
bool generalVaryingIndexing;
bool generalSamplerIndexing;
bool generalVariableIndexing;
bool generalConstantMatrixVectorIndexing;
};
struct TBuiltInResource {
int maxLights;
int maxClipPlanes;
int maxTextureUnits;
int maxTextureCoords;
int maxVertexAttribs;
int maxVertexUniformComponents;
int maxVaryingFloats;
int maxVertexTextureImageUnits;
int maxCombinedTextureImageUnits;
int maxTextureImageUnits;
int maxFragmentUniformComponents;
int maxDrawBuffers;
int maxVertexUniformVectors;
int maxVaryingVectors;
int maxFragmentUniformVectors;
int maxVertexOutputVectors;
int maxFragmentInputVectors;
int minProgramTexelOffset;
int maxProgramTexelOffset;
int maxClipDistances;
int maxComputeWorkGroupCountX;
int maxComputeWorkGroupCountY;
int maxComputeWorkGroupCountZ;
int maxComputeWorkGroupSizeX;
int maxComputeWorkGroupSizeY;
int maxComputeWorkGroupSizeZ;
int maxComputeUniformComponents;
int maxComputeTextureImageUnits;
int maxComputeImageUniforms;
int maxComputeAtomicCounters;
int maxComputeAtomicCounterBuffers;
int maxVaryingComponents;
int maxVertexOutputComponents;
int maxGeometryInputComponents;
int maxGeometryOutputComponents;
int maxFragmentInputComponents;
int maxImageUnits;
int maxCombinedImageUnitsAndFragmentOutputs;
int maxCombinedShaderOutputResources;
int maxImageSamples;
int maxVertexImageUniforms;
int maxTessControlImageUniforms;
int maxTessEvaluationImageUniforms;
int maxGeometryImageUniforms;
int maxFragmentImageUniforms;
int maxCombinedImageUniforms;
int maxGeometryTextureImageUnits;
int maxGeometryOutputVertices;
int maxGeometryTotalOutputComponents;
int maxGeometryUniformComponents;
int maxGeometryVaryingComponents;
int maxTessControlInputComponents;
int maxTessControlOutputComponents;
int maxTessControlTextureImageUnits;
int maxTessControlUniformComponents;
int maxTessControlTotalOutputComponents;
int maxTessEvaluationInputComponents;
int maxTessEvaluationOutputComponents;
int maxTessEvaluationTextureImageUnits;
int maxTessEvaluationUniformComponents;
int maxTessPatchComponents;
int maxPatchVertices;
int maxTessGenLevel;
int maxViewports;
int maxVertexAtomicCounters;
int maxTessControlAtomicCounters;
int maxTessEvaluationAtomicCounters;
int maxGeometryAtomicCounters;
int maxFragmentAtomicCounters;
int maxCombinedAtomicCounters;
int maxAtomicCounterBindings;
int maxVertexAtomicCounterBuffers;
int maxTessControlAtomicCounterBuffers;
int maxTessEvaluationAtomicCounterBuffers;
int maxGeometryAtomicCounterBuffers;
int maxFragmentAtomicCounterBuffers;
int maxCombinedAtomicCounterBuffers;
int maxAtomicCounterBufferSize;
int maxTransformFeedbackBuffers;
int maxTransformFeedbackInterleavedComponents;
int maxCullDistances;
int maxCombinedClipAndCullDistances;
int maxSamples;
int maxMeshOutputVerticesNV;
int maxMeshOutputPrimitivesNV;
int maxMeshWorkGroupSizeX_NV;
int maxMeshWorkGroupSizeY_NV;
int maxMeshWorkGroupSizeZ_NV;
int maxTaskWorkGroupSizeX_NV;
int maxTaskWorkGroupSizeY_NV;
int maxTaskWorkGroupSizeZ_NV;
int maxMeshViewCountNV;
int maxMeshOutputVerticesEXT;
int maxMeshOutputPrimitivesEXT;
int maxMeshWorkGroupSizeX_EXT;
int maxMeshWorkGroupSizeY_EXT;
int maxMeshWorkGroupSizeZ_EXT;
int maxTaskWorkGroupSizeX_EXT;
int maxTaskWorkGroupSizeY_EXT;
int maxTaskWorkGroupSizeZ_EXT;
int maxMeshViewCountEXT;
int maxDualSourceDrawBuffersEXT;
TLimits limits;
};
#endif // _RESOURCE_LIMITS_INCLUDED_
... ...
/**
This code is based on the glslang_c_interface implementation by Viktor Latypov
**/
/**
BSD 2-Clause License
Copyright (c) 2019, Viktor Latypov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#ifndef GLSLANG_C_IFACE_H_INCLUDED
#define GLSLANG_C_IFACE_H_INCLUDED
#include <stdbool.h>
#include <stdlib.h>
#include "glslang_c_shader_types.h"
#include "visibility.h"
typedef struct glslang_shader_s glslang_shader_t;
typedef struct glslang_program_s glslang_program_t;
typedef struct glslang_mapper_s glslang_mapper_t;
typedef struct glslang_resolver_s glslang_resolver_t;
/* Version counterpart */
typedef struct glslang_version_s {
int major;
int minor;
int patch;
const char* flavor;
} glslang_version_t;
/* TLimits counterpart */
typedef struct glslang_limits_s {
bool non_inductive_for_loops;
bool while_loops;
bool do_while_loops;
bool general_uniform_indexing;
bool general_attribute_matrix_vector_indexing;
bool general_varying_indexing;
bool general_sampler_indexing;
bool general_variable_indexing;
bool general_constant_matrix_vector_indexing;
} glslang_limits_t;
/* TBuiltInResource counterpart */
typedef struct glslang_resource_s {
int max_lights;
int max_clip_planes;
int max_texture_units;
int max_texture_coords;
int max_vertex_attribs;
int max_vertex_uniform_components;
int max_varying_floats;
int max_vertex_texture_image_units;
int max_combined_texture_image_units;
int max_texture_image_units;
int max_fragment_uniform_components;
int max_draw_buffers;
int max_vertex_uniform_vectors;
int max_varying_vectors;
int max_fragment_uniform_vectors;
int max_vertex_output_vectors;
int max_fragment_input_vectors;
int min_program_texel_offset;
int max_program_texel_offset;
int max_clip_distances;
int max_compute_work_group_count_x;
int max_compute_work_group_count_y;
int max_compute_work_group_count_z;
int max_compute_work_group_size_x;
int max_compute_work_group_size_y;
int max_compute_work_group_size_z;
int max_compute_uniform_components;
int max_compute_texture_image_units;
int max_compute_image_uniforms;
int max_compute_atomic_counters;
int max_compute_atomic_counter_buffers;
int max_varying_components;
int max_vertex_output_components;
int max_geometry_input_components;
int max_geometry_output_components;
int max_fragment_input_components;
int max_image_units;
int max_combined_image_units_and_fragment_outputs;
int max_combined_shader_output_resources;
int max_image_samples;
int max_vertex_image_uniforms;
int max_tess_control_image_uniforms;
int max_tess_evaluation_image_uniforms;
int max_geometry_image_uniforms;
int max_fragment_image_uniforms;
int max_combined_image_uniforms;
int max_geometry_texture_image_units;
int max_geometry_output_vertices;
int max_geometry_total_output_components;
int max_geometry_uniform_components;
int max_geometry_varying_components;
int max_tess_control_input_components;
int max_tess_control_output_components;
int max_tess_control_texture_image_units;
int max_tess_control_uniform_components;
int max_tess_control_total_output_components;
int max_tess_evaluation_input_components;
int max_tess_evaluation_output_components;
int max_tess_evaluation_texture_image_units;
int max_tess_evaluation_uniform_components;
int max_tess_patch_components;
int max_patch_vertices;
int max_tess_gen_level;
int max_viewports;
int max_vertex_atomic_counters;
int max_tess_control_atomic_counters;
int max_tess_evaluation_atomic_counters;
int max_geometry_atomic_counters;
int max_fragment_atomic_counters;
int max_combined_atomic_counters;
int max_atomic_counter_bindings;
int max_vertex_atomic_counter_buffers;
int max_tess_control_atomic_counter_buffers;
int max_tess_evaluation_atomic_counter_buffers;
int max_geometry_atomic_counter_buffers;
int max_fragment_atomic_counter_buffers;
int max_combined_atomic_counter_buffers;
int max_atomic_counter_buffer_size;
int max_transform_feedback_buffers;
int max_transform_feedback_interleaved_components;
int max_cull_distances;
int max_combined_clip_and_cull_distances;
int max_samples;
int max_mesh_output_vertices_nv;
int max_mesh_output_primitives_nv;
int max_mesh_work_group_size_x_nv;
int max_mesh_work_group_size_y_nv;
int max_mesh_work_group_size_z_nv;
int max_task_work_group_size_x_nv;
int max_task_work_group_size_y_nv;
int max_task_work_group_size_z_nv;
int max_mesh_view_count_nv;
int max_mesh_output_vertices_ext;
int max_mesh_output_primitives_ext;
int max_mesh_work_group_size_x_ext;
int max_mesh_work_group_size_y_ext;
int max_mesh_work_group_size_z_ext;
int max_task_work_group_size_x_ext;
int max_task_work_group_size_y_ext;
int max_task_work_group_size_z_ext;
int max_mesh_view_count_ext;
union
{
int max_dual_source_draw_buffers_ext;
/* Incorrectly capitalized name retained for backward compatibility */
int maxDualSourceDrawBuffersEXT;
};
glslang_limits_t limits;
} glslang_resource_t;
/* Inclusion result structure allocated by C include_local/include_system callbacks */
typedef struct glsl_include_result_s {
/* Header file name or NULL if inclusion failed */
const char* header_name;
/* Header contents or NULL */
const char* header_data;
size_t header_length;
} glsl_include_result_t;
/* Callback for local file inclusion */
typedef glsl_include_result_t* (*glsl_include_local_func)(void* ctx, const char* header_name, const char* includer_name,
size_t include_depth);
/* Callback for system file inclusion */
typedef glsl_include_result_t* (*glsl_include_system_func)(void* ctx, const char* header_name,
const char* includer_name, size_t include_depth);
/* Callback for include result destruction */
typedef int (*glsl_free_include_result_func)(void* ctx, glsl_include_result_t* result);
/* Collection of callbacks for GLSL preprocessor */
typedef struct glsl_include_callbacks_s {
glsl_include_system_func include_system;
glsl_include_local_func include_local;
glsl_free_include_result_func free_include_result;
} glsl_include_callbacks_t;
typedef struct glslang_input_s {
glslang_source_t language;
glslang_stage_t stage;
glslang_client_t client;
glslang_target_client_version_t client_version;
glslang_target_language_t target_language;
glslang_target_language_version_t target_language_version;
/** Shader source code */
const char* code;
int default_version;
glslang_profile_t default_profile;
int force_default_version_and_profile;
int forward_compatible;
glslang_messages_t messages;
const glslang_resource_t* resource;
glsl_include_callbacks_t callbacks;
void* callbacks_ctx;
} glslang_input_t;
/* SpvOptions counterpart */
typedef struct glslang_spv_options_s {
bool generate_debug_info;
bool strip_debug_info;
bool disable_optimizer;
bool optimize_size;
bool disassemble;
bool validate;
bool emit_nonsemantic_shader_debug_info;
bool emit_nonsemantic_shader_debug_source;
bool compile_only;
bool optimize_allow_expanded_id_bound;
} glslang_spv_options_t;
#ifdef __cplusplus
extern "C" {
#endif
GLSLANG_EXPORT void glslang_get_version(glslang_version_t* version);
GLSLANG_EXPORT int glslang_initialize_process(void);
GLSLANG_EXPORT void glslang_finalize_process(void);
GLSLANG_EXPORT glslang_shader_t* glslang_shader_create(const glslang_input_t* input);
GLSLANG_EXPORT void glslang_shader_delete(glslang_shader_t* shader);
GLSLANG_EXPORT void glslang_shader_set_preamble(glslang_shader_t* shader, const char* s);
GLSLANG_EXPORT void glslang_shader_shift_binding(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base);
GLSLANG_EXPORT void glslang_shader_shift_binding_for_set(glslang_shader_t* shader, glslang_resource_type_t res, unsigned int base, unsigned int set);
GLSLANG_EXPORT void glslang_shader_set_options(glslang_shader_t* shader, int options); // glslang_shader_options_t
GLSLANG_EXPORT void glslang_shader_set_glsl_version(glslang_shader_t* shader, int version);
GLSLANG_EXPORT void glslang_shader_set_default_uniform_block_set_and_binding(glslang_shader_t* shader, unsigned int set, unsigned int binding);
GLSLANG_EXPORT void glslang_shader_set_default_uniform_block_name(glslang_shader_t* shader, const char *name);
GLSLANG_EXPORT void glslang_shader_set_resource_set_binding(glslang_shader_t* shader, const char *const *bindings, unsigned int num_bindings);
GLSLANG_EXPORT int glslang_shader_preprocess(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT int glslang_shader_parse(glslang_shader_t* shader, const glslang_input_t* input);
GLSLANG_EXPORT const char* glslang_shader_get_preprocessed_code(glslang_shader_t* shader);
GLSLANG_EXPORT void glslang_shader_set_preprocessed_code(glslang_shader_t* shader, const char* code);
GLSLANG_EXPORT const char* glslang_shader_get_info_log(glslang_shader_t* shader);
GLSLANG_EXPORT const char* glslang_shader_get_info_debug_log(glslang_shader_t* shader);
GLSLANG_EXPORT glslang_program_t* glslang_program_create(void);
GLSLANG_EXPORT void glslang_program_delete(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_add_shader(glslang_program_t* program, glslang_shader_t* shader);
GLSLANG_EXPORT int glslang_program_link(glslang_program_t* program, int messages); // glslang_messages_t
GLSLANG_EXPORT void glslang_program_add_source_text(glslang_program_t* program, glslang_stage_t stage, const char* text, size_t len);
GLSLANG_EXPORT void glslang_program_set_source_file(glslang_program_t* program, glslang_stage_t stage, const char* file);
GLSLANG_EXPORT int glslang_program_map_io(glslang_program_t* program);
GLSLANG_EXPORT int glslang_program_map_io_with_resolver_and_mapper(glslang_program_t* program, glslang_resolver_t* resolver, glslang_mapper_t* mapper);
GLSLANG_EXPORT void glslang_program_SPIRV_generate(glslang_program_t* program, glslang_stage_t stage);
GLSLANG_EXPORT void glslang_program_SPIRV_generate_with_options(glslang_program_t* program, glslang_stage_t stage, glslang_spv_options_t* spv_options);
GLSLANG_EXPORT size_t glslang_program_SPIRV_get_size(glslang_program_t* program);
GLSLANG_EXPORT void glslang_program_SPIRV_get(glslang_program_t* program, unsigned int*);
GLSLANG_EXPORT unsigned int* glslang_program_SPIRV_get_ptr(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_SPIRV_get_messages(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_log(glslang_program_t* program);
GLSLANG_EXPORT const char* glslang_program_get_info_debug_log(glslang_program_t* program);
GLSLANG_EXPORT glslang_mapper_t* glslang_glsl_mapper_create(void);
GLSLANG_EXPORT void glslang_glsl_mapper_delete(glslang_mapper_t* mapper);
GLSLANG_EXPORT glslang_resolver_t* glslang_glsl_resolver_create(glslang_program_t* program, glslang_stage_t stage);
GLSLANG_EXPORT void glslang_glsl_resolver_delete(glslang_resolver_t* resolver);
#ifdef __cplusplus
}
#endif
#endif /* #ifdef GLSLANG_C_IFACE_INCLUDED */
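/*
 * Illustrative end-to-end sketch (not part of the header above): compiling a
 * GLSL vertex shader to SPIR-V through the C interface declared above. Error
 * logs are ignored for brevity; glslang_default_resource() is assumed to come
 * from glslang's resource_limits_c.h, and compile_vertex_to_spirv is a
 * function name invented for this example.
 */
#include <stdlib.h> /* malloc */

static size_t compile_vertex_to_spirv(const char* glsl_source, unsigned int** out_words)
{
    glslang_input_t input = {0};
    input.language = GLSLANG_SOURCE_GLSL;
    input.stage = GLSLANG_STAGE_VERTEX;
    input.client = GLSLANG_CLIENT_VULKAN;
    input.client_version = GLSLANG_TARGET_VULKAN_1_2;
    input.target_language = GLSLANG_TARGET_SPV;
    input.target_language_version = GLSLANG_TARGET_SPV_1_5;
    input.code = glsl_source;
    input.default_version = 450;
    input.default_profile = GLSLANG_NO_PROFILE;
    input.messages = GLSLANG_MSG_DEFAULT_BIT;
    input.resource = glslang_default_resource(); /* assumed from resource_limits_c.h */

    size_t size = 0;
    glslang_initialize_process();
    glslang_shader_t* shader = glslang_shader_create(&input);
    if (glslang_shader_preprocess(shader, &input) && glslang_shader_parse(shader, &input)) {
        glslang_program_t* program = glslang_program_create();
        glslang_program_add_shader(program, shader);
        if (glslang_program_link(program, GLSLANG_MSG_SPV_RULES_BIT | GLSLANG_MSG_VULKAN_RULES_BIT)) {
            glslang_program_SPIRV_generate(program, input.stage);
            size = glslang_program_SPIRV_get_size(program);
            *out_words = (unsigned int*)malloc(size * sizeof(unsigned int));
            glslang_program_SPIRV_get(program, *out_words);
        }
        glslang_program_delete(program);
    }
    glslang_shader_delete(shader);
    glslang_finalize_process();
    return size; /* number of 32-bit SPIR-V words, 0 on failure */
}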
... ...
/**
This code is based on the glslang_c_interface implementation by Viktor Latypov
**/
/**
BSD 2-Clause License
Copyright (c) 2019, Viktor Latypov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#ifndef C_SHADER_TYPES_H_INCLUDED
#define C_SHADER_TYPES_H_INCLUDED
#define LAST_ELEMENT_MARKER(x) x
/* EShLanguage counterpart */
typedef enum {
GLSLANG_STAGE_VERTEX,
GLSLANG_STAGE_TESSCONTROL,
GLSLANG_STAGE_TESSEVALUATION,
GLSLANG_STAGE_GEOMETRY,
GLSLANG_STAGE_FRAGMENT,
GLSLANG_STAGE_COMPUTE,
GLSLANG_STAGE_RAYGEN,
GLSLANG_STAGE_RAYGEN_NV = GLSLANG_STAGE_RAYGEN,
GLSLANG_STAGE_INTERSECT,
GLSLANG_STAGE_INTERSECT_NV = GLSLANG_STAGE_INTERSECT,
GLSLANG_STAGE_ANYHIT,
GLSLANG_STAGE_ANYHIT_NV = GLSLANG_STAGE_ANYHIT,
GLSLANG_STAGE_CLOSESTHIT,
GLSLANG_STAGE_CLOSESTHIT_NV = GLSLANG_STAGE_CLOSESTHIT,
GLSLANG_STAGE_MISS,
GLSLANG_STAGE_MISS_NV = GLSLANG_STAGE_MISS,
GLSLANG_STAGE_CALLABLE,
GLSLANG_STAGE_CALLABLE_NV = GLSLANG_STAGE_CALLABLE,
GLSLANG_STAGE_TASK,
GLSLANG_STAGE_TASK_NV = GLSLANG_STAGE_TASK,
GLSLANG_STAGE_MESH,
GLSLANG_STAGE_MESH_NV = GLSLANG_STAGE_MESH,
LAST_ELEMENT_MARKER(GLSLANG_STAGE_COUNT),
} glslang_stage_t; // would be better as stage, but this is ancient now
/* EShLanguageMask counterpart */
typedef enum {
GLSLANG_STAGE_VERTEX_MASK = (1 << GLSLANG_STAGE_VERTEX),
GLSLANG_STAGE_TESSCONTROL_MASK = (1 << GLSLANG_STAGE_TESSCONTROL),
GLSLANG_STAGE_TESSEVALUATION_MASK = (1 << GLSLANG_STAGE_TESSEVALUATION),
GLSLANG_STAGE_GEOMETRY_MASK = (1 << GLSLANG_STAGE_GEOMETRY),
GLSLANG_STAGE_FRAGMENT_MASK = (1 << GLSLANG_STAGE_FRAGMENT),
GLSLANG_STAGE_COMPUTE_MASK = (1 << GLSLANG_STAGE_COMPUTE),
GLSLANG_STAGE_RAYGEN_MASK = (1 << GLSLANG_STAGE_RAYGEN),
GLSLANG_STAGE_RAYGEN_NV_MASK = GLSLANG_STAGE_RAYGEN_MASK,
GLSLANG_STAGE_INTERSECT_MASK = (1 << GLSLANG_STAGE_INTERSECT),
GLSLANG_STAGE_INTERSECT_NV_MASK = GLSLANG_STAGE_INTERSECT_MASK,
GLSLANG_STAGE_ANYHIT_MASK = (1 << GLSLANG_STAGE_ANYHIT),
GLSLANG_STAGE_ANYHIT_NV_MASK = GLSLANG_STAGE_ANYHIT_MASK,
GLSLANG_STAGE_CLOSESTHIT_MASK = (1 << GLSLANG_STAGE_CLOSESTHIT),
GLSLANG_STAGE_CLOSESTHIT_NV_MASK = GLSLANG_STAGE_CLOSESTHIT_MASK,
GLSLANG_STAGE_MISS_MASK = (1 << GLSLANG_STAGE_MISS),
GLSLANG_STAGE_MISS_NV_MASK = GLSLANG_STAGE_MISS_MASK,
GLSLANG_STAGE_CALLABLE_MASK = (1 << GLSLANG_STAGE_CALLABLE),
GLSLANG_STAGE_CALLABLE_NV_MASK = GLSLANG_STAGE_CALLABLE_MASK,
GLSLANG_STAGE_TASK_MASK = (1 << GLSLANG_STAGE_TASK),
GLSLANG_STAGE_TASK_NV_MASK = GLSLANG_STAGE_TASK_MASK,
GLSLANG_STAGE_MESH_MASK = (1 << GLSLANG_STAGE_MESH),
GLSLANG_STAGE_MESH_NV_MASK = GLSLANG_STAGE_MESH_MASK,
LAST_ELEMENT_MARKER(GLSLANG_STAGE_MASK_COUNT),
} glslang_stage_mask_t;
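/* Usage note (illustrative): the *_MASK values are single-bit flags derived
 * from the stage enum above, so they combine and test with bitwise operators:
 *   int graphics_stages = GLSLANG_STAGE_VERTEX_MASK | GLSLANG_STAGE_FRAGMENT_MASK;
 *   int uses_fragment   = (graphics_stages & GLSLANG_STAGE_FRAGMENT_MASK) != 0;
 */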
/* EShSource counterpart */
typedef enum {
GLSLANG_SOURCE_NONE,
GLSLANG_SOURCE_GLSL,
GLSLANG_SOURCE_HLSL,
LAST_ELEMENT_MARKER(GLSLANG_SOURCE_COUNT),
} glslang_source_t;
/* EShClient counterpart */
typedef enum {
GLSLANG_CLIENT_NONE,
GLSLANG_CLIENT_VULKAN,
GLSLANG_CLIENT_OPENGL,
LAST_ELEMENT_MARKER(GLSLANG_CLIENT_COUNT),
} glslang_client_t;
/* EShTargetLanguage counterpart */
typedef enum {
GLSLANG_TARGET_NONE,
GLSLANG_TARGET_SPV,
LAST_ELEMENT_MARKER(GLSLANG_TARGET_COUNT),
} glslang_target_language_t;
/* SH_TARGET_ClientVersion counterpart */
typedef enum {
GLSLANG_TARGET_VULKAN_1_0 = (1 << 22),
GLSLANG_TARGET_VULKAN_1_1 = (1 << 22) | (1 << 12),
GLSLANG_TARGET_VULKAN_1_2 = (1 << 22) | (2 << 12),
GLSLANG_TARGET_VULKAN_1_3 = (1 << 22) | (3 << 12),
GLSLANG_TARGET_VULKAN_1_4 = (1 << 22) | (4 << 12),
GLSLANG_TARGET_OPENGL_450 = 450,
LAST_ELEMENT_MARKER(GLSLANG_TARGET_CLIENT_VERSION_COUNT = 6),
} glslang_target_client_version_t;
/* SH_TARGET_LanguageVersion counterpart */
typedef enum {
GLSLANG_TARGET_SPV_1_0 = (1 << 16),
GLSLANG_TARGET_SPV_1_1 = (1 << 16) | (1 << 8),
GLSLANG_TARGET_SPV_1_2 = (1 << 16) | (2 << 8),
GLSLANG_TARGET_SPV_1_3 = (1 << 16) | (3 << 8),
GLSLANG_TARGET_SPV_1_4 = (1 << 16) | (4 << 8),
GLSLANG_TARGET_SPV_1_5 = (1 << 16) | (5 << 8),
GLSLANG_TARGET_SPV_1_6 = (1 << 16) | (6 << 8),
LAST_ELEMENT_MARKER(GLSLANG_TARGET_LANGUAGE_VERSION_COUNT = 7),
} glslang_target_language_version_t;
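/*
 * Illustrative note (not part of glslang): the Vulkan client versions above
 * reuse Vulkan's packed encoding, (major << 22) | (minor << 12), and the
 * SPIR-V versions reuse the SPIR-V header encoding, (major << 16) | (minor << 8),
 * so minor versions can be recovered with shifts when needed. The helper names
 * below are invented for this sketch.
 */
static inline unsigned glslang_example_vulkan_minor(glslang_target_client_version_t v)
{
    return ((unsigned)v >> 12) & 0x3ffu; /* e.g. GLSLANG_TARGET_VULKAN_1_3 -> 3 */
}
static inline unsigned glslang_example_spv_minor(glslang_target_language_version_t v)
{
    return ((unsigned)v >> 8) & 0xffu; /* e.g. GLSLANG_TARGET_SPV_1_6 -> 6 */
}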
/* EShExecutable counterpart */
typedef enum { GLSLANG_EX_VERTEX_FRAGMENT, GLSLANG_EX_FRAGMENT } glslang_executable_t;
// EShOptimizationLevel counterpart
// This enum is not used in the current C interface, but could be added at a later date.
// GLSLANG_OPT_NONE is the current default.
typedef enum {
GLSLANG_OPT_NO_GENERATION,
GLSLANG_OPT_NONE,
GLSLANG_OPT_SIMPLE,
GLSLANG_OPT_FULL,
LAST_ELEMENT_MARKER(GLSLANG_OPT_LEVEL_COUNT),
} glslang_optimization_level_t;
/* EShTextureSamplerTransformMode counterpart */
typedef enum {
GLSLANG_TEX_SAMP_TRANS_KEEP,
GLSLANG_TEX_SAMP_TRANS_UPGRADE_TEXTURE_REMOVE_SAMPLER,
LAST_ELEMENT_MARKER(GLSLANG_TEX_SAMP_TRANS_COUNT),
} glslang_texture_sampler_transform_mode_t;
/* EShMessages counterpart */
typedef enum {
GLSLANG_MSG_DEFAULT_BIT = 0,
GLSLANG_MSG_RELAXED_ERRORS_BIT = (1 << 0),
GLSLANG_MSG_SUPPRESS_WARNINGS_BIT = (1 << 1),
GLSLANG_MSG_AST_BIT = (1 << 2),
GLSLANG_MSG_SPV_RULES_BIT = (1 << 3),
GLSLANG_MSG_VULKAN_RULES_BIT = (1 << 4),
GLSLANG_MSG_ONLY_PREPROCESSOR_BIT = (1 << 5),
GLSLANG_MSG_READ_HLSL_BIT = (1 << 6),
GLSLANG_MSG_CASCADING_ERRORS_BIT = (1 << 7),
GLSLANG_MSG_KEEP_UNCALLED_BIT = (1 << 8),
GLSLANG_MSG_HLSL_OFFSETS_BIT = (1 << 9),
GLSLANG_MSG_DEBUG_INFO_BIT = (1 << 10),
GLSLANG_MSG_HLSL_ENABLE_16BIT_TYPES_BIT = (1 << 11),
GLSLANG_MSG_HLSL_LEGALIZATION_BIT = (1 << 12),
GLSLANG_MSG_HLSL_DX9_COMPATIBLE_BIT = (1 << 13),
GLSLANG_MSG_BUILTIN_SYMBOL_TABLE_BIT = (1 << 14),
GLSLANG_MSG_ENHANCED = (1 << 15),
GLSLANG_MSG_ABSOLUTE_PATH = (1 << 16),
GLSLANG_MSG_DISPLAY_ERROR_COLUMN = (1 << 17),
GLSLANG_MSG_LINK_TIME_OPTIMIZATION_BIT = (1 << 18),
LAST_ELEMENT_MARKER(GLSLANG_MSG_COUNT),
} glslang_messages_t;
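/* Usage note (illustrative): these are bit flags, normally OR-ed together and
 * stored in glslang_input_t::messages, e.g. for Vulkan SPIR-V generation with
 * warnings suppressed:
 *   input.messages = (glslang_messages_t)(GLSLANG_MSG_SPV_RULES_BIT |
 *                                         GLSLANG_MSG_VULKAN_RULES_BIT |
 *                                         GLSLANG_MSG_SUPPRESS_WARNINGS_BIT);
 */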
/* EShReflectionOptions counterpart */
typedef enum {
GLSLANG_REFLECTION_DEFAULT_BIT = 0,
GLSLANG_REFLECTION_STRICT_ARRAY_SUFFIX_BIT = (1 << 0),
GLSLANG_REFLECTION_BASIC_ARRAY_SUFFIX_BIT = (1 << 1),
GLSLANG_REFLECTION_INTERMEDIATE_IOO_BIT = (1 << 2),
GLSLANG_REFLECTION_SEPARATE_BUFFERS_BIT = (1 << 3),
GLSLANG_REFLECTION_ALL_BLOCK_VARIABLES_BIT = (1 << 4),
GLSLANG_REFLECTION_UNWRAP_IO_BLOCKS_BIT = (1 << 5),
GLSLANG_REFLECTION_ALL_IO_VARIABLES_BIT = (1 << 6),
GLSLANG_REFLECTION_SHARED_STD140_SSBO_BIT = (1 << 7),
GLSLANG_REFLECTION_SHARED_STD140_UBO_BIT = (1 << 8),
LAST_ELEMENT_MARKER(GLSLANG_REFLECTION_COUNT),
} glslang_reflection_options_t;
/* EProfile counterpart (from Versions.h) */
typedef enum {
GLSLANG_BAD_PROFILE = 0,
GLSLANG_NO_PROFILE = (1 << 0),
GLSLANG_CORE_PROFILE = (1 << 1),
GLSLANG_COMPATIBILITY_PROFILE = (1 << 2),
GLSLANG_ES_PROFILE = (1 << 3),
LAST_ELEMENT_MARKER(GLSLANG_PROFILE_COUNT),
} glslang_profile_t;
/* Shader options */
typedef enum {
GLSLANG_SHADER_DEFAULT_BIT = 0,
GLSLANG_SHADER_AUTO_MAP_BINDINGS = (1 << 0),
GLSLANG_SHADER_AUTO_MAP_LOCATIONS = (1 << 1),
GLSLANG_SHADER_VULKAN_RULES_RELAXED = (1 << 2),
LAST_ELEMENT_MARKER(GLSLANG_SHADER_COUNT),
} glslang_shader_options_t;
/* TResourceType counterpart */
typedef enum {
GLSLANG_RESOURCE_TYPE_SAMPLER,
GLSLANG_RESOURCE_TYPE_TEXTURE,
GLSLANG_RESOURCE_TYPE_IMAGE,
GLSLANG_RESOURCE_TYPE_UBO,
GLSLANG_RESOURCE_TYPE_SSBO,
GLSLANG_RESOURCE_TYPE_UAV,
LAST_ELEMENT_MARKER(GLSLANG_RESOURCE_TYPE_COUNT),
} glslang_resource_type_t;
#undef LAST_ELEMENT_MARKER
#endif
... ...
//
// Copyright (C) 2023 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifdef GLSLANG_IS_SHARED_LIBRARY
#ifdef _WIN32
#ifdef GLSLANG_EXPORTING
#define GLSLANG_EXPORT __declspec(dllexport)
#else
#define GLSLANG_EXPORT __declspec(dllimport)
#endif
#elif __GNUC__ >= 4
#define GLSLANG_EXPORT __attribute__((visibility("default")))
#endif
#endif // GLSLANG_IS_SHARED_LIBRARY
#ifndef GLSLANG_EXPORT
#define GLSLANG_EXPORT
#endif
// Symbols marked with this macro are only meant for public use by the test suite
// and do not appear in publicly installed headers. They are not considered to be
// part of the glslang library ABI.
#define GLSLANG_EXPORT_FOR_TESTS GLSLANG_EXPORT
... ...
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017, 2022-2024 Arm Limited.
// Copyright (C) 2015-2018 Google, Inc.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
// Modifications Copyright (C) 2024 Valve Corporation.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _VERSIONS_INCLUDED_
#define _VERSIONS_INCLUDED_
#define LAST_ELEMENT_MARKER(x) x
//
// Help manage multiple profiles, versions, extensions etc.
//
//
// Profiles are set up for masking operations, so queries can be done on multiple
// profiles at the same time.
//
// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible
// defects from mixing the two different forms.
//
typedef enum : unsigned {
EBadProfile = 0,
ENoProfile = (1 << 0), // only for desktop, before profiles showed up
ECoreProfile = (1 << 1),
ECompatibilityProfile = (1 << 2),
EEsProfile = (1 << 3),
LAST_ELEMENT_MARKER(EProfileCount),
} EProfile;
namespace glslang {
//
// Map from profile enum to externally readable text name.
//
inline const char* ProfileName(EProfile profile)
{
switch (profile) {
case ENoProfile: return "none";
case ECoreProfile: return "core";
case ECompatibilityProfile: return "compatibility";
case EEsProfile: return "es";
default: return "unknown profile";
}
}
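//
// Illustrative sketch (not part of glslang): because each profile value is a
// single bit, a query against several profiles at once is just a mask test.
// ProfileAllowedExample is a name invented for this example.
//
inline bool ProfileAllowedExample(EProfile profile, unsigned allowedMask)
{
    // e.g. allowedMask = ECoreProfile | EEsProfile
    return (static_cast<unsigned>(profile) & allowedMask) != 0;
}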
//
// What source rules, validation rules, target language, etc. are needed or
// desired for SPIR-V?
//
// 0 means a target or rule set is not enabled (ignore rules from that entity).
// Non-0 means to apply semantic rules arising from that version of its rule set.
// The union of all requested rule sets will be applied.
//
struct SpvVersion {
SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0), vulkanRelaxed(false) {}
unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header
int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX"
int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use
int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX"
bool vulkanRelaxed; // relax changes to GLSL for Vulkan, allowing some GL-specific constructs to be compiled to the Vulkan SPIR-V target
};
//
// The behaviors from the GLSL "#extension extension_name : behavior"
//
typedef enum {
EBhMissing = 0,
EBhRequire,
EBhEnable,
EBhWarn,
EBhDisable,
EBhDisablePartial // use as initial state of an extension that is only partially implemented
} TExtensionBehavior;
//
// Symbolic names for extensions. Strings may be directly used when calling the
// functions, but better to have the compiler do spelling checks.
//
const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D";
const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives";
const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
const char* const E_GL_EXT_YUV_target = "GL_EXT_YUV_target";
const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_atomic_counter_ops = "GL_ARB_shader_atomic_counter_ops";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_gpu_shader_fp64 = "GL_ARB_gpu_shader_fp64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_ARB_shader_bit_encoding = "GL_ARB_shader_bit_encoding";
const char* const E_GL_ARB_shader_image_size = "GL_ARB_shader_image_size";
const char* const E_GL_ARB_shader_storage_buffer_object = "GL_ARB_shader_storage_buffer_object";
const char* const E_GL_ARB_shading_language_packing = "GL_ARB_shading_language_packing";
const char* const E_GL_ARB_texture_query_lod = "GL_ARB_texture_query_lod";
const char* const E_GL_ARB_vertex_attrib_64bit = "GL_ARB_vertex_attrib_64bit";
const char* const E_GL_ARB_draw_instanced = "GL_ARB_draw_instanced";
const char* const E_GL_ARB_fragment_coord_conventions = "GL_ARB_fragment_coord_conventions";
const char* const E_GL_ARB_bindless_texture = "GL_ARB_bindless_texture";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
const char* const E_GL_KHR_shader_subgroup_rotate = "GL_KHR_shader_subgroup_rotate";
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
const char* const E_GL_KHR_memory_scope_semantics = "GL_KHR_memory_scope_semantics";
const char* const E_GL_KHR_cooperative_matrix = "GL_KHR_cooperative_matrix";
const char* const E_GL_EXT_shader_atomic_int64 = "GL_EXT_shader_atomic_int64";
const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
// EXT extensions
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
const char* const E_GL_EXT_debug_printf = "GL_EXT_debug_printf";
const char* const E_GL_EXT_ray_tracing = "GL_EXT_ray_tracing";
const char* const E_GL_EXT_ray_query = "GL_EXT_ray_query";
const char* const E_GL_EXT_ray_flags_primitive_culling = "GL_EXT_ray_flags_primitive_culling";
const char* const E_GL_EXT_ray_cull_mask = "GL_EXT_ray_cull_mask";
const char* const E_GL_EXT_blend_func_extended = "GL_EXT_blend_func_extended";
const char* const E_GL_EXT_shader_implicit_conversions = "GL_EXT_shader_implicit_conversions";
const char* const E_GL_EXT_fragment_shading_rate = "GL_EXT_fragment_shading_rate";
const char* const E_GL_EXT_shader_image_int64 = "GL_EXT_shader_image_int64";
const char* const E_GL_EXT_null_initializer = "GL_EXT_null_initializer";
const char* const E_GL_EXT_shared_memory_block = "GL_EXT_shared_memory_block";
const char* const E_GL_EXT_subgroup_uniform_control_flow = "GL_EXT_subgroup_uniform_control_flow";
const char* const E_GL_EXT_spirv_intrinsics = "GL_EXT_spirv_intrinsics";
const char* const E_GL_EXT_fragment_shader_barycentric = "GL_EXT_fragment_shader_barycentric";
const char* const E_GL_EXT_mesh_shader = "GL_EXT_mesh_shader";
const char* const E_GL_EXT_opacity_micromap = "GL_EXT_opacity_micromap";
const char* const E_GL_EXT_shader_quad_control = "GL_EXT_shader_quad_control";
const char* const E_GL_EXT_draw_instanced = "GL_EXT_draw_instanced";
const char* const E_GL_EXT_texture_array = "GL_EXT_texture_array";
const char* const E_GL_EXT_maximal_reconvergence = "GL_EXT_maximal_reconvergence";
const char* const E_GL_EXT_expect_assume = "GL_EXT_expect_assume";
const char* const E_GL_EXT_control_flow_attributes2 = "GL_EXT_control_flow_attributes2";
const char* const E_GL_EXT_spec_constant_composites = "GL_EXT_spec_constant_composites";
const char* const E_GL_EXT_texture_offset_non_const = "GL_EXT_texture_offset_non_const";
const char* const E_GL_EXT_nontemporal_keyword = "GL_EXT_nontemporal_keyword";
// Array of extensions to cover both extensions providing post depth coverage.
const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
// Array of extensions to cover both extensions providing ray tracing capabilities.
const char* const ray_tracing_EXTs[] = { E_GL_EXT_ray_query, E_GL_EXT_ray_tracing };
const int Num_ray_tracing_EXTs = sizeof(ray_tracing_EXTs) / sizeof(ray_tracing_EXTs[0]);
// OVR extensions
const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
// #line and #include
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
const char* const E_GL_ARB_shading_language_include = "GL_ARB_shading_language_include";
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
const char* const E_GL_AMD_shader_early_and_late_fragment_tests = "GL_AMD_shader_early_and_late_fragment_tests";
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
const char* const E_GL_NV_shading_rate_image = "GL_NV_shading_rate_image";
const char* const E_GL_NV_ray_tracing = "GL_NV_ray_tracing";
const char* const E_GL_NV_ray_tracing_motion_blur = "GL_NV_ray_tracing_motion_blur";
const char* const E_GL_NV_fragment_shader_barycentric = "GL_NV_fragment_shader_barycentric";
const char* const E_GL_NV_compute_shader_derivatives = "GL_NV_compute_shader_derivatives";
const char* const E_GL_NV_shader_texture_footprint = "GL_NV_shader_texture_footprint";
const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_shader";
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
const char* const E_GL_NV_shader_invocation_reorder = "GL_NV_shader_invocation_reorder";
const char* const E_GL_EXT_ray_tracing_position_fetch = "GL_EXT_ray_tracing_position_fetch";
const char* const E_GL_NV_displacement_micromap = "GL_NV_displacement_micromap";
const char* const E_GL_NV_shader_atomic_fp16_vector = "GL_NV_shader_atomic_fp16_vector";
const char* const E_GL_NV_cooperative_matrix2 = "GL_NV_cooperative_matrix2";
const char* const E_GL_NV_cooperative_vector = "GL_NV_cooperative_vector";
const char* const E_GL_NV_cluster_acceleration_structure = "GL_NV_cluster_acceleration_structure";
const char* const E_GL_NV_linear_swept_spheres = "GL_NV_linear_swept_spheres";
// ARM
const char* const E_GL_ARM_shader_core_builtins = "GL_ARM_shader_core_builtins";
// Arrays of extensions for the above viewportEXTs duplications
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
const char* const E_GL_QCOM_image_processing = "GL_QCOM_image_processing";
const char* const E_GL_QCOM_image_processing2 = "GL_QCOM_image_processing2";
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
const char* const E_GL_EXT_shader_integer_mix = "GL_EXT_shader_integer_mix";
// OES matching AEP
const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// EXT
const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int32 = "GL_EXT_shader_explicit_arithmetic_types_int32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int64 = "GL_EXT_shader_explicit_arithmetic_types_int64";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_shader_explicit_arithmetic_types_float16";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8";
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
const char* const E_GL_EXT_terminate_invocation = "GL_EXT_terminate_invocation";
const char* const E_GL_EXT_shader_atomic_float = "GL_EXT_shader_atomic_float";
const char* const E_GL_EXT_shader_atomic_float2 = "GL_EXT_shader_atomic_float2";
const char* const E_GL_EXT_shader_tile_image = "GL_EXT_shader_tile_image";
const char* const E_GL_EXT_texture_shadow_lod = "GL_EXT_texture_shadow_lod";
const char* const E_GL_EXT_integer_dot_product = "GL_EXT_integer_dot_product";
// Arrays of extensions for the above AEP duplications
const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);
const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);
const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);
const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);
const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);
const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);
const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);
const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);
const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);
const char* const AEP_mesh_shader[] = { E_GL_NV_mesh_shader, E_GL_EXT_mesh_shader };
const int Num_AEP_mesh_shader = sizeof(AEP_mesh_shader)/sizeof(AEP_mesh_shader[0]);
} // end namespace glslang
#endif // _VERSIONS_INCLUDED_
... ...
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
#define _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
#include <string>
#include "../Include/ResourceLimits.h"
#include "../Include/visibility.h"
// Return pointer to user-writable Resource to pass through API in
// future-proof way.
GLSLANG_EXPORT extern TBuiltInResource* GetResources();
// These are the default resources for TBuiltInResource, used both for
// - parsing, when the user didn't supply a resource configuration, and
// - dumping out a template for user construction of a config file.
GLSLANG_EXPORT extern const TBuiltInResource* GetDefaultResources();
// Returns the DefaultTBuiltInResource as a human-readable string.
GLSLANG_EXPORT std::string GetDefaultTBuiltInResourceString();
// Decodes the resource limits from |config| to |resources|.
GLSLANG_EXPORT void DecodeResourceLimits(TBuiltInResource* resources, char* config);
#endif // _STAND_ALONE_RESOURCE_LIMITS_INCLUDED_
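//
// Illustrative usage sketch (not part of this header): the defaults are
// typically handed straight to the compile call, or dumped as a starting
// point for a custom configuration. TShader::parse() is assumed from
// glslang/Public/ShaderLang.h; the variable names are invented here.
//
//   const TBuiltInResource* resources = GetDefaultResources();
//   std::string configTemplate = GetDefaultTBuiltInResourceString();
//   // ... later: shader.parse(resources, 450, false, EShMsgDefault);
//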
... ...
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMPILER_INTERFACE_INCLUDED_
#define _COMPILER_INTERFACE_INCLUDED_
#include "../Include/ResourceLimits.h"
#include "../Include/visibility.h"
#include "../MachineIndependent/Versions.h"
#include <cstring>
#include <vector>
#ifdef _WIN32
#define C_DECL __cdecl
#else
#define C_DECL
#endif
//
// This is the platform independent interface between an OGL driver
// and the shading language compiler/linker.
//
#ifdef __cplusplus
extern "C" {
#endif
//
// Call before doing any other compiler/linker operations.
//
// (Call once per process, not once per thread.)
//
GLSLANG_EXPORT int ShInitialize();
//
// Call this at process shutdown to clean up memory.
//
GLSLANG_EXPORT int ShFinalize();
//
// Types of languages the compiler can consume.
//
typedef enum {
EShLangVertex,
EShLangTessControl,
EShLangTessEvaluation,
EShLangGeometry,
EShLangFragment,
EShLangCompute,
EShLangRayGen,
EShLangRayGenNV = EShLangRayGen,
EShLangIntersect,
EShLangIntersectNV = EShLangIntersect,
EShLangAnyHit,
EShLangAnyHitNV = EShLangAnyHit,
EShLangClosestHit,
EShLangClosestHitNV = EShLangClosestHit,
EShLangMiss,
EShLangMissNV = EShLangMiss,
EShLangCallable,
EShLangCallableNV = EShLangCallable,
EShLangTask,
EShLangTaskNV = EShLangTask,
EShLangMesh,
EShLangMeshNV = EShLangMesh,
LAST_ELEMENT_MARKER(EShLangCount),
} EShLanguage; // would be better as stage, but this is ancient now
typedef enum : unsigned {
EShLangVertexMask = (1 << EShLangVertex),
EShLangTessControlMask = (1 << EShLangTessControl),
EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
EShLangGeometryMask = (1 << EShLangGeometry),
EShLangFragmentMask = (1 << EShLangFragment),
EShLangComputeMask = (1 << EShLangCompute),
EShLangRayGenMask = (1 << EShLangRayGen),
EShLangRayGenNVMask = EShLangRayGenMask,
EShLangIntersectMask = (1 << EShLangIntersect),
EShLangIntersectNVMask = EShLangIntersectMask,
EShLangAnyHitMask = (1 << EShLangAnyHit),
EShLangAnyHitNVMask = EShLangAnyHitMask,
EShLangClosestHitMask = (1 << EShLangClosestHit),
EShLangClosestHitNVMask = EShLangClosestHitMask,
EShLangMissMask = (1 << EShLangMiss),
EShLangMissNVMask = EShLangMissMask,
EShLangCallableMask = (1 << EShLangCallable),
EShLangCallableNVMask = EShLangCallableMask,
EShLangTaskMask = (1 << EShLangTask),
EShLangTaskNVMask = EShLangTaskMask,
EShLangMeshMask = (1 << EShLangMesh),
EShLangMeshNVMask = EShLangMeshMask,
LAST_ELEMENT_MARKER(EShLanguageMaskCount),
} EShLanguageMask;
namespace glslang {
class TType;
typedef enum {
EShSourceNone,
EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL)
EShSourceHlsl, // HLSL
LAST_ELEMENT_MARKER(EShSourceCount),
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
typedef enum {
EShClientNone, // use when there is no client, e.g. for validation
EShClientVulkan, // as GLSL dialect, specifies KHR_vulkan_glsl extension
EShClientOpenGL, // as GLSL dialect, specifies ARB_gl_spirv extension
LAST_ELEMENT_MARKER(EShClientCount),
} EShClient;
typedef enum {
EShTargetNone,
EShTargetSpv, // SPIR-V (preferred spelling)
EshTargetSpv = EShTargetSpv, // legacy spelling
LAST_ELEMENT_MARKER(EShTargetCount),
} EShTargetLanguage;
typedef enum {
EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1
EShTargetVulkan_1_2 = (1 << 22) | (2 << 12), // Vulkan 1.2
EShTargetVulkan_1_3 = (1 << 22) | (3 << 12), // Vulkan 1.3
EShTargetVulkan_1_4 = (1 << 22) | (4 << 12), // Vulkan 1.4
EShTargetOpenGL_450 = 450, // OpenGL
LAST_ELEMENT_MARKER(EShTargetClientVersionCount = 6),
} EShTargetClientVersion;
typedef EShTargetClientVersion EshTargetClientVersion;
typedef enum {
EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0
EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1
EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2
EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3
EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4
EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5
EShTargetSpv_1_6 = (1 << 16) | (6 << 8), // SPIR-V 1.6
LAST_ELEMENT_MARKER(EShTargetLanguageVersionCount = 7),
} EShTargetLanguageVersion;
//
// Following are a series of helper enums for managing layouts and qualifiers,
// used for TPublicType, TType, others.
//
enum TLayoutPacking {
ElpNone,
ElpShared, // default, but different than saying nothing
ElpStd140,
ElpStd430,
ElpPacked,
ElpScalar,
ElpCount // If expanding, see bitfield width below
};
struct TInputLanguage {
EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
EShClient dialect;
int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
bool vulkanRulesRelaxed;
};
struct TClient {
EShClient client;
EShTargetClientVersion version; // version of client itself (not the client's input dialect)
};
struct TTarget {
EShTargetLanguage language;
EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
};
// All source/client/target versions and settings.
// Can override previous methods of setting, when items are set here.
// Expected to grow, as more are added, rather than growing parameter lists.
struct TEnvironment {
TInputLanguage input; // definition of the input language
TClient client; // what client is the overall compilation being done for?
TTarget target; // what to generate
};
GLSLANG_EXPORT const char* StageName(EShLanguage);
} // end namespace glslang
//
// Types of output the linker will create.
//
typedef enum {
EShExVertexFragment,
EShExFragment
} EShExecutable;
//
// Optimization level for the compiler.
//
typedef enum {
EShOptNoGeneration,
EShOptNone,
EShOptSimple, // Optimizations that can be done quickly
EShOptFull, // Optimizations that will take more time
LAST_ELEMENT_MARKER(EshOptLevelCount),
} EShOptimizationLevel;
//
// Texture and Sampler transformation mode.
//
typedef enum {
EShTexSampTransKeep, // keep textures and samplers as is (default)
EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embedded sampler into sampled texture and throw away all samplers
LAST_ELEMENT_MARKER(EShTexSampTransCount),
} EShTextureSamplerTransformMode;
//
// Message choices for what errors and warnings are given.
//
enum EShMessages : unsigned {
EShMsgDefault = 0, // default is to give all required errors and extra warnings
EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
EShMsgAST = (1 << 2), // print the AST intermediate representation
EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
EShMsgDebugInfo = (1 << 10), // save debug information
EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (for samplers and semantics)
EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
EShMsgEnhanced = (1 << 15), // enhanced message readability
EShMsgAbsolutePath = (1 << 16), // Output Absolute path for messages
EShMsgDisplayErrorColumn = (1 << 17), // display error message column as well as line
EShMsgLinkTimeOptimization = (1 << 18), // perform cross-stage optimizations during linking
LAST_ELEMENT_MARKER(EShMsgCount),
};
//
// Options for building reflection
//
typedef enum {
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
EShReflectionAllIOVariables = (1 << 6), // reflect all input/output variables, even if they are inactive
EShReflectionSharedStd140SSBO = (1 << 7), // Apply std140/shared rules for ubo to ssbo
EShReflectionSharedStd140UBO = (1 << 8), // Apply std140/shared rules for ubo to ssbo
LAST_ELEMENT_MARKER(EShReflectionCount),
} EShReflectionOptions;
//
// Build a table for bindings. This can be used for locating
// attributes, uniforms, globals, etc., as needed.
//
typedef struct {
const char* name;
int binding;
} ShBinding;
typedef struct {
int numBindings;
ShBinding* bindings; // array of bindings
} ShBindingTable;
//
// ShHandle held by but opaque to the driver. It is allocated,
// managed, and de-allocated by the compiler/linker. Its contents
// are defined by and used by the compiler and linker. For example,
// symbol table information and object code passed from the compiler
// to the linker can be stored where ShHandle points.
//
// If handle creation fails, 0 will be returned.
//
typedef void* ShHandle;
//
// Driver calls these to create and destroy compiler/linker
// objects.
//
GLSLANG_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int /*debugOptions unused*/); // one per shader
GLSLANG_EXPORT ShHandle ShConstructLinker(const EShExecutable, int /*debugOptions unused*/); // one per shader pair
GLSLANG_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
GLSLANG_EXPORT void ShDestruct(ShHandle);
//
// The return value of ShCompile is boolean, non-zero indicating
// success.
//
// The info-log should be written by ShCompile into
// ShHandle, so it can answer future queries.
//
GLSLANG_EXPORT int ShCompile(const ShHandle, const char* const shaderStrings[], const int numStrings,
const int* lengths, const EShOptimizationLevel, const TBuiltInResource* resources,
int, // debugOptions unused
int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
bool forwardCompatible = false, // give errors for use of deprecated features
EShMessages messages = EShMsgDefault, // warnings and errors
const char* fileName = nullptr
);
GLSLANG_EXPORT int ShLinkExt(
const ShHandle, // linker object
const ShHandle h[], // compiler objects to link together
const int numHandles);
//
// ShSetEncryptionMethod is a placeholder for specifying
// how source code is encrypted.
//
GLSLANG_EXPORT void ShSetEncryptionMethod(ShHandle);
//
// All the following return 0 if the information is not
// available in the object passed down, or the object is bad.
//
GLSLANG_EXPORT const char* ShGetInfoLog(const ShHandle);
GLSLANG_EXPORT const void* ShGetExecutable(const ShHandle);
GLSLANG_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
GLSLANG_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
//
// Tell the linker to never assign a vertex attribute to this list of physical attributes
//
GLSLANG_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
//
// Returns the location ID of the named uniform.
// Returns -1 if error.
//
GLSLANG_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
#ifdef __cplusplus
} // end extern "C"
#endif
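//
// Illustrative sketch (not part of this header): minimal use of the older
// handle-based interface declared above; new code should prefer the C++
// interface that follows. GetDefaultResources() is assumed from
// glslang/Public/ResourceLimits.h, and fragmentSource is a user-provided
// GLSL string.
//
//   ShInitialize();
//   ShHandle compiler = ShConstructCompiler(EShLangFragment, 0 /* debugOptions unused */);
//   const char* strings[] = { fragmentSource };
//   if (!ShCompile(compiler, strings, 1, nullptr, EShOptNone, GetDefaultResources(), 0))
//       puts(ShGetInfoLog(compiler));
//   ShDestruct(compiler);
//   ShFinalize();
//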
////////////////////////////////////////////////////////////////////////////////////////////
//
// Deferred-Lowering C++ Interface
// -----------------------------------
//
// Below is a new alternate C++ interface, which deprecates the above
// opaque handle-based interface.
//
// The below is further designed to handle multiple compilation units per stage, where
// the intermediate results, including the parse tree, are preserved until link time,
// rather than the above interface which is designed to have each compilation unit
// lowered at compile time. In the above model, linking occurs on the lowered results,
// whereas in this model intra-stage linking can occur at the parse tree
// (treeRoot in TIntermediate) level, and then a full stage can be lowered.
//
#include <list>
#include <string>
#include <utility>
class TCompiler;
class TInfoSink;
namespace glslang {
struct Version {
int major;
int minor;
int patch;
const char* flavor;
};
GLSLANG_EXPORT Version GetVersion();
GLSLANG_EXPORT const char* GetEsslVersionString();
GLSLANG_EXPORT const char* GetGlslVersionString();
GLSLANG_EXPORT int GetKhronosToolId();
class TIntermediate;
class TProgram;
class TPoolAllocator;
class TIoMapResolver;
// Call this exactly once per process before using anything else
GLSLANG_EXPORT bool InitializeProcess();
// Call once per process to tear down everything
GLSLANG_EXPORT void FinalizeProcess();
// Resource type for IO resolver
enum TResourceType {
EResSampler,
EResTexture,
EResImage,
EResUbo,
EResSsbo,
EResUav,
EResCount
};
enum TBlockStorageClass
{
EbsUniform = 0,
EbsStorageBuffer,
EbsPushConstant,
EbsNone, // not a uniform or buffer variable
EbsCount,
};
// Make one TShader per shader that you will link into a program. Then
// - provide the shader through setStrings() or setStringsWithLengths()
// - optionally call setEnv*(), see below for more detail
// - optionally use setPreamble() to set a special shader string that will be
// processed before all others but won't affect the validity of #version
// - optionally call addProcesses() for each setting/transform,
// see comment for class TProcesses
// - call parse(): source language and target environment must be selected
// either by correct setting of EShMessages sent to parse(), or by
// explicitly calling setEnv*()
// - query the info logs
//
// N.B.: Does not yet support having the same TShader instance being linked into
// multiple programs.
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
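// Illustrative sketch of the steps above (not part of this header): parse()
// and getInfoLog() are TShader members (declared later in this header),
// GetDefaultResources() is assumed from glslang/Public/ResourceLimits.h, and
// vertexSource is a user-provided GLSL string.
//
//   glslang::TShader shader(EShLangVertex);
//   const char* sources[] = { vertexSource };
//   shader.setStrings(sources, 1);
//   shader.setEnvInput(glslang::EShSourceGlsl, EShLangVertex, glslang::EShClientVulkan, 100);
//   shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_2);
//   shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_5);
//   if (!shader.parse(GetDefaultResources(), 450, false, EShMsgDefault))
//       puts(shader.getInfoLog());
//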
class TShader {
public:
GLSLANG_EXPORT explicit TShader(EShLanguage);
GLSLANG_EXPORT virtual ~TShader();
GLSLANG_EXPORT void setStrings(const char* const* s, int n);
GLSLANG_EXPORT void setStringsWithLengths(
const char* const* s, const int* l, int n);
GLSLANG_EXPORT void setStringsWithLengthsAndNames(
const char* const* s, const int* l, const char* const* names, int n);
void setPreamble(const char* s) { preamble = s; }
GLSLANG_EXPORT void setEntryPoint(const char* entryPoint);
GLSLANG_EXPORT void setSourceEntryPoint(const char* sourceEntryPointName);
GLSLANG_EXPORT void addProcesses(const std::vector<std::string>&);
GLSLANG_EXPORT void setUniqueId(unsigned long long id);
GLSLANG_EXPORT void setOverrideVersion(int version);
GLSLANG_EXPORT void setDebugInfo(bool debugInfo);
// IO resolver binding data: see comments in ShaderLang.cpp
GLSLANG_EXPORT void setShiftBinding(TResourceType res, unsigned int base);
GLSLANG_EXPORT void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding
GLSLANG_EXPORT void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
GLSLANG_EXPORT void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
GLSLANG_EXPORT void setResourceSetBinding(const std::vector<std::string>& base);
GLSLANG_EXPORT void setAutoMapBindings(bool map);
GLSLANG_EXPORT void setAutoMapLocations(bool map);
GLSLANG_EXPORT void addUniformLocationOverride(const char* name, int loc);
GLSLANG_EXPORT void setUniformLocationBase(int base);
GLSLANG_EXPORT void setInvertY(bool invert);
GLSLANG_EXPORT void setDxPositionW(bool dxPosW);
GLSLANG_EXPORT void setEnhancedMsgs();
#ifdef ENABLE_HLSL
GLSLANG_EXPORT void setHlslIoMapping(bool hlslIoMap);
GLSLANG_EXPORT void setFlattenUniformArrays(bool flatten);
#endif
GLSLANG_EXPORT void setNoStorageFormat(bool useUnknownFormat);
GLSLANG_EXPORT void setNanMinMaxClamp(bool nanMinMaxClamp);
GLSLANG_EXPORT void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
GLSLANG_EXPORT void addBlockStorageOverride(const char* nameStr, glslang::TBlockStorageClass backing);
GLSLANG_EXPORT void setGlobalUniformBlockName(const char* name);
GLSLANG_EXPORT void setAtomicCounterBlockName(const char* name);
GLSLANG_EXPORT void setGlobalUniformSet(unsigned int set);
GLSLANG_EXPORT void setGlobalUniformBinding(unsigned int binding);
GLSLANG_EXPORT void setAtomicCounterBlockSet(unsigned int set);
GLSLANG_EXPORT void setAtomicCounterBlockBinding(unsigned int binding);
GLSLANG_EXPORT void addSourceText(const char* text, size_t len);
GLSLANG_EXPORT void setSourceFile(const char* file);
// For setting up the environment (cleared to nothingness in the constructor).
// These must be called so that parsing is done for the right source language and
// target environment, either indirectly through TranslateEnvironment() based on
// EShMessages et al., or directly by the user.
//
// setEnvInput: The input source language and stage. If generating code for a
// specific client, the input client semantics to use and the
// version of that client's input semantics to use, otherwise
// use EShClientNone and version of 0, e.g. for validation mode.
// Note 'version' does not describe the target environment,
// just the version of the source dialect to compile under.
// For example, to choose the Vulkan dialect of GLSL defined by
// version 100 of the KHR_vulkan_glsl extension: lang = EShSourceGlsl,
// dialect = EShClientVulkan, and version = 100.
//
// See the definitions of TEnvironment, EShSource, EShLanguage,
// and EShClient for choices and more detail.
//
// setEnvClient: The client that will be hosting the execution, and its version.
// Note 'version' is not the version of the languages involved, but
// the version of the client environment.
// Use EShClientNone and version of 0 if there is no client, e.g.
// for validation mode.
//
// See EShTargetClientVersion for choices.
//
// setEnvTarget: The language to translate to when generating code, and that
// language's version.
// Use EShTargetNone and version of 0 if there is no target, e.g.
// for validation mode.
//
void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
{
environment.input.languageFamily = lang;
environment.input.stage = envStage;
environment.input.dialect = client;
environment.input.dialectVersion = version;
}
void setEnvClient(EShClient client, EShTargetClientVersion version)
{
environment.client.client = client;
environment.client.version = version;
}
void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
{
environment.target.language = lang;
environment.target.version = version;
}
void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }
#ifdef ENABLE_HLSL
void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
#else
bool getEnvTargetHlslFunctionality1() const { return false; }
#endif
void setEnvInputVulkanRulesRelaxed() { environment.input.vulkanRulesRelaxed = true; }
bool getEnvInputVulkanRulesRelaxed() const { return environment.input.vulkanRulesRelaxed; }
void setCompileOnly() { compileOnly = true; }
bool getCompileOnly() const { return compileOnly; }
// Interface to #include handlers.
//
// To support #include, a client of Glslang does the following:
// 1. Call setStringsWithNames to set the source strings and associated
// names. For example, the names could be the names of the files
// containing the shader sources.
// 2. Call parse with an Includer.
//
// When the Glslang parser encounters an #include directive, it calls
// the Includer's include method with the requested include name
// together with the current string name. The returned IncludeResult
// contains the fully resolved name of the included source, together
// with the source text that should replace the #include directive
// in the source stream. After parsing that source, Glslang will
// release the IncludeResult object.
class Includer {
public:
// An IncludeResult contains the resolved name and content of a source
// inclusion.
struct IncludeResult {
IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
// For a successful inclusion, the fully resolved name of the requested
// include. For example, in a file system-based includer, full resolution
// should convert a relative path name into an absolute path name.
// For a failed inclusion, this is an empty string.
const std::string headerName;
// The content and byte length of the requested inclusion. The
// Includer producing this IncludeResult retains ownership of the
// storage.
// For a failed inclusion, headerData points to a string
// containing error details.
const char* const headerData;
const size_t headerLength;
// Include resolver's context.
void* userData;
protected:
IncludeResult& operator=(const IncludeResult&);
IncludeResult();
};
// For both include methods below:
//
// Resolves an inclusion request by name, current source name,
// and include depth.
// On success, returns an IncludeResult containing the resolved name
// and content of the include.
// On failure, returns a nullptr, or an IncludeResult
// with an empty string for the headerName and error details in the
// header field.
// The Includer retains ownership of the contents
// of the returned IncludeResult value, and those contents must
// remain valid until the releaseInclude method is called on that
// IncludeResult object.
//
// Note "local" vs. "system" is not an "either/or": "local" is an
// extra thing to do over "system". Both might get called, as per
// the C++ specification.
// For the "system" or <>-style includes; search the "system" paths.
virtual IncludeResult* includeSystem(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// For the "local"-only aspect of a "" include. Should not search in the
// "system" paths, because on returning a failure, the parser will
// call includeSystem() to look in the "system" locations.
virtual IncludeResult* includeLocal(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// Signals that the parser will no longer use the contents of the
// specified IncludeResult.
virtual void releaseInclude(IncludeResult*) = 0;
virtual ~Includer() {}
};
// Fail all Includer searches
class ForbidIncluder : public Includer {
public:
virtual void releaseInclude(IncludeResult*) override { }
};
GLSLANG_EXPORT bool parse(
const TBuiltInResource*, int defaultVersion, EProfile defaultProfile,
bool forceDefaultVersionAndProfile, bool forwardCompatible,
EShMessages, Includer&);
bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages messages)
{
TShader::ForbidIncluder includer;
return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
}
// Equivalent to parse() without a default profile and without forcing defaults.
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
}
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
Includer& includer)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
}
// NOTE: Doing just preprocessing to obtain a correct preprocessed shader string
// is not an officially supported or fully working path.
GLSLANG_EXPORT bool preprocess(
const TBuiltInResource* builtInResources, int defaultVersion,
EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages message, std::string* outputString,
Includer& includer);
GLSLANG_EXPORT const char* getInfoLog();
GLSLANG_EXPORT const char* getInfoDebugLog();
EShLanguage getStage() const { return stage; }
TIntermediate* getIntermediate() const { return intermediate; }
protected:
TPoolAllocator* pool;
EShLanguage stage;
TCompiler* compiler;
TIntermediate* intermediate;
TInfoSink* infoSink;
// strings and lengths follow the standard for glShaderSource:
// strings is an array of numStrings pointers to string data.
// lengths can be null, but if not it is an array of numStrings
// integers containing the length of the associated strings.
// if lengths is null or lengths[n] < 0 the associated strings[n] is
// assumed to be null-terminated.
// stringNames holds optional names for the strings. If stringNames
// is null, then none of the strings has a name. If a particular element in
// stringNames is null, then the corresponding string does not have a name.
const char* const* strings; // explicit code to compile, see previous comment
const int* lengths;
const char* const* stringNames;
int numStrings; // size of the above arrays
const char* preamble; // string of implicit code to compile before the explicitly provided code
// a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
std::string sourceEntryPointName;
// overrides #version in shader source or default version if #version isn't present
int overrideVersion;
TEnvironment environment;
// Indicates this shader is meant to be used without linking
bool compileOnly = false;
friend class TProgram;
private:
TShader& operator=(TShader&);
};
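// Example sketch (not part of the original header): a minimal Includer, following the
// #include-handling comments inside TShader above. It serves a single header from
// memory; the class and member names are illustrative, and a real includer would
// normally resolve and read files instead.
class SingleHeaderIncluder : public TShader::Includer {
public:
    SingleHeaderIncluder(const std::string& headerName, const std::string& headerSource)
        : name(headerName), source(headerSource) { }
    IncludeResult* includeLocal(const char* headerName, const char* /*includerName*/,
                                size_t /*inclusionDepth*/) override
    {
        if (name != headerName)
            return nullptr; // not ours; the parser will try includeSystem() next
        // The includer retains ownership of 'source'; the IncludeResult only refers to it.
        return new IncludeResult(name, source.data(), source.size(), nullptr);
    }
    void releaseInclude(IncludeResult* result) override { delete result; }
private:
    std::string name;
    std::string source;
};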
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
GLSLANG_EXPORT TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
const TType* getType() const { return type; }
GLSLANG_EXPORT int getBinding() const;
GLSLANG_EXPORT void dump() const;
static TObjectReflection badReflection() { return TObjectReflection(); }
GLSLANG_EXPORT unsigned int layoutLocation() const;
std::string name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
int numMembers;
int arrayStride; // stride of an array variable
int topLevelArraySize; // size of the top-level variable in a storage buffer member
int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
EShLanguageMask stages;
protected:
TObjectReflection()
: offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
{
}
const TType* type;
};
class TReflection;
class TIoMapper;
struct TVarEntryInfo;
// Allows customizing the binding layout after linking.
// Every used uniform variable will invoke at least validateBinding.
// If validateBinding returns true, then resolveBinding, resolveSet,
// and resolveUniformLocation are invoked to resolve the binding,
// descriptor set index, and uniform location respectively.
//
// Invocations happen in a particular order:
// 1) all shader inputs
// 2) all shader outputs
// 3) all uniforms with binding and set already defined
// 4) all uniforms with binding but no set defined
// 5) all uniforms with set but no binding defined
// 6) all uniforms with no binding and no set defined
//
// mapIO will use this resolver in two phases. The first
// phase is a notification phase, calling the corresponding
// notify callbacks; this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets
// and may result in an error.
class TIoMapResolver
{
public:
virtual ~TIoMapResolver() {}
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Called by mapIO when it starts its symbol collect for the given stage
virtual void beginCollect(EShLanguage stage) = 0;
// Called by mapIO when it has finished the symbol collect
virtual void endCollect(EShLanguage stage) = 0;
// Called by TSlotCollector to resolve storage locations or bindings
virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by TSlotCollector to resolve resource locations or bindings
virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by mapIO.addStage to set the shader stage mask, marking a stage as added to this pipeline
virtual void addStage(EShLanguage stage, TIntermediate& stageIntermediate) = 0;
};
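// Example sketch (not part of the original header): a pass-through resolver honoring the
// contract described above. Returning -1 from the resolve* callbacks keeps the current
// value; returning true from the validate* callbacks accepts the current setup. The class
// name is illustrative.
class KeepCurrentIoMapResolver : public TIoMapResolver {
public:
    bool validateBinding(EShLanguage, TVarEntryInfo&) override { return true; }
    int resolveBinding(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveSet(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveUniformLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    bool validateInOut(EShLanguage, TVarEntryInfo&) override { return true; }
    int resolveInOutLocation(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveInOutComponent(EShLanguage, TVarEntryInfo&) override { return -1; }
    int resolveInOutIndex(EShLanguage, TVarEntryInfo&) override { return -1; }
    void notifyBinding(EShLanguage, TVarEntryInfo&) override { }
    void notifyInOut(EShLanguage, TVarEntryInfo&) override { }
    void beginNotifications(EShLanguage) override { }
    void endNotifications(EShLanguage) override { }
    void beginResolve(EShLanguage) override { }
    void endResolve(EShLanguage) override { }
    void beginCollect(EShLanguage) override { }
    void endCollect(EShLanguage) override { }
    void reserverStorageSlot(TVarEntryInfo&, TInfoSink&) override { }
    void reserverResourceSlot(TVarEntryInfo&, TInfoSink&) override { }
    void addStage(EShLanguage, TIntermediate&) override { }
};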
// I/O mapper
class TIoMapper {
public:
TIoMapper() {}
virtual ~TIoMapper() {}
// grow the reflection stage by stage
bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
bool virtual setAutoPushConstantBlock(const char*, unsigned int, TLayoutPacking) { return false; }
};
// Get the default GLSL IO mapper
GLSLANG_EXPORT TIoMapper* GetGlslIoMapper();
// Make one TProgram per set of shaders that will get linked together. Add all
// the shaders that are to be linked together. After calling shader.parse()
// for all shaders, call link().
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
class TProgram {
public:
GLSLANG_EXPORT TProgram();
GLSLANG_EXPORT virtual ~TProgram();
void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
// Link Validation interface
GLSLANG_EXPORT bool link(EShMessages);
GLSLANG_EXPORT const char* getInfoLog();
GLSLANG_EXPORT const char* getInfoDebugLog();
TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
// Reflection Interface
// call first, to do liveness analysis, index mapping, etc.; returns false on failure
GLSLANG_EXPORT bool buildReflection(int opts = EShReflectionDefault);
GLSLANG_EXPORT unsigned getLocalSize(int dim) const; // return dim'th local size
GLSLANG_EXPORT int getReflectionIndex(const char *name) const;
GLSLANG_EXPORT int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
GLSLANG_EXPORT int getNumUniformVariables() const;
GLSLANG_EXPORT const TObjectReflection& getUniform(int index) const;
GLSLANG_EXPORT int getNumUniformBlocks() const;
GLSLANG_EXPORT const TObjectReflection& getUniformBlock(int index) const;
GLSLANG_EXPORT int getNumPipeInputs() const;
GLSLANG_EXPORT const TObjectReflection& getPipeInput(int index) const;
GLSLANG_EXPORT int getNumPipeOutputs() const;
GLSLANG_EXPORT const TObjectReflection& getPipeOutput(int index) const;
GLSLANG_EXPORT int getNumBufferVariables() const;
GLSLANG_EXPORT const TObjectReflection& getBufferVariable(int index) const;
GLSLANG_EXPORT int getNumBufferBlocks() const;
GLSLANG_EXPORT const TObjectReflection& getBufferBlock(int index) const;
GLSLANG_EXPORT int getNumAtomicCounters() const;
GLSLANG_EXPORT const TObjectReflection& getAtomicCounter(int index) const;
// Legacy Reflection Interface - expressed in terms of above interface
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
// can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
int getNumLiveAttributes() const { return getNumPipeInputs(); }
// can be used for glGetUniformIndices()
int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
int getPipeIOIndex(const char *name, const bool inOrOut) const
{ return getReflectionPipeIOIndex(name, inOrOut); }
// can be used for "name" part of glGetActiveUniform()
const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
// returns the binding number
int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
// returns Shaders Stages where a Uniform is present
EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockIndex(int index) const { return getUniform(index).index; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformType(int index) const { return getUniform(index).glDefineType; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getUniformArraySize(int index) const { return getUniform(index).size; }
// returns a TType*
const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
// can be used for glGetActiveUniformBlockName()
const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
// can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
// returns the block binding number
int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
// returns block index of associated counter.
int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
// returns a TType*
const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
// can be used for glGetActiveAttrib()
const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
// can be used for glGetActiveAttrib()
int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
// returns a TType*
const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
GLSLANG_EXPORT void dumpReflection();
// Get the IO resolver to use for mapIO
GLSLANG_EXPORT TIoMapResolver* getGlslIoResolver(EShLanguage stage);
// I/O mapping: apply base offsets and map live unbound variables
// If resolver is not provided it uses the previous approach
// and respects auto assignment and offsets.
GLSLANG_EXPORT bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);
protected:
GLSLANG_EXPORT bool linkStage(EShLanguage, EShMessages);
GLSLANG_EXPORT bool crossStageCheck(EShMessages);
TPoolAllocator* pool;
std::list<TShader*> stages[EShLangCount];
TIntermediate* intermediate[EShLangCount];
bool newedIntermediate[EShLangCount]; // track which intermediates were "new" versus reusing a singleton unit in a stage
TInfoSink* infoSink;
TReflection* reflection;
bool linked;
private:
TProgram(TProgram&);
TProgram& operator=(TProgram&);
};
} // end namespace glslang
#endif // _COMPILER_INTERFACE_INCLUDED_
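// Example usage sketch (not part of the original header): compile a vertex and a fragment
// shader and link them, following the lifecycle described in the comments above. The GLSL
// sources and the TBuiltInResource limits are assumed to be supplied by the caller (glslang
// also ships defaults in glslang/Public/ResourceLimits.h). The function name is illustrative.
inline bool CompileAndLinkExample(const char* vertexSrc, const char* fragmentSrc,
                                  const TBuiltInResource& resources)
{
    glslang::InitializeProcess(); // exactly once per process before anything else

    glslang::TShader vert(EShLangVertex);
    vert.setStrings(&vertexSrc, 1);
    vert.setEnvInput(glslang::EShSourceGlsl, EShLangVertex, glslang::EShClientVulkan, 100);
    vert.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
    vert.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);

    glslang::TShader frag(EShLangFragment);
    frag.setStrings(&fragmentSrc, 1);
    frag.setEnvInput(glslang::EShSourceGlsl, EShLangFragment, glslang::EShClientVulkan, 100);
    frag.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
    frag.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);

    bool ok = vert.parse(&resources, 100, false, EShMsgDefault) &&
              frag.parse(&resources, 100, false, EShMsgDefault);

    // The program is declared after the shaders so it is destructed first,
    // as required by the N.B. above.
    glslang::TProgram program;
    program.addShader(&vert);
    program.addShader(&frag);
    ok = ok && program.link(EShMsgDefault);

    if (ok && program.buildReflection())
        ok = program.getNumUniformVariables() >= 0; // reflection queries are now valid

    glslang::FinalizeProcess(); // once per process, when glslang is no longer needed
    return ok;
}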
... ...
/**
BSD 2-Clause License
Copyright (c) 2020, Travis Fort
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#ifndef _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_
#define _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_
#include "../Include/glslang_c_interface.h"
#include "../Include/visibility.h"
#ifdef __cplusplus
extern "C" {
#endif
// Returns a struct that can be used to create custom resource values.
GLSLANG_EXPORT glslang_resource_t* glslang_resource(void);
// These are the default resources for TBuiltInResources, used for both
// - parsing this string for the case where the user didn't supply one,
// - dumping out a template for user construction of a config file.
GLSLANG_EXPORT const glslang_resource_t* glslang_default_resource(void);
// Returns the DefaultTBuiltInResource as a human-readable string.
// NOTE: User is responsible for freeing this string.
GLSLANG_EXPORT const char* glslang_default_resource_string();
// Decodes the resource limits from |config| to |resources|.
GLSLANG_EXPORT void glslang_decode_resource_limits(glslang_resource_t* resources, char* config);
#ifdef __cplusplus
}
#endif
#endif // _STAND_ALONE_RESOURCE_LIMITS_C_INCLUDED_
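// Example sketch (not part of the original header; <stdio.h> and <stdlib.h> are pulled in
// just for the demo): fetch the built-in default limits and dump them as text. Per the
// note above, the caller owns and frees the string returned by
// glslang_default_resource_string().
#include <stdio.h>
#include <stdlib.h>
static void print_default_resource_limits_example(void)
{
    const glslang_resource_t* defaults = glslang_default_resource();
    (void)defaults; // pass this wherever a glslang_resource_t* is expected

    const char* text = glslang_default_resource_string();
    printf("%s\n", text);
    free((void*)text); // the user is responsible for freeing the string
}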
... ...
//
// Copyright (C) 2014 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include <string>
#include <vector>
#include "Logger.h"
#include "glslang/Include/visibility.h"
namespace glslang {
class TIntermediate;
struct SpvOptions {
bool generateDebugInfo {false};
bool stripDebugInfo {false};
bool disableOptimizer {true};
bool optimizeSize {false};
bool disassemble {false};
bool validate {false};
bool emitNonSemanticShaderDebugInfo {false};
bool emitNonSemanticShaderDebugSource{ false };
bool compileOnly{false};
bool optimizerAllowExpandedIDBound{false};
};
GLSLANG_EXPORT void GetSpirvVersion(std::string&);
GLSLANG_EXPORT int GetSpirvGeneratorVersion();
GLSLANG_EXPORT void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
SpvOptions* options = nullptr);
GLSLANG_EXPORT void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
GLSLANG_EXPORT bool OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
GLSLANG_EXPORT bool OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
}
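// Example usage sketch (not part of the original header): translate one linked stage's
// intermediate (e.g. obtained from TProgram::getIntermediate()) to SPIR-V, collecting
// messages in a SpvBuildLogger. The function name is illustrative.
inline std::vector<unsigned int> ToSpirvExample(const glslang::TIntermediate& intermediate)
{
    std::vector<unsigned int> spirv;
    spv::SpvBuildLogger logger;
    glslang::SpvOptions options;
    options.generateDebugInfo = false;
    options.validate = true; // run the validator if SPIRV-Tools support is compiled in
    glslang::GlslangToSpv(intermediate, spirv, &logger, &options);
    // logger.getAllMessages() now holds TBD/missing-feature notes, warnings, and errors.
    return spirv;
}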
... ...
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_SPIRV_LOGGER_H
#define GLSLANG_SPIRV_LOGGER_H
#include <string>
#include <vector>
#include "glslang/Include/visibility.h"
namespace spv {
// A class for holding all SPIR-V build status messages, including
// missing/TBD functionalities, warnings, and errors.
class GLSLANG_EXPORT SpvBuildLogger {
public:
SpvBuildLogger() {}
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
void missingFunctionality(const std::string& f);
// Logs a warning.
void warning(const std::string& w) { warnings.push_back(w); }
// Logs an error.
void error(const std::string& e) { errors.push_back(e); }
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
private:
SpvBuildLogger(const SpvBuildLogger&);
std::vector<std::string> tbdFeatures;
std::vector<std::string> missingFeatures;
std::vector<std::string> warnings;
std::vector<std::string> errors;
};
} // end spv namespace
#endif // GLSLANG_SPIRV_LOGGER_H
... ...
//
// Copyright (C) 2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef SPIRVREMAPPER_H
#define SPIRVREMAPPER_H
#include <string>
#include <vector>
#include <cstdlib>
#include <exception>
#ifdef GLSLANG_IS_SHARED_LIBRARY
#ifdef _WIN32
#ifdef GLSLANG_EXPORTING
#define GLSLANG_EXPORT __declspec(dllexport)
#else
#define GLSLANG_EXPORT __declspec(dllimport)
#endif
#elif __GNUC__ >= 4
#define GLSLANG_EXPORT __attribute__((visibility("default")))
#endif
#endif // GLSLANG_IS_SHARED_LIBRARY
#ifndef GLSLANG_EXPORT
#define GLSLANG_EXPORT
#endif
namespace spv {
class spirvbin_base_t
{
public:
enum Options {
NONE = 0,
STRIP = (1<<0),
MAP_TYPES = (1<<1),
MAP_NAMES = (1<<2),
MAP_FUNCS = (1<<3),
DCE_FUNCS = (1<<4),
DCE_VARS = (1<<5),
DCE_TYPES = (1<<6),
OPT_LOADSTORE = (1<<7),
OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
OPT_ALL = (OPT_LOADSTORE),
ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
};
};
} // namespace spv
#include <functional>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <set>
#include <cassert>
#include "spirv.hpp"
namespace spv {
static inline constexpr Id NoResult = 0;
// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
class GLSLANG_EXPORT spirvbin_t : public spirvbin_base_t
{
public:
spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
{ }
virtual ~spirvbin_t() { }
// remap on an existing binary in memory
void remap(std::vector<std::uint32_t>& spv, const std::vector<std::string>& whiteListStrings,
std::uint32_t opts = DO_EVERYTHING);
// remap on an existing binary in memory - legacy interface without white list
void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
// Type for error/log handler functions
typedef std::function<void(const std::string&)> errorfn_t;
typedef std::function<void(const std::string&)> logfn_t;
// Register error/log handling functions (can be lambda fn / functor / etc)
static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
static void registerLogHandler(logfn_t handler) { logHandler = handler; }
protected:
// This can be overridden to provide other message behavior if needed
virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
private:
// Local to global, or global to local ID map
typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
typedef std::unordered_set<spv::Id> idset_t;
typedef std::unordered_map<spv::Id, int> blockmap_t;
void remap(std::uint32_t opts = DO_EVERYTHING);
// Map of names to IDs
typedef std::unordered_map<std::string, spv::Id> namemap_t;
typedef std::uint32_t spirword_t;
typedef std::pair<unsigned, unsigned> range_t;
typedef std::function<void(spv::Id&)> idfn_t;
typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
// Special Values for ID map:
static const spv::Id unmapped; // unchanged from default value
static const spv::Id unused; // unused ID
static const int header_size; // SPIR header = 5 words
class id_iterator_t;
// For mapping type entries between different shaders
typedef std::vector<spirword_t> typeentry_t;
typedef std::map<spv::Id, typeentry_t> globaltypes_t;
// A set that preserves position order, and a reverse map
typedef std::set<int> posmap_t;
typedef std::unordered_map<spv::Id, int> posmap_rev_t;
// Maps an ID to the size of its base type, if known.
typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
// handle error
void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
bool isConstOp(spv::Op opCode) const;
bool isTypeOp(spv::Op opCode) const;
bool isStripOp(spv::Op opCode) const;
bool isFlowCtrl(spv::Op opCode) const;
range_t literalRange(spv::Op opCode) const;
range_t typeRange(spv::Op opCode) const;
range_t constRange(spv::Op opCode) const;
unsigned typeSizeInWords(spv::Id id) const;
unsigned idTypeSizeInWords(spv::Id id) const;
bool isStripOp(spv::Op opCode, unsigned start) const;
spv::Id& asId(unsigned word) { return spv[word]; }
const spv::Id& asId(unsigned word) const { return spv[word]; }
spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
std::uint32_t asOpCodeHash(unsigned word);
spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
unsigned idPos(spv::Id id) const;
static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
spirword_t bound() const { return spv[3]; } // return Id bound from header
spirword_t bound(spirword_t b) { return spv[3] = b; }
spirword_t genmagic() const { return spv[2]; } // generator magic
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
spirword_t schemaNum() const { return spv[4]; } // schema number from header
// Mapping fns: get
spv::Id localId(spv::Id id) const { return idMapL[id]; }
// Mapping fns: set
inline spv::Id localId(spv::Id id, spv::Id newId);
void countIds(spv::Id id);
// Return next unused new local ID.
// NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
// which std::vector<bool> doesn't have.
inline spv::Id nextUnusedId(spv::Id id);
void buildLocalMaps();
std::string literalString(unsigned word) const; // Return literal as a std::string
int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }
// bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
// spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
std::uint32_t hashType(unsigned typeStart) const;
spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
int processInstruction(unsigned word, instfn_t, idfn_t);
void validate() const;
void mapTypeConst();
void mapFnBodies();
void optLoadStore();
void dceFuncs();
void dceVars();
void dceTypes();
void mapNames();
void foldIds(); // fold IDs to smallest space
void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
void offsetIds(); // create relative offset IDs
void applyMap(); // remap per local name map
void mapRemainder(); // map any IDs we haven't touched yet
void stripDebug(); // strip all debug info
void stripDeadRefs(); // strips debug info for now-dead references after DCE
void strip(); // remove debug symbols
std::vector<spirword_t> spv; // SPIR words
std::vector<std::string> stripWhiteList;
namemap_t nameMap; // ID names from OpName
// Since we also want to do binary ops, we can't use std::vector<bool>. We could use
// boost::dynamic_bitset, but we're trying to avoid a boost dependency.
typedef std::uint64_t bits_t;
std::vector<bits_t> mapped; // which new IDs have been mapped
static const int mBits = sizeof(bits_t) * 4;
bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
size_t maxMappedId() const { return mapped.size() * mBits; }
// Add a strip range for a given instruction starting at 'start'
// Note: avoiding brace initializers to please older versions of MSVC.
void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
// Function start and end. Use unordered_map because we'll have
// many fewer functions than IDs.
std::unordered_map<spv::Id, range_t> fnPos;
// Which functions are called, anywhere in the module, with a call count
std::unordered_map<spv::Id, int> fnCalls;
posmap_t typeConstPos; // word positions that define types & consts (ordered)
posmap_rev_t idPosR; // reverse map from IDs to positions
typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
spv::Id entryPoint; // module entry point
spv::Id largestNewId; // biggest new ID we have mapped anything to
// Sections of the binary to strip, given as [begin,end)
std::vector<range_t> stripRange;
// processing options:
std::uint32_t options;
int verbose; // verbosity level
// Error latch: this is set if the error handler is ever executed. It would be better to
// use a try/catch block and throw, but that's not desired for certain environments, so
// this is the alternative.
mutable bool errorLatch;
static errorfn_t errorHandler;
static logfn_t logHandler;
};
} // namespace spv
#endif // SPIRVREMAPPER_H
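// Example usage sketch (not part of the original header): remap and strip an existing
// SPIR-V module in place. 'words' is assumed to hold a valid binary; the handler below
// is illustrative.
inline void RemapSpirvExample(std::vector<std::uint32_t>& words)
{
    spv::spirvbin_t::registerErrorHandler([](const std::string& msg) {
        (void)msg; // route remap errors to the host application's logging here
    });
    spv::spirvbin_t remapper;
    // DO_EVERYTHING = strip debug opcodes, canonicalize/map IDs, DCE, and load/store opt.
    remapper.remap(words, spv::spirvbin_t::DO_EVERYTHING);
}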
... ...
//
// Copyright (C) 2014-2016 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Call into SPIRV-Tools to disassemble, validate, and optimize.
//
#pragma once
#ifndef GLSLANG_SPV_TOOLS_H
#define GLSLANG_SPV_TOOLS_H
#if ENABLE_OPT
#include <vector>
#include <ostream>
#include <unordered_set>
#include "spirv-tools/libspirv.h"
#endif
#include "glslang/MachineIndependent/Versions.h"
#include "glslang/Include/visibility.h"
#include "GlslangToSpv.h"
#include "Logger.h"
namespace glslang {
#if ENABLE_OPT
class TIntermediate;
// Translate glslang's view of target versioning to what SPIRV-Tools uses.
GLSLANG_EXPORT spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger);
GLSLANG_EXPORT spv_target_env MapToSpirvToolsEnv(const glslang::TIntermediate& intermediate, spv::SpvBuildLogger* logger);
// Use the SPIRV-Tools disassembler to print SPIR-V using a SPV_ENV_UNIVERSAL_1_3 environment.
GLSLANG_EXPORT void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
// Use the SPIRV-Tools disassembler to print SPIR-V with a provided SPIR-V environment.
GLSLANG_EXPORT void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv,
spv_target_env requested_context);
// Apply the SPIRV-Tools validator to generated SPIR-V.
GLSLANG_EXPORT void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, bool prelegalization);
// Apply the SPIRV-Tools optimizer to generated SPIR-V. HLSL SPIR-V is legalized in the process.
GLSLANG_EXPORT void SpirvToolsTransform(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*, const SpvOptions*);
// Apply the SPIRV-Tools EliminateDeadInputComponents pass to generated SPIR-V. Put result in |spirv|.
GLSLANG_EXPORT void SpirvToolsEliminateDeadInputComponents(spv_target_env target_env, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*);
// Apply the SPIRV-Tools AnalyzeDeadOutputStores pass to generated SPIR-V. Put result in |live_locs|.
// Return true if the result is valid.
GLSLANG_EXPORT bool SpirvToolsAnalyzeDeadOutputStores(spv_target_env target_env, std::vector<unsigned int>& spirv,
std::unordered_set<uint32_t>* live_locs,
std::unordered_set<uint32_t>* live_builtins,
spv::SpvBuildLogger*);
// Apply the SPIRV-Tools EliminateDeadOutputStores and AggressiveDeadCodeElimination passes to generated SPIR-V using
// |live_locs|. Put result in |spirv|.
GLSLANG_EXPORT void SpirvToolsEliminateDeadOutputStores(spv_target_env target_env, std::vector<unsigned int>& spirv,
std::unordered_set<uint32_t>* live_locs,
std::unordered_set<uint32_t>* live_builtins,
spv::SpvBuildLogger*);
// Apply the SPIRV-Tools optimizer to strip debug info from SPIR-V. This is implicitly done by
// SpirvToolsTransform if spvOptions->stripDebugInfo is set, but can be called separately if
// optimization is disabled.
GLSLANG_EXPORT void SpirvToolsStripDebugInfo(const glslang::TIntermediate& intermediate,
std::vector<unsigned int>& spirv, spv::SpvBuildLogger*);
#endif
} // end namespace glslang
#endif // GLSLANG_SPV_TOOLS_H
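// Example sketch (not part of the original header): disassemble a SPIR-V module through
// SPIRV-Tools when the optimizer integration is compiled in. The function name is
// illustrative.
#if ENABLE_OPT
inline void DisassembleSpirvExample(std::ostream& out, const std::vector<unsigned int>& spirv)
{
    // Uses the SPV_ENV_UNIVERSAL_1_3 environment; the overload above accepts an explicit
    // spv_target_env if another environment is needed.
    glslang::SpirvToolsDisassemble(out, spirv);
}
#endif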
... ...
//
// Copyright (C) 2014-2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Disassembler for SPIR-V.
//
#pragma once
#ifndef disassembler_H
#define disassembler_H
#include <iostream>
#include <vector>
#include "glslang/Include/visibility.h"
namespace spv {
// disassemble with glslang custom disassembler
GLSLANG_EXPORT void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
} // end namespace spv
#endif // disassembler_H
... ...
// Copyright (C) 2020 The Khronos Group Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of The Khronos Group Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_BUILD_INFO
#define GLSLANG_BUILD_INFO
#define GLSLANG_VERSION_MAJOR 15
#define GLSLANG_VERSION_MINOR 1
#define GLSLANG_VERSION_PATCH 0
#define GLSLANG_VERSION_FLAVOR ""
#define GLSLANG_VERSION_GREATER_THAN(major, minor, patch) \
((GLSLANG_VERSION_MAJOR) > (major) || ((major) == GLSLANG_VERSION_MAJOR && \
((GLSLANG_VERSION_MINOR) > (minor) || ((minor) == GLSLANG_VERSION_MINOR && \
(GLSLANG_VERSION_PATCH) > (patch)))))
#define GLSLANG_VERSION_GREATER_OR_EQUAL_TO(major, minor, patch) \
((GLSLANG_VERSION_MAJOR) > (major) || ((major) == GLSLANG_VERSION_MAJOR && \
((GLSLANG_VERSION_MINOR) > (minor) || ((minor) == GLSLANG_VERSION_MINOR && \
(GLSLANG_VERSION_PATCH >= (patch))))))
#define GLSLANG_VERSION_LESS_THAN(major, minor, patch) \
((GLSLANG_VERSION_MAJOR) < (major) || ((major) == GLSLANG_VERSION_MAJOR && \
((GLSLANG_VERSION_MINOR) < (minor) || ((minor) == GLSLANG_VERSION_MINOR && \
(GLSLANG_VERSION_PATCH) < (patch)))))
#define GLSLANG_VERSION_LESS_OR_EQUAL_TO(major, minor, patch) \
((GLSLANG_VERSION_MAJOR) < (major) || ((major) == GLSLANG_VERSION_MAJOR && \
((GLSLANG_VERSION_MINOR) < (minor) || ((minor) == GLSLANG_VERSION_MINOR && \
(GLSLANG_VERSION_PATCH <= (patch))))))
#endif // GLSLANG_BUILD_INFO
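// Example sketch (not part of the original header): the comparison macros above can be
// used for compile-time feature gating against the glslang release being built against.
#if GLSLANG_VERSION_GREATER_OR_EQUAL_TO(15, 0, 0)
// ... code that relies on APIs available since the 15.x series ...
#endif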
... ...
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_ALLOCATOR_H
#define NCNN_ALLOCATOR_H
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
#include "platform.h"
#include <stdlib.h>
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
#include <android/hardware_buffer.h>
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
namespace ncnn {
// the alignment of all the allocated buffers
#if NCNN_AVX512
#define NCNN_MALLOC_ALIGN 64
#elif NCNN_AVX
#define NCNN_MALLOC_ALIGN 32
#else
#define NCNN_MALLOC_ALIGN 16
#endif
// we have some optimized kernels that may over-read the buffer a bit in a loop
// it is common to interleave the next iteration's data load with arithmetic instructions
// allocating a few more bytes keeps us safe from SEGV_ACCERR failures
#define NCNN_MALLOC_OVERREAD 64
// Aligns a pointer to the specified number of bytes
// ptr The pointer to align
// n   Alignment in bytes; must be a power of two
template<typename _Tp>
static NCNN_FORCEINLINE _Tp* alignPtr(_Tp* ptr, int n = (int)sizeof(_Tp))
{
return (_Tp*)(((size_t)ptr + n - 1) & -n);
}
// Aligns a buffer size to the specified number of bytes
// The function returns the minimum number that is greater than or equal to sz and is divisible by n
// sz Buffer size to align
// n Alignment size that must be a power of two
static NCNN_FORCEINLINE size_t alignSize(size_t sz, int n)
{
return (sz + n - 1) & -n;
}
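// Worked example (illustrative): with an alignment of 16,
//   alignSize(100, 16) == 112 and alignSize(112, 16) == 112,
// i.e. sizes are rounded up to the next multiple of n, and already-aligned values are
// returned unchanged; alignPtr applies the same rounding to a pointer value.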
static NCNN_FORCEINLINE void* fastMalloc(size_t size)
{
#if _MSC_VER
return _aligned_malloc(size, NCNN_MALLOC_ALIGN);
#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
void* ptr = 0;
if (posix_memalign(&ptr, NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD))
ptr = 0;
return ptr;
#elif __ANDROID__ && __ANDROID_API__ < 17
return memalign(NCNN_MALLOC_ALIGN, size + NCNN_MALLOC_OVERREAD);
#else
unsigned char* udata = (unsigned char*)malloc(size + sizeof(void*) + NCNN_MALLOC_ALIGN + NCNN_MALLOC_OVERREAD);
if (!udata)
return 0;
unsigned char** adata = alignPtr((unsigned char**)udata + 1, NCNN_MALLOC_ALIGN);
adata[-1] = udata;
return adata;
#endif
}
static NCNN_FORCEINLINE void fastFree(void* ptr)
{
if (ptr)
{
#if _MSC_VER
_aligned_free(ptr);
#elif (defined(__unix__) || defined(__APPLE__)) && _POSIX_C_SOURCE >= 200112L || (__ANDROID__ && __ANDROID_API__ >= 17)
free(ptr);
#elif __ANDROID__ && __ANDROID_API__ < 17
free(ptr);
#else
unsigned char* udata = ((unsigned char**)ptr)[-1];
free(udata);
#endif
}
}
#if NCNN_THREADS
// exchange-add operation for atomic operations on reference counters
#if defined __riscv && !defined __riscv_atomic
// riscv target without A extension
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#elif defined __INTEL_COMPILER && !(defined WIN32 || defined _WIN32)
// atomic increment on the linux version of the Intel(tm) compiler
#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(addr)), delta)
#elif defined __GNUC__
#if defined __clang__ && __clang_major__ >= 3 && !defined __ANDROID__ && !defined __EMSCRIPTEN__ && !defined(__CUDACC__)
#ifdef __ATOMIC_ACQ_REL
#define NCNN_XADD(addr, delta) __c11_atomic_fetch_add((_Atomic(int)*)(addr), delta, __ATOMIC_ACQ_REL)
#else
#define NCNN_XADD(addr, delta) __atomic_fetch_add((_Atomic(int)*)(addr), delta, 4)
#endif
#else
#if defined __ATOMIC_ACQ_REL && !defined __clang__
// version for gcc >= 4.7
#define NCNN_XADD(addr, delta) (int)__atomic_fetch_add((unsigned*)(addr), (unsigned)(delta), __ATOMIC_ACQ_REL)
#else
#define NCNN_XADD(addr, delta) (int)__sync_fetch_and_add((unsigned*)(addr), (unsigned)(delta))
#endif
#endif
#elif defined _MSC_VER && !defined RC_INVOKED
#define NCNN_XADD(addr, delta) (int)_InterlockedExchangeAdd((long volatile*)addr, delta)
#else
// thread-unsafe branch
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#endif
#else // NCNN_THREADS
static NCNN_FORCEINLINE int NCNN_XADD(int* addr, int delta)
{
int tmp = *addr;
*addr += delta;
return tmp;
}
#endif // NCNN_THREADS
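// Illustrative note: NCNN_XADD returns the value *before* the addition, so a typical
// reference-count pattern for ncnn's internally refcounted objects looks like
//   NCNN_XADD(&refcount, 1);                   // retain
//   if (NCNN_XADD(&refcount, -1) == 1) { ... } // release; the last owner frees the data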
class NCNN_EXPORT Allocator
{
public:
virtual ~Allocator();
virtual void* fastMalloc(size_t size) = 0;
virtual void fastFree(void* ptr) = 0;
};
class PoolAllocatorPrivate;
class NCNN_EXPORT PoolAllocator : public Allocator
{
public:
PoolAllocator();
~PoolAllocator();
// ratio range 0 ~ 1
// default cr = 0
void set_size_compare_ratio(float scr);
// budget drop threshold
// default threshold = 10
void set_size_drop_threshold(size_t);
// release all budgets immediately
void clear();
virtual void* fastMalloc(size_t size);
virtual void fastFree(void* ptr);
private:
PoolAllocator(const PoolAllocator&);
PoolAllocator& operator=(const PoolAllocator&);
private:
PoolAllocatorPrivate* const d;
};
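// Example usage sketch (not part of the original header): a PoolAllocator keeps freed
// blocks in a pool and hands them back on later requests; clear() drops the cache. The
// function name is illustrative.
inline void PoolAllocatorExample()
{
    PoolAllocator pool;
    void* p = pool.fastMalloc(64 * 1024);
    pool.fastFree(p); // the block goes back into the pool, not to the system
    void* q = pool.fastMalloc(64 * 1024); // may be served from the cached block
    pool.fastFree(q);
    pool.clear(); // release all cached budgets immediately
}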
class UnlockedPoolAllocatorPrivate;
class NCNN_EXPORT UnlockedPoolAllocator : public Allocator
{
public:
UnlockedPoolAllocator();
~UnlockedPoolAllocator();
// ratio range 0 ~ 1
// default cr = 0
void set_size_compare_ratio(float scr);
// budget drop threshold
// default threshold = 10
void set_size_drop_threshold(size_t);
// release all budgets immediately
void clear();
virtual void* fastMalloc(size_t size);
virtual void fastFree(void* ptr);
private:
UnlockedPoolAllocator(const UnlockedPoolAllocator&);
UnlockedPoolAllocator& operator=(const UnlockedPoolAllocator&);
private:
UnlockedPoolAllocatorPrivate* const d;
};
#if NCNN_VULKAN
class VulkanDevice;
class NCNN_EXPORT VkBufferMemory
{
public:
VkBuffer buffer;
// the base offset assigned by allocator
size_t offset;
size_t capacity;
VkDeviceMemory memory;
void* mapped_ptr;
// buffer state, modified by command functions internally
mutable VkAccessFlags access_flags;
mutable VkPipelineStageFlags stage_flags;
// initialized and modified by Mat
int refcount;
};
class NCNN_EXPORT VkImageMemory
{
public:
VkImage image;
VkImageView imageview;
// underlying info assigned by allocator
int width;
int height;
int depth;
VkFormat format;
VkDeviceMemory memory;
void* mapped_ptr;
// the base offset assigned by allocator
size_t bind_offset;
size_t bind_capacity;
// image state, modified by command functions internally
mutable VkAccessFlags access_flags;
mutable VkImageLayout image_layout;
mutable VkPipelineStageFlags stage_flags;
// in-execution state, modified by command functions internally
mutable int command_refcount;
// initialized and modified by mat
int refcount;
};
class NCNN_EXPORT VkAllocator
{
public:
explicit VkAllocator(const VulkanDevice* _vkdev);
virtual ~VkAllocator();
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size) = 0;
virtual void fastFree(VkBufferMemory* ptr) = 0;
virtual int flush(VkBufferMemory* ptr);
virtual int invalidate(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack) = 0;
virtual void fastFree(VkImageMemory* ptr) = 0;
public:
const VulkanDevice* vkdev;
uint32_t buffer_memory_type_index;
uint32_t image_memory_type_index;
uint32_t reserved_type_index;
bool mappable;
bool coherent;
protected:
VkBuffer create_buffer(size_t size, VkBufferUsageFlags usage);
VkDeviceMemory allocate_memory(size_t size, uint32_t memory_type_index);
VkDeviceMemory allocate_dedicated_memory(size_t size, uint32_t memory_type_index, VkImage image, VkBuffer buffer);
VkImage create_image(int width, int height, int depth, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage);
VkImageView create_imageview(VkImage image, VkFormat format);
};
class VkBlobAllocatorPrivate;
class NCNN_EXPORT VkBlobAllocator : public VkAllocator
{
public:
explicit VkBlobAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 16 * 1024 * 1024); // 16M
virtual ~VkBlobAllocator();
public:
// release all budgets immediately
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkBlobAllocator(const VkBlobAllocator&);
VkBlobAllocator& operator=(const VkBlobAllocator&);
private:
VkBlobAllocatorPrivate* const d;
};
class VkWeightAllocatorPrivate;
class NCNN_EXPORT VkWeightAllocator : public VkAllocator
{
public:
explicit VkWeightAllocator(const VulkanDevice* vkdev, size_t preferred_block_size = 8 * 1024 * 1024); // 8M
virtual ~VkWeightAllocator();
public:
// release all blocks immediately
virtual void clear();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkWeightAllocator(const VkWeightAllocator&);
VkWeightAllocator& operator=(const VkWeightAllocator&);
private:
VkWeightAllocatorPrivate* const d;
};
class VkStagingAllocatorPrivate;
class NCNN_EXPORT VkStagingAllocator : public VkAllocator
{
public:
explicit VkStagingAllocator(const VulkanDevice* vkdev);
virtual ~VkStagingAllocator();
public:
// compare ratio range 0 ~ 1
// default compare ratio = 0.75
void set_size_compare_ratio(float scr);
// release all budgets immediately
virtual void clear();
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkStagingAllocator(const VkStagingAllocator&);
VkStagingAllocator& operator=(const VkStagingAllocator&);
private:
VkStagingAllocatorPrivate* const d;
};
class VkWeightStagingAllocatorPrivate;
class NCNN_EXPORT VkWeightStagingAllocator : public VkAllocator
{
public:
explicit VkWeightStagingAllocator(const VulkanDevice* vkdev);
virtual ~VkWeightStagingAllocator();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkWeightStagingAllocator(const VkWeightStagingAllocator&);
VkWeightStagingAllocator& operator=(const VkWeightStagingAllocator&);
private:
VkWeightStagingAllocatorPrivate* const d;
};
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
class NCNN_EXPORT VkAndroidHardwareBufferImageAllocator : public VkAllocator
{
public:
VkAndroidHardwareBufferImageAllocator(const VulkanDevice* _vkdev, AHardwareBuffer* _hb);
virtual ~VkAndroidHardwareBufferImageAllocator();
public:
virtual VkBufferMemory* fastMalloc(size_t size);
virtual void fastFree(VkBufferMemory* ptr);
virtual VkImageMemory* fastMalloc(int w, int h, int c, size_t elemsize, int elempack);
virtual void fastFree(VkImageMemory* ptr);
private:
VkAndroidHardwareBufferImageAllocator(const VkAndroidHardwareBufferImageAllocator&);
VkAndroidHardwareBufferImageAllocator& operator=(const VkAndroidHardwareBufferImageAllocator&);
public:
int init();
int width() const;
int height() const;
uint64_t external_format() const;
public:
AHardwareBuffer* hb;
AHardwareBuffer_Desc bufferDesc;
VkAndroidHardwareBufferFormatPropertiesANDROID bufferFormatProperties;
VkAndroidHardwareBufferPropertiesANDROID bufferProperties;
VkSamplerYcbcrConversionKHR samplerYcbcrConversion;
};
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
#endif // NCNN_VULKAN
} // namespace ncnn
#endif // NCNN_ALLOCATOR_H
... ...
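A quick, non-authoritative usage sketch for the allocator header above. The include path, the HeapAllocator name and the main() harness are made up for illustration; only the Allocator, PoolAllocator and UnlockedPoolAllocator APIs come from the header itself.

#include "allocator.h" // path assumed; adjust to your include layout

#include <cstdlib>

// A custom allocator only has to implement the two pure-virtual hooks.
// Note: ncnn's own allocators hand out SIMD-friendly aligned memory, so a real
// implementation should align as well; plain malloc is used here for brevity.
class HeapAllocator : public ncnn::Allocator
{
public:
    virtual void* fastMalloc(size_t size) { return std::malloc(size); }
    virtual void fastFree(void* ptr)      { std::free(ptr); }
};

int main()
{
    // Pooling allocator: freed blocks are kept and recycled for later requests.
    // The compare ratio (0 ~ 1) controls how closely a recycled block's size
    // must match a new request before it is reused.
    ncnn::PoolAllocator pool;
    pool.set_size_compare_ratio(0.f);

    void* p = pool.fastMalloc(1024);
    pool.fastFree(p); // returned to the pool, not to the system
    pool.clear();     // drop everything the pool is still holding

    // UnlockedPoolAllocator behaves the same but skips internal locking,
    // so it must only be used from a single thread.
    ncnn::UnlockedPoolAllocator unlocked;
    void* q = unlocked.fastMalloc(256);
    unlocked.fastFree(q);
    unlocked.clear();

    HeapAllocator heap;
    heap.fastFree(heap.fastMalloc(64));
    return 0;
}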
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BENCHMARK_H
#define NCNN_BENCHMARK_H
#include "layer.h"
#include "mat.h"
#include "platform.h"
namespace ncnn {
// get the current timestamp in milliseconds
NCNN_EXPORT double get_current_time();
// sleep for the given number of milliseconds
NCNN_EXPORT void sleep(unsigned long long int milliseconds = 1000);
#if NCNN_BENCHMARK
NCNN_EXPORT void benchmark(const Layer* layer, double start, double end);
NCNN_EXPORT void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
#endif // NCNN_BENCHMARK
} // namespace ncnn
#endif // NCNN_BENCHMARK_H
... ...
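The two helpers above are handy for ad-hoc timing even outside NCNN_BENCHMARK builds. A minimal sketch, with the include path assumed:

#include "benchmark.h" // path assumed

#include <cstdio>

int main()
{
    double t0 = ncnn::get_current_time(); // wall-clock time in milliseconds
    ncnn::sleep(250);                     // block the calling thread for ~250 ms
    double t1 = ncnn::get_current_time();
    std::printf("elapsed: %.2f ms\n", t1 - t0);
    return 0;
}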
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_BLOB_H
#define NCNN_BLOB_H
#include "mat.h"
#include "platform.h"
namespace ncnn {
class NCNN_EXPORT Blob
{
public:
// empty
Blob();
public:
#if NCNN_STRING
// blob name
std::string name;
#endif // NCNN_STRING
// index of the layer which produces this blob as output
int producer;
// index of the layer which needs this blob as input
int consumer;
// shape hint
Mat shape;
};
} // namespace ncnn
#endif // NCNN_BLOB_H
... ...
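Blob is pure bookkeeping: the Net fills in producer/consumer with layer indices and may record a shape hint while loading the param file. A small illustrative reader, not from the sources (the w/h/c fields and empty() come from Mat in mat.h; -1 is the value the default constructor leaves in producer/consumer):

#include "blob.h" // path assumed

#include <cstdio>

static void print_blob(const ncnn::Blob& blob)
{
#if NCNN_STRING
    std::printf("blob %s\n", blob.name.c_str());
#endif
    // index of the layer that outputs this blob / the layer that consumes it
    std::printf("  producer = %d, consumer = %d\n", blob.producer, blob.consumer);

    // the shape hint may be empty when the shape is unknown at load time
    if (!blob.shape.empty())
        std::printf("  shape hint: w=%d h=%d c=%d\n", blob.shape.w, blob.shape.h, blob.shape.c);
}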
/* Tencent is pleased to support the open source community by making ncnn available.
*
* Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
#ifndef NCNN_C_API_H
#define NCNN_C_API_H
#include "platform.h"
#if NCNN_C_API
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
NCNN_EXPORT const char* ncnn_version(void);
/* allocator api */
typedef struct __ncnn_allocator_t* ncnn_allocator_t;
struct NCNN_EXPORT __ncnn_allocator_t
{
void* pthis;
void* (*fast_malloc)(ncnn_allocator_t allocator, size_t size);
void (*fast_free)(ncnn_allocator_t allocator, void* ptr);
};
NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_pool_allocator(void);
NCNN_EXPORT ncnn_allocator_t ncnn_allocator_create_unlocked_pool_allocator(void);
NCNN_EXPORT void ncnn_allocator_destroy(ncnn_allocator_t allocator);
/* option api */
typedef struct __ncnn_option_t* ncnn_option_t;
NCNN_EXPORT ncnn_option_t ncnn_option_create(void);
NCNN_EXPORT void ncnn_option_destroy(ncnn_option_t opt);
NCNN_EXPORT int ncnn_option_get_num_threads(const ncnn_option_t opt);
NCNN_EXPORT void ncnn_option_set_num_threads(ncnn_option_t opt, int num_threads);
NCNN_EXPORT int ncnn_option_get_use_local_pool_allocator(const ncnn_option_t opt);
NCNN_EXPORT void ncnn_option_set_use_local_pool_allocator(ncnn_option_t opt, int use_local_pool_allocator);
NCNN_EXPORT void ncnn_option_set_blob_allocator(ncnn_option_t opt, ncnn_allocator_t allocator);
NCNN_EXPORT void ncnn_option_set_workspace_allocator(ncnn_option_t opt, ncnn_allocator_t allocator);
NCNN_EXPORT int ncnn_option_get_use_vulkan_compute(const ncnn_option_t opt);
NCNN_EXPORT void ncnn_option_set_use_vulkan_compute(ncnn_option_t opt, int use_vulkan_compute);
/* mat api */
typedef struct __ncnn_mat_t* ncnn_mat_t;
NCNN_EXPORT ncnn_mat_t ncnn_mat_create(void);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d(int w, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d(int w, int h, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d(int w, int h, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d(int w, int h, int d, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d(int w, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d(int w, int h, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d(int w, int h, int c, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d(int w, int h, int d, int c, void* data, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_1d_elem(int w, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_2d_elem(int w, int h, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_3d_elem(int w, int h, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_4d_elem(int w, int h, int d, int c, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_1d_elem(int w, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_2d_elem(int w, int h, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_3d_elem(int w, int h, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_create_external_4d_elem(int w, int h, int d, int c, void* data, size_t elemsize, int elempack, ncnn_allocator_t allocator);
NCNN_EXPORT void ncnn_mat_destroy(ncnn_mat_t mat);
NCNN_EXPORT void ncnn_mat_fill_float(ncnn_mat_t mat, float v);
NCNN_EXPORT ncnn_mat_t ncnn_mat_clone(const ncnn_mat_t mat, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_1d(const ncnn_mat_t mat, int w, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_2d(const ncnn_mat_t mat, int w, int h, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_3d(const ncnn_mat_t mat, int w, int h, int c, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_reshape_4d(const ncnn_mat_t mat, int w, int h, int d, int c, ncnn_allocator_t allocator);
NCNN_EXPORT int ncnn_mat_get_dims(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_w(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_h(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_d(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_c(const ncnn_mat_t mat);
NCNN_EXPORT size_t ncnn_mat_get_elemsize(const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_mat_get_elempack(const ncnn_mat_t mat);
NCNN_EXPORT size_t ncnn_mat_get_cstep(const ncnn_mat_t mat);
NCNN_EXPORT void* ncnn_mat_get_data(const ncnn_mat_t mat);
NCNN_EXPORT void* ncnn_mat_get_channel_data(const ncnn_mat_t mat, int c);
#if NCNN_PIXEL
/* mat pixel api */
#define NCNN_MAT_PIXEL_RGB 1
#define NCNN_MAT_PIXEL_BGR 2
#define NCNN_MAT_PIXEL_GRAY 3
#define NCNN_MAT_PIXEL_RGBA 4
#define NCNN_MAT_PIXEL_BGRA 5
#define NCNN_MAT_PIXEL_X2Y(X, Y) (X | (Y << 16))
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels(const unsigned char* pixels, int type, int w, int h, int stride, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_resize(const unsigned char* pixels, int type, int w, int h, int stride, int target_width, int target_height, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, ncnn_allocator_t allocator);
NCNN_EXPORT ncnn_mat_t ncnn_mat_from_pixels_roi_resize(const unsigned char* pixels, int type, int w, int h, int stride, int roix, int roiy, int roiw, int roih, int target_width, int target_height, ncnn_allocator_t allocator);
NCNN_EXPORT void ncnn_mat_to_pixels(const ncnn_mat_t mat, unsigned char* pixels, int type, int stride);
NCNN_EXPORT void ncnn_mat_to_pixels_resize(const ncnn_mat_t mat, unsigned char* pixels, int type, int target_width, int target_height, int target_stride);
#endif /* NCNN_PIXEL */
NCNN_EXPORT void ncnn_mat_substract_mean_normalize(ncnn_mat_t mat, const float* mean_vals, const float* norm_vals);
NCNN_EXPORT void ncnn_convert_packing(const ncnn_mat_t src, ncnn_mat_t* dst, int elempack, const ncnn_option_t opt);
NCNN_EXPORT void ncnn_flatten(const ncnn_mat_t src, ncnn_mat_t* dst, const ncnn_option_t opt);
/* blob api */
typedef struct __ncnn_blob_t* ncnn_blob_t;
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_blob_get_name(const ncnn_blob_t blob);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_blob_get_producer(const ncnn_blob_t blob);
NCNN_EXPORT int ncnn_blob_get_consumer(const ncnn_blob_t blob);
NCNN_EXPORT void ncnn_blob_get_shape(const ncnn_blob_t blob, int* dims, int* w, int* h, int* c);
/* paramdict api */
typedef struct __ncnn_paramdict_t* ncnn_paramdict_t;
NCNN_EXPORT ncnn_paramdict_t ncnn_paramdict_create(void);
NCNN_EXPORT void ncnn_paramdict_destroy(ncnn_paramdict_t pd);
NCNN_EXPORT int ncnn_paramdict_get_type(const ncnn_paramdict_t pd, int id);
NCNN_EXPORT int ncnn_paramdict_get_int(const ncnn_paramdict_t pd, int id, int def);
NCNN_EXPORT float ncnn_paramdict_get_float(const ncnn_paramdict_t pd, int id, float def);
NCNN_EXPORT ncnn_mat_t ncnn_paramdict_get_array(const ncnn_paramdict_t pd, int id, const ncnn_mat_t def);
NCNN_EXPORT void ncnn_paramdict_set_int(ncnn_paramdict_t pd, int id, int i);
NCNN_EXPORT void ncnn_paramdict_set_float(ncnn_paramdict_t pd, int id, float f);
NCNN_EXPORT void ncnn_paramdict_set_array(ncnn_paramdict_t pd, int id, const ncnn_mat_t v);
/* datareader api */
typedef struct __ncnn_datareader_t* ncnn_datareader_t;
struct NCNN_EXPORT __ncnn_datareader_t
{
void* pthis;
#if NCNN_STRING
int (*scan)(ncnn_datareader_t dr, const char* format, void* p);
#endif /* NCNN_STRING */
size_t (*read)(ncnn_datareader_t dr, void* buf, size_t size);
};
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create(void);
#if NCNN_STDIO
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_stdio(FILE* fp);
#endif /* NCNN_STDIO */
NCNN_EXPORT ncnn_datareader_t ncnn_datareader_create_from_memory(const unsigned char** mem);
NCNN_EXPORT void ncnn_datareader_destroy(ncnn_datareader_t dr);
/* modelbin api */
typedef struct __ncnn_modelbin_t* ncnn_modelbin_t;
struct NCNN_EXPORT __ncnn_modelbin_t
{
void* pthis;
ncnn_mat_t (*load_1d)(const ncnn_modelbin_t mb, int w, int type);
ncnn_mat_t (*load_2d)(const ncnn_modelbin_t mb, int w, int h, int type);
ncnn_mat_t (*load_3d)(const ncnn_modelbin_t mb, int w, int h, int c, int type);
};
NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_datareader(const ncnn_datareader_t dr);
NCNN_EXPORT ncnn_modelbin_t ncnn_modelbin_create_from_mat_array(const ncnn_mat_t* weights, int n);
NCNN_EXPORT void ncnn_modelbin_destroy(ncnn_modelbin_t mb);
/* layer api */
typedef struct __ncnn_layer_t* ncnn_layer_t;
struct NCNN_EXPORT __ncnn_layer_t
{
void* pthis;
int (*load_param)(ncnn_layer_t layer, const ncnn_paramdict_t pd);
int (*load_model)(ncnn_layer_t layer, const ncnn_modelbin_t mb);
int (*create_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt);
int (*destroy_pipeline)(ncnn_layer_t layer, const ncnn_option_t opt);
int (*forward_1)(const ncnn_layer_t layer, const ncnn_mat_t bottom_blob, ncnn_mat_t* top_blob, const ncnn_option_t opt);
int (*forward_n)(const ncnn_layer_t layer, const ncnn_mat_t* bottom_blobs, int n, ncnn_mat_t* top_blobs, int n2, const ncnn_option_t opt);
int (*forward_inplace_1)(const ncnn_layer_t layer, ncnn_mat_t bottom_top_blob, const ncnn_option_t opt);
int (*forward_inplace_n)(const ncnn_layer_t layer, ncnn_mat_t* bottom_top_blobs, int n, const ncnn_option_t opt);
};
NCNN_EXPORT ncnn_layer_t ncnn_layer_create(void);
NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_typeindex(int typeindex);
#if NCNN_STRING
NCNN_EXPORT ncnn_layer_t ncnn_layer_create_by_type(const char* type);
NCNN_EXPORT int ncnn_layer_type_to_index(const char* type);
#endif /* NCNN_STRING */
NCNN_EXPORT void ncnn_layer_destroy(ncnn_layer_t layer);
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_layer_get_name(const ncnn_layer_t layer);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_layer_get_typeindex(const ncnn_layer_t layer);
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_layer_get_type(const ncnn_layer_t layer);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_layer_get_one_blob_only(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_inplace(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_vulkan(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_packing(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_bf16_storage(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_fp16_storage(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_support_image_storage(const ncnn_layer_t layer);
NCNN_EXPORT void ncnn_layer_set_one_blob_only(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_inplace(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_vulkan(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_packing(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_bf16_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_fp16_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT void ncnn_layer_set_support_image_storage(ncnn_layer_t layer, int enable);
NCNN_EXPORT int ncnn_layer_get_bottom_count(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_bottom(const ncnn_layer_t layer, int i);
NCNN_EXPORT int ncnn_layer_get_top_count(const ncnn_layer_t layer);
NCNN_EXPORT int ncnn_layer_get_top(const ncnn_layer_t layer, int i);
NCNN_EXPORT void ncnn_blob_get_bottom_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c);
NCNN_EXPORT void ncnn_blob_get_top_shape(const ncnn_layer_t layer, int i, int* dims, int* w, int* h, int* c);
/* layer factory function */
typedef ncnn_layer_t (*ncnn_layer_creator_t)(void* userdata);
typedef void (*ncnn_layer_destroyer_t)(ncnn_layer_t layer, void* userdata);
typedef struct __ncnn_net_custom_layer_factory_t* ncnn_net_custom_layer_factory_t;
struct __ncnn_net_custom_layer_factory_t
{
ncnn_layer_creator_t creator;
ncnn_layer_destroyer_t destroyer;
void* userdata;
ncnn_net_custom_layer_factory_t next;
};
/* net api */
typedef struct __ncnn_net_t* ncnn_net_t;
struct __ncnn_net_t
{
void* pthis;
ncnn_net_custom_layer_factory_t custom_layer_factory;
};
NCNN_EXPORT ncnn_net_t ncnn_net_create(void);
NCNN_EXPORT void ncnn_net_destroy(ncnn_net_t net);
NCNN_EXPORT ncnn_option_t ncnn_net_get_option(ncnn_net_t net);
NCNN_EXPORT void ncnn_net_set_option(ncnn_net_t net, ncnn_option_t opt);
#if NCNN_VULKAN
NCNN_EXPORT void ncnn_net_set_vulkan_device(ncnn_net_t net, int device_index);
#endif
#if NCNN_STRING
NCNN_EXPORT void ncnn_net_register_custom_layer_by_type(ncnn_net_t net, const char* type, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata);
#endif /* NCNN_STRING */
NCNN_EXPORT void ncnn_net_register_custom_layer_by_typeindex(ncnn_net_t net, int typeindex, ncnn_layer_creator_t creator, ncnn_layer_destroyer_t destroyer, void* userdata);
#if NCNN_STDIO
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param(ncnn_net_t net, const char* path);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_net_load_param_bin(ncnn_net_t net, const char* path);
NCNN_EXPORT int ncnn_net_load_model(ncnn_net_t net, const char* path);
#endif /* NCNN_STDIO */
#if NCNN_STDIO
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param_memory(ncnn_net_t net, const char* mem);
#endif /* NCNN_STRING */
#endif /* NCNN_STDIO */
NCNN_EXPORT int ncnn_net_load_param_bin_memory(ncnn_net_t net, const unsigned char* mem);
NCNN_EXPORT int ncnn_net_load_model_memory(ncnn_net_t net, const unsigned char* mem);
#if NCNN_STRING
NCNN_EXPORT int ncnn_net_load_param_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_net_load_param_bin_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
NCNN_EXPORT int ncnn_net_load_model_datareader(ncnn_net_t net, const ncnn_datareader_t dr);
NCNN_EXPORT void ncnn_net_clear(ncnn_net_t net);
NCNN_EXPORT int ncnn_net_get_input_count(const ncnn_net_t net);
NCNN_EXPORT int ncnn_net_get_output_count(const ncnn_net_t net);
#if NCNN_STRING
NCNN_EXPORT const char* ncnn_net_get_input_name(const ncnn_net_t net, int i);
NCNN_EXPORT const char* ncnn_net_get_output_name(const ncnn_net_t net, int i);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_net_get_input_index(const ncnn_net_t net, int i);
NCNN_EXPORT int ncnn_net_get_output_index(const ncnn_net_t net, int i);
/* extractor api */
typedef struct __ncnn_extractor_t* ncnn_extractor_t;
NCNN_EXPORT ncnn_extractor_t ncnn_extractor_create(ncnn_net_t net);
NCNN_EXPORT void ncnn_extractor_destroy(ncnn_extractor_t ex);
NCNN_EXPORT void ncnn_extractor_set_option(ncnn_extractor_t ex, const ncnn_option_t opt);
#if NCNN_STRING
NCNN_EXPORT int ncnn_extractor_input(ncnn_extractor_t ex, const char* name, const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_extractor_extract(ncnn_extractor_t ex, const char* name, ncnn_mat_t* mat);
#endif /* NCNN_STRING */
NCNN_EXPORT int ncnn_extractor_input_index(ncnn_extractor_t ex, int index, const ncnn_mat_t mat);
NCNN_EXPORT int ncnn_extractor_extract_index(ncnn_extractor_t ex, int index, ncnn_mat_t* mat);
/* mat process api */
#define NCNN_BORDER_CONSTANT 0
#define NCNN_BORDER_REPLICATE 1
#define NCNN_BORDER_REFLECT 2
#define NCNN_BORDER_TRANSPARENT -233
NCNN_EXPORT void ncnn_copy_make_border(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int type, float v, const ncnn_option_t opt);
NCNN_EXPORT void ncnn_copy_make_border_3d(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int front, int behind, int type, float v, const ncnn_option_t opt);
NCNN_EXPORT void ncnn_copy_cut_border(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, const ncnn_option_t opt);
NCNN_EXPORT void ncnn_copy_cut_border_3d(const ncnn_mat_t src, ncnn_mat_t dst, int top, int bottom, int left, int right, int front, int behind, const ncnn_option_t opt);
#if NCNN_PIXEL_DRAWING
/* mat pixel drawing api */
NCNN_EXPORT void ncnn_draw_rectangle_c1(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_rectangle_c2(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_rectangle_c3(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_rectangle_c4(unsigned char* pixels, int w, int h, int rx, int ry, int rw, int rh, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_text_c1(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color);
NCNN_EXPORT void ncnn_draw_text_c2(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color);
NCNN_EXPORT void ncnn_draw_text_c3(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color);
NCNN_EXPORT void ncnn_draw_text_c4(unsigned char* pixels, int w, int h, const char* text, int x, int y, int fontpixelsize, unsigned int color);
NCNN_EXPORT void ncnn_draw_circle_c1(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_circle_c2(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_circle_c3(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_circle_c4(unsigned char* pixels, int w, int h, int cx, int cy, int radius, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_line_c1(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_line_c2(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_line_c3(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness);
NCNN_EXPORT void ncnn_draw_line_c4(unsigned char* pixels, int w, int h, int x0, int y0, int x1, int y1, unsigned int color, int thickness);
#endif /* NCNN_PIXEL_DRAWING */
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* NCNN_C_API */
#endif /* NCNN_C_API_H */
... ...
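The C API mirrors the C++ classes one-to-one. A rough end-to-end sketch follows; it assumes the library was built with NCNN_STDIO, NCNN_STRING and NCNN_PIXEL enabled (the defaults), and the file names "model.param"/"model.bin" as well as the blob names "data"/"output" and the 224x224 input size are placeholders, not from the sources.

/* Illustration only: typical use of the C API declared above. */
#include "c_api.h" // path assumed

#include <stdio.h>

int run_inference(const unsigned char* bgr, int w, int h)
{
    ncnn_option_t opt = ncnn_option_create();
    ncnn_option_set_num_threads(opt, 4);

    ncnn_net_t net = ncnn_net_create();
    ncnn_net_set_option(net, opt);
    if (ncnn_net_load_param(net, "model.param") != 0 || ncnn_net_load_model(net, "model.bin") != 0)
    {
        ncnn_net_destroy(net);
        ncnn_option_destroy(opt);
        return -1;
    }

    /* convert packed BGR pixels into a 3-channel float mat, resized to 224x224 */
    ncnn_mat_t in = ncnn_mat_from_pixels_resize(bgr, NCNN_MAT_PIXEL_BGR, w, h, w * 3, 224, 224, NULL);

    ncnn_extractor_t ex = ncnn_extractor_create(net);
    ncnn_extractor_input(ex, "data", in);

    ncnn_mat_t out = NULL;
    ncnn_extractor_extract(ex, "output", &out);
    printf("output dims=%d w=%d\n", ncnn_mat_get_dims(out), ncnn_mat_get_w(out));

    ncnn_mat_destroy(out);
    ncnn_mat_destroy(in);
    ncnn_extractor_destroy(ex);
    ncnn_net_destroy(net);
    ncnn_option_destroy(opt);
    return 0;
}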
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef NCNN_COMMAND_H
#define NCNN_COMMAND_H
#include "platform.h"
#if NCNN_VULKAN
#include "mat.h"
namespace ncnn {
class Pipeline;
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
class ImportAndroidHardwareBufferPipeline;
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
class VkComputePrivate;
class NCNN_EXPORT VkCompute
{
public:
explicit VkCompute(const VulkanDevice* vkdev);
virtual ~VkCompute();
public:
void record_upload(const Mat& src, VkMat& dst, const Option& opt);
void record_upload(const Mat& src, VkImageMat& dst, const Option& opt);
void record_download(const VkMat& src, Mat& dst, const Option& opt);
void record_download(const VkImageMat& src, Mat& dst, const Option& opt);
void record_buffer_to_image(const VkMat& src, VkImageMat& dst, const Option& opt);
void record_image_to_buffer(const VkImageMat& src, VkMat& dst, const Option& opt);
void record_clone(const Mat& src, VkMat& dst, const Option& opt);
void record_clone(const Mat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkMat& src, Mat& dst, const Option& opt);
void record_clone(const VkImageMat& src, Mat& dst, const Option& opt);
void record_clone(const VkMat& src, VkMat& dst, const Option& opt);
void record_clone(const VkImageMat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkMat& src, VkImageMat& dst, const Option& opt);
void record_clone(const VkImageMat& src, VkMat& dst, const Option& opt);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkImageMat>& bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const VkImageMat& dispatcher);
void record_pipeline(const Pipeline* pipeline, const std::vector<VkMat>& buffer_bindings, const std::vector<VkImageMat>& image_bindings, const std::vector<vk_constant_type>& constants, const Mat& dispatcher);
#if NCNN_BENCHMARK
void record_write_timestamp(uint32_t query);
#endif // NCNN_BENCHMARK
#if NCNN_PLATFORM_API
#if __ANDROID_API__ >= 26
void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkMat& dst);
void record_import_android_hardware_buffer(const ImportAndroidHardwareBufferPipeline* pipeline, const VkImageMat& src, const VkImageMat& dst);
#endif // __ANDROID_API__ >= 26
#endif // NCNN_PLATFORM_API
int submit_and_wait();
int reset();
#if NCNN_BENCHMARK
int create_query_pool(uint32_t query_count);
int get_query_pool_results(uint32_t first_query, uint32_t query_count, std::vector<uint64_t>& results);
#endif // NCNN_BENCHMARK
protected:
const VulkanDevice* vkdev;
void barrier_readwrite(const VkMat& binding);
void barrier_readwrite(const VkImageMat& binding);
void barrier_readonly(const VkImageMat& binding);
private:
VkComputePrivate* const d;
};
class VkTransferPrivate;
class NCNN_EXPORT VkTransfer
{
public:
explicit VkTransfer(const VulkanDevice* vkdev);
virtual ~VkTransfer();
public:
void record_upload(const Mat& src, VkMat& dst, const Option& opt, bool flatten = true);
void record_upload(const Mat& src, VkImageMat& dst, const Option& opt);
int submit_and_wait();
protected:
const VulkanDevice* vkdev;
private:
VkTransferPrivate* const d;
};
} // namespace ncnn
#endif // NCNN_VULKAN
#endif // NCNN_COMMAND_H
... ...
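For completeness, a very rough sketch of how VkCompute is driven. Everything outside this header (get_gpu_device(), the blob_vkallocator/staging_vkallocator fields on Option, and the acquire/reclaim helpers on VulkanDevice) lives in ncnn's gpu.h and option.h, which are not part of this excerpt, so treat those names as assumptions; a real compute pass would also create a Pipeline and call record_pipeline() between the upload and the download.

// Illustration only: host -> device -> host round trip with VkCompute.
// Requires a build with NCNN_VULKAN enabled.
#include "command.h" // paths assumed
#include "gpu.h"
#include "option.h"

int roundtrip(const ncnn::Mat& src, ncnn::Mat& dst)
{
    const ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device(0);
    if (!vkdev) return -1;

    ncnn::Option opt;
    opt.blob_vkallocator = vkdev->acquire_blob_allocator();
    opt.staging_vkallocator = vkdev->acquire_staging_allocator();

    {
        ncnn::VkCompute cmd(vkdev);

        ncnn::VkMat gpu_blob;
        cmd.record_upload(src, gpu_blob, opt);   // host -> device
        // ... record_pipeline(...) calls would go here ...
        cmd.record_download(gpu_blob, dst, opt); // device -> host

        cmd.submit_and_wait(); // dst is valid once this returns
    }

    vkdev->reclaim_blob_allocator(opt.blob_vkallocator);
    vkdev->reclaim_staging_allocator(opt.staging_vkallocator);
    return 0;
}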