// sherpa-onnx/csrc/stack.cc
//
// Copyright (c) 2023 Jingzhao Ou (jingzhao.ou@gmail.com)

#include "sherpa-onnx/csrc/stack.h"

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <numeric>
#include <utility>

#include "sherpa-onnx/csrc/onnx-utils.h"

namespace sherpa_onnx {
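// Returns true if the two shapes have the same number of dimensions and
// every dimension matches.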
static bool Compare(const std::vector<int64_t> &a,
                    const std::vector<int64_t> &b) {
  if (a.size() != b.size()) return false;

  for (int32_t i = 0; i != static_cast<int32_t>(a.size()); ++i) {
    if (a[i] != b[i]) return false;
  }

  return true;
}
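// Prints a shape to stderr as a space-separated list of dimensions.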
static void PrintShape(const std::vector<int64_t> &a) {
  for (auto i : a) {
    fprintf(stderr, "%d ", static_cast<int32_t>(i));
  }
  fprintf(stderr, "\n");
}
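// Stacks the given tensors, which must all have the same shape, along a new
// dimension inserted at position `dim`. The result has one more dimension
// than each input; that dimension has size values.size().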
template <typename T /*=float*/>
Ort::Value Stack(OrtAllocator *allocator,
                 const std::vector<const Ort::Value *> &values, int32_t dim) {
  std::vector<int64_t> v0_shape =
      values[0]->GetTensorTypeAndShapeInfo().GetShape();

  // Every input must have exactly the same shape as the first tensor.
  for (int32_t i = 1; i != static_cast<int32_t>(values.size()); ++i) {
    auto s = values[i]->GetTensorTypeAndShapeInfo().GetShape();
    bool ret = Compare(v0_shape, s);
    if (!ret) {
      fprintf(stderr, "Incorrect shape in Stack!\n");

      fprintf(stderr, "Shape for tensor 0: ");
      PrintShape(v0_shape);

      fprintf(stderr, "Shape for tensor %d: ", i);
      PrintShape(s);

      exit(-1);
    }
  }
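  // The output shape is the input shape with values.size() inserted at
  // position `dim`.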
  std::vector<int64_t> ans_shape;
  ans_shape.reserve(v0_shape.size() + 1);
  ans_shape.insert(ans_shape.end(), v0_shape.data(), v0_shape.data() + dim);
  ans_shape.push_back(values.size());
  ans_shape.insert(ans_shape.end(), v0_shape.data() + dim,
                   v0_shape.data() + v0_shape.size());
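  // leading_size is the number of elements in the dimensions before `dim`;
  // trailing_size is the number of elements in the dimensions from `dim`
  // onwards. Each input tensor therefore holds
  // leading_size * trailing_size elements.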
  auto leading_size = static_cast<int32_t>(std::accumulate(
      v0_shape.begin(), v0_shape.begin() + dim, 1, std::multiplies<int64_t>()));

  auto trailing_size = static_cast<int32_t>(std::accumulate(
      v0_shape.begin() + dim, v0_shape.end(), 1, std::multiplies<int64_t>()));
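  // Allocate the output tensor and fill it: for each leading index, copy the
  // corresponding trailing block from every input in turn, so the inputs end
  // up laid out along the new dimension.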
  Ort::Value ans = Ort::Value::CreateTensor<T>(allocator, ans_shape.data(),
                                               ans_shape.size());
  T *dst = ans.GetTensorMutableData<T>();

  for (int32_t i = 0; i != leading_size; ++i) {
    for (int32_t n = 0; n != static_cast<int32_t>(values.size()); ++n) {
      const T *src = values[n]->GetTensorData<T>();
      src += i * trailing_size;
      std::copy(src, src + trailing_size, dst);
      dst += trailing_size;
    }
  }

  return ans;
}
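// Explicit instantiations so that Stack<float> and Stack<int64_t> can be
// used from other translation units.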
template Ort::Value Stack<float>(
    OrtAllocator *allocator,
    const std::vector<const Ort::Value *> &values,
    int32_t dim);

template Ort::Value Stack<int64_t>(
    OrtAllocator *allocator,
    const std::vector<const Ort::Value *> &values,
    int32_t dim);

}  // namespace sherpa_onnx
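// Example usage (a minimal sketch of how Stack might be called): stacking
// two 2x3 float tensors along dim 0. It assumes an
// Ort::AllocatorWithDefaultOptions, which converts implicitly to
// OrtAllocator *.
//
//   Ort::AllocatorWithDefaultOptions allocator;
//
//   int64_t shape[] = {2, 3};
//   Ort::Value a = Ort::Value::CreateTensor<float>(allocator, shape, 2);
//   Ort::Value b = Ort::Value::CreateTensor<float>(allocator, shape, 2);
//   // ... fill a and b through GetTensorMutableData<float>() ...
//
//   Ort::Value stacked =
//       sherpa_onnx::Stack<float>(allocator, {&a, &b}, /*dim=*/0);
//   // stacked has shape (2, 2, 3): the new dimension of size 2 (the number
//   // of stacked tensors) is inserted at position 0.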