TinyLlama.cpp 1.0
A lightweight C++ implementation of the TinyLlama language model
Classes | Typedefs | Enumerations | Functions
model.h File Reference
#include <cstdint>
#include <functional>
#include <nlohmann/json.hpp>
#include <string>
#include <unordered_map>
#include <vector>
#include "safetensors_loader.h"
#include <memory>
#include "quantization.h"

Classes

struct  ModelConfig
 Model configuration structure holding architecture and hyperparameters. More...
 
struct  KVCacheLayer
 Key-Value cache for a single transformer layer. More...
 
struct  KVCache
 Complete Key-Value cache for all transformer layers. More...
 
struct  LayerWeights
 Structure holding all weights for a single transformer layer. More...
 
class  TinyLlamaModel
 Main transformer model class for TinyLlama. More...
 

Typedefs

using ForwardDiagCallback = std::function< void(int layer, const std::string &name, const std::vector< float > &v)>
 

Enumerations

enum class  TensorName {
  Q_PROJ , K_PROJ , V_PROJ , O_PROJ ,
  GATE_PROJ , UP_PROJ , DOWN_PROJ , TOKEN_EMBD ,
  LM_HEAD , UNKNOWN
}
 Enumeration of tensor names used in the TinyLlama model. More...
 

Functions

static std::string tensor_name_to_string (TensorName tn)
 
ModelConfig parse_model_config_from_gguf (const GGUFData &gguf)
 
ModelConfig parse_model_config (const nlohmann::json &json)
 
int argmax (const std::vector< float > &v)
 
float bfloat16_to_float32 (uint16_t bf16)
 
void rmsnorm (const std::vector< float > &x, const std::vector< uint16_t > &weight, float eps, std::vector< float > &out)
 
void matvec_bf16_f32 (const std::vector< uint16_t > &mat, const std::vector< float > &vec, std::vector< float > &out, int M, int N)
 
void softmax (std::vector< float > &x)
 
std::vector< uint16_t > uint8_vector_to_uint16_vector (const std::vector< uint8_t > &bytes, size_t numel)
 
void log_vector_summary (const std::string &name, const std::vector< float > &v, int head_count=5)
 
void log_vector_summary_batch (const std::string &name, const std::vector< float > &batch_vector, int num_tokens_in_batch, int single_token_vector_size, int head_count=5)
 

Typedef Documentation

◆ ForwardDiagCallback

using ForwardDiagCallback = std::function<void(int layer, const std::string &name, const std::vector<float> &v)>

Definition at line 230 of file model.h.
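A callback of this shape lets callers observe intermediate activations during a forward pass. A minimal usage sketch (the lambda body is illustrative; only the signature comes from this typedef):

#include <iostream>

// Illustrative diagnostic hook: print one summary line per reported tensor.
ForwardDiagCallback diag = [](int layer, const std::string& name,
                              const std::vector<float>& v) {
    std::cout << "layer " << layer << " '" << name
              << "' size=" << v.size() << '\n';
};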

Enumeration Type Documentation

◆ TensorName

enum class TensorName

Enumeration of tensor names used in the TinyLlama model.

This enum class defines the different types of tensors used in the transformer architecture, including attention projections, feed-forward layers, and embeddings.

Enumerator

Q_PROJ      Query projection matrix
K_PROJ      Key projection matrix
V_PROJ      Value projection matrix
O_PROJ      Output projection matrix
GATE_PROJ   Gate projection for SwiGLU activation
UP_PROJ     Upward projection in feed-forward network
DOWN_PROJ   Downward projection in feed-forward network
TOKEN_EMBD  Token embedding matrix
LM_HEAD     Language model head for final token prediction
UNKNOWN     Unknown tensor type

Definition at line 36 of file model.h.

Function Documentation

◆ argmax()

int argmax ( const std::vector< float > &  v)

Definition at line 185 of file utils.cpp.

{
  if (v.empty()) {
    Logger::error("Cannot perform argmax on empty vector");
    return -1;
  }
  auto max_it = std::max_element(v.begin(), v.end());
  float max_val = *max_it;
  int max_idx = std::distance(v.begin(), max_it);
  Logger::debug("[ARGMAX HELPER] Max value found: " + std::to_string(max_val) +
                " at index: " + std::to_string(max_idx));
  return max_idx;
}

References Logger::debug(), and Logger::error().
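Greedy decoding is the typical caller: the next token id is the index of the largest logit. For example:

std::vector<float> logits = {0.1f, 2.5f, -0.3f};
int next_token = argmax(logits);  // returns 1, the index of the largest logit (2.5f)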

◆ bfloat16_to_float32()

float bfloat16_to_float32 ( uint16_t  bf16)

Definition at line 144 of file utils.cpp.

{
  if (bf16 == bfloat16::ZERO) return 0.0f;
  if (bf16 == bfloat16::NEG_ZERO) return -0.0f;

  bool is_nan = ((bf16 & bfloat16::EXPONENT_MASK) == bfloat16::EXPONENT_MASK) &&
                ((bf16 & bfloat16::MANTISSA_MASK) != 0);
  if (is_nan) return std::numeric_limits<float>::quiet_NaN();

  if (((bf16 & bfloat16::EXPONENT_MASK) == bfloat16::EXPONENT_MASK) &&
      (bf16 & bfloat16::MANTISSA_MASK) == 0) {
    return (bf16 & bfloat16::SIGN_BIT) ? -std::numeric_limits<float>::infinity()
                                       : std::numeric_limits<float>::infinity();
  }

  uint32_t bits = static_cast<uint32_t>(bf16) << bfloat16::SHIFT_BITS;
  float result;
  std::memcpy(&result, &bits, sizeof(float));

  return result;
}

References bfloat16::EXPONENT_MASK, bfloat16::MANTISSA_MASK, bfloat16::NEG_ZERO, bfloat16::SHIFT_BITS, bfloat16::SIGN_BIT, and bfloat16::ZERO.
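A bfloat16 value is the upper 16 bits of an IEEE-754 float32 (1 sign bit, 8 exponent bits, 7 mantissa bits), which is why the general case above is just a left shift into the high half of a 32-bit word (with SHIFT_BITS presumably 16). For illustration, the inverse truncating conversion (not part of this header) is the mirror image:

#include <cstdint>
#include <cstring>

// Hypothetical helper, shown only to illustrate the bit layout:
// keep the top 16 bits of the float32 representation (round-toward-zero).
uint16_t float32_to_bfloat16_truncate(float f) {
    uint32_t bits;
    std::memcpy(&bits, &f, sizeof(bits));
    return static_cast<uint16_t>(bits >> 16);
}
// bfloat16_to_float32(float32_to_bfloat16_truncate(1.0f)) == 1.0f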

◆ log_vector_summary()

void log_vector_summary ( const std::string &  name,
const std::vector< float > &  v,
int  head_count = 5 
)

Definition at line 207 of file utils.cpp.

{
  if (v.empty()) {
    Logger::info(name + ": EMPTY");
    return;
  }
  std::stringstream ss;
  size_t actual_head_count = SAFE_MIN(static_cast<size_t>(head_count), v.size());

  ss << name << ": size=" << v.size();

  if (actual_head_count > 0) {
    ss << ", first " << actual_head_count << ": [";
    for (size_t i = 0; i < actual_head_count; ++i) {
      ss << (i > 0 ? " " : "") << std::fixed << std::setprecision(4) << v[i];
    }
    ss << "]";
  }
  float minv = *std::min_element(v.begin(), v.end());
  float maxv = *std::max_element(v.begin(), v.end());
  double sum = std::accumulate(v.begin(), v.end(), 0.0);
  float mean = sum / v.size();
  bool all_finite = std::all_of(v.begin(), v.end(), [](float x) { return std::isfinite(x); });
  ss << ", min=" << minv << ", max=" << maxv << ", mean=" << mean
     << ", finite=" << (all_finite ? "yes" : "NO");
  Logger::info(ss.str());
}

References Logger::info(), and SAFE_MIN.

Referenced by TinyLlamaModel::forward().
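Typical debugging usage, e.g. from inside a forward pass:

std::vector<float> hidden(2048, 0.5f);
log_vector_summary("post_rmsnorm_hidden", hidden);      // size, first 5 values, min/max/mean, finiteness
log_vector_summary("post_rmsnorm_hidden", hidden, 10);  // same, but shows the first 10 values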

◆ log_vector_summary_batch()

void log_vector_summary_batch ( const std::string &  name,
const std::vector< float > &  batch_vector,
int  num_tokens_in_batch,
int  single_token_vector_size,
int  head_count = 5 
)
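No definition for this overload appears on this page. A plausible sketch, assuming it summarizes each token's contiguous slice of the flattened batch vector via log_vector_summary:

// Sketch only; the real implementation may differ.
void log_vector_summary_batch_sketch(const std::string& name,
                                     const std::vector<float>& batch_vector,
                                     int num_tokens_in_batch,
                                     int single_token_vector_size,
                                     int head_count = 5) {
    for (int t = 0; t < num_tokens_in_batch; ++t) {
        auto begin = batch_vector.begin() +
                     static_cast<std::ptrdiff_t>(t) * single_token_vector_size;
        std::vector<float> token(begin, begin + single_token_vector_size);
        log_vector_summary(name + "[token " + std::to_string(t) + "]", token, head_count);
    }
}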

◆ matvec_bf16_f32()

void matvec_bf16_f32 ( const std::vector< uint16_t > &  mat,
const std::vector< float > &  vec,
std::vector< float > &  out,
int  M,
int  N 
)
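The definition is not shown on this page. Reading the signature as out = mat * vec, with mat stored row-major as M rows of N bfloat16 words (an assumption, though it matches the usual convention for projection weights), a scalar reference version would be:

// Reference sketch, assuming row-major M x N storage; not the project's
// actual (possibly vectorized) implementation.
void matvec_bf16_f32_sketch(const std::vector<uint16_t>& mat,
                            const std::vector<float>& vec,
                            std::vector<float>& out, int M, int N) {
    out.assign(M, 0.0f);
    for (int i = 0; i < M; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < N; ++j) {
            // Widen each bf16 weight to float32 before the multiply-accumulate.
            acc += bfloat16_to_float32(mat[static_cast<size_t>(i) * N + j]) * vec[j];
        }
        out[i] = acc;
    }
}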

◆ parse_model_config()

ModelConfig parse_model_config ( const nlohmann::json &  json)

Definition at line 20 of file model_config.cpp.

{
  ModelConfig cfg;
  cfg.hidden_size = json.value("hidden_size", 0);
  cfg.intermediate_size = json.value("intermediate_size", 0);
  cfg.num_attention_heads = json.value("num_attention_heads", 0);
  cfg.num_key_value_heads = json.value("num_key_value_heads", 0);
  cfg.num_hidden_layers = json.value("num_hidden_layers", 0);
  cfg.vocab_size = json.value("vocab_size", 0);
  cfg.max_position_embeddings = json.value("max_position_embeddings", 0);
  cfg.rms_norm_eps = json.value("rms_norm_eps", 1e-5f);
  cfg.rope_theta = json.value("rope_theta", 10000.0f);
  cfg.hidden_act = json.value("hidden_act", "silu");
  cfg.torch_dtype = json.value("torch_dtype", "bfloat16");
  cfg.bos_token_id = json.value("bos_token_id", 1);
  cfg.eos_token_id = json.value("eos_token_id", 2);
  cfg.unk_token_id = json.value("unk_token_id", -1);
  cfg.pad_token_id = json.value("pad_token_id", -1);

  // Infer architecture if available.
  if (json.contains("architectures") && json["architectures"].is_array() && !json["architectures"].empty()) {
    // Take the first architecture string if multiple are listed.
    cfg.architecture = json["architectures"][0].get<std::string>();
  } else {
    cfg.architecture = "unknown";
  }
  cfg.model_name = json.value("model_type", cfg.architecture);  // Use model_type, or fall back to architecture.

  Logger::info("[parse_json_config] Inferring tokenizer family for SafeTensors. Arch: '" + cfg.architecture + "', Vocab: " + std::to_string(cfg.vocab_size));
  bool is_llama3_vocab_size_json = (cfg.vocab_size == 128256);
  bool is_llama3_arch_hint_json = (cfg.architecture.find("LlamaForCausalLM") != std::string::npos &&  // Llama 3 often uses this
                                   cfg.architecture.find("Llama2") == std::string::npos);             // Exclude Llama 2 explicitly if needed.

  if (is_llama3_vocab_size_json && is_llama3_arch_hint_json) {
    cfg.tokenizer_family = ModelConfig::TokenizerFamily::LLAMA3_TIKTOKEN;
    Logger::info("[parse_json_config] Result: Identified LLAMA3_TIKTOKEN (vocab size + arch hint).");
    if (cfg.rope_theta == 10000.0f) {
      float llama3_rope_candidate = json.value("rope_theta", 500000.0f);  // Check rope_theta in config.json.
      if (llama3_rope_candidate > 10000.0f) {
        cfg.rope_theta = llama3_rope_candidate;
        Logger::info("[parse_json_config] Adjusted rope_theta to " + std::to_string(cfg.rope_theta) + " for Llama 3 model (was 10000.0).");
      }
    }
  } else if (cfg.vocab_size == 32000 || cfg.architecture.find("Llama") != std::string::npos) {  // Common for Llama 1/2/TinyLlama.
    cfg.tokenizer_family = ModelConfig::TokenizerFamily::LLAMA_SENTENCEPIECE;
    Logger::info("[parse_json_config] Result: Identified LLAMA_SENTENCEPIECE (vocab size or arch hint).");
  } else {
    cfg.tokenizer_family = ModelConfig::TokenizerFamily::UNKNOWN;
    Logger::warning("[parse_json_config] Result: UNKNOWN tokenizer family.");
  }

  return cfg;
}

References ModelConfig::architecture, ModelConfig::bos_token_id, ModelConfig::eos_token_id, ModelConfig::hidden_act, ModelConfig::hidden_size, Logger::info(), ModelConfig::intermediate_size, ModelConfig::LLAMA3_TIKTOKEN, ModelConfig::LLAMA_SENTENCEPIECE, ModelConfig::max_position_embeddings, ModelConfig::model_name, ModelConfig::num_attention_heads, ModelConfig::num_hidden_layers, ModelConfig::num_key_value_heads, ModelConfig::pad_token_id, ModelConfig::rms_norm_eps, ModelConfig::rope_theta, ModelConfig::tokenizer_family, ModelConfig::torch_dtype, ModelConfig::unk_token_id, ModelConfig::UNKNOWN, ModelConfig::vocab_size, and Logger::warning().
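Typical usage with a Hugging Face style config.json (the file name and loading code are illustrative):

#include <fstream>
#include <nlohmann/json.hpp>

std::ifstream f("config.json");
nlohmann::json j = nlohmann::json::parse(f);
ModelConfig cfg = parse_model_config(j);
// cfg.hidden_size, cfg.num_hidden_layers, cfg.tokenizer_family, ... are now populated.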

◆ parse_model_config_from_gguf()

ModelConfig parse_model_config_from_gguf ( const GGUFData &  gguf)

Definition at line 75 of file model_config.cpp.

{
  ModelConfig config;
  Logger::info("[parse_gguf_config] Entered function.");

  auto get_meta_string = [&](const std::string& key,
                             const std::string& default_val) -> std::string {
    auto it = gguf.metadata.find(key);
    if (it != gguf.metadata.end() &&
        std::holds_alternative<std::string>(it->second)) {
      return std::get<std::string>(it->second);
    }
    return default_val;
  };

  auto get_meta_value = [&](const std::string& key, auto default_value) {
    using TargetType = typename std::decay<decltype(default_value)>::type;
    auto it = gguf.metadata.find(key);
    if (it != gguf.metadata.end()) {
      return std::visit(
          [&](const auto& val) -> TargetType {
            using T = std::decay_t<decltype(val)>;

            if constexpr (std::is_integral_v<TargetType>) {
              if constexpr (std::is_integral_v<T> && !std::is_same_v<T, bool>) {
                if constexpr (std::is_unsigned_v<T> &&
                              std::is_signed_v<TargetType>) {
                  if (val > static_cast<std::make_unsigned_t<TargetType>>(
                                std::numeric_limits<TargetType>::max())) {
                    Logger::warning("Metadata key '" + key + "' value " +
                                    std::to_string(val) +
                                    " overflows TargetType. Using default.");
                    return default_value;
                  }
                }
                else if constexpr (std::is_signed_v<T> &&
                                   std::is_signed_v<TargetType> &&
                                   sizeof(T) > sizeof(TargetType)) {
                  if (val > static_cast<T>(
                                std::numeric_limits<TargetType>::max()) ||
                      val < static_cast<T>(
                                std::numeric_limits<TargetType>::lowest())) {
                    Logger::warning("Metadata key '" + key + "' value " +
                                    std::to_string(val) +
                                    " overflows TargetType. Using default.");
                    return default_value;
                  }
                }
                return static_cast<TargetType>(val);
              }
            } else if constexpr (std::is_floating_point_v<TargetType>) {
              if constexpr (std::is_floating_point_v<T>) {
                return static_cast<TargetType>(val);
              }
            } else if constexpr (std::is_same_v<TargetType, bool>) {
              if constexpr (std::is_same_v<T, bool>) {
                return val;
              }
            } else if constexpr (std::is_same_v<TargetType, std::string>) {
              if constexpr (std::is_same_v<T, std::string>) {
                return val;
              }
            }
            Logger::warning("Metadata key '" + key +
                            "' has stored type incompatible with requested "
                            "TargetType. Using default.");
            return default_value;
          },
          it->second);
    } else {
      return default_value;
    }
  };

  config.vocab_size = get_meta_value("tokenizer.ggml.vocab_size",
                                     get_meta_value("llama.vocab_size", 32000));
  config.hidden_size = get_meta_value("llama.embedding_length", 4096);
  config.intermediate_size = get_meta_value("llama.feed_forward_length", 11008);
  config.num_attention_heads = get_meta_value("llama.attention.head_count", 32);
  config.num_hidden_layers = get_meta_value("llama.block_count", 32);
  config.num_key_value_heads = get_meta_value("llama.attention.head_count_kv",
                                              config.num_attention_heads);
  config.max_position_embeddings = get_meta_value("llama.context_length", 4096);
  if (config.max_position_embeddings == 0 ||
      config.max_position_embeddings > 8192) {
    Logger::warning("max_position_embeddings from GGUF is " +
                    std::to_string(config.max_position_embeddings) +
                    ", overriding to sensible default (2048)");
    config.max_position_embeddings = 2048;
  }
  config.rms_norm_eps =
      get_meta_value("llama.attention.layer_norm_rms_epsilon", 1e-5f);
  config.rope_theta = get_meta_value("llama.rope.freq_base", 10000.0f);
  config.hidden_act = "silu";
  config.bos_token_id = get_meta_value("tokenizer.ggml.bos_token_id", -1);
  config.eos_token_id = get_meta_value("tokenizer.ggml.eos_token_id", -1);
  config.unk_token_id = get_meta_value("tokenizer.ggml.unk_token_id", -1);
  config.pad_token_id = get_meta_value("tokenizer.ggml.padding_token_id", -1);

  config.architecture = get_meta_string("general.architecture", "unknown");
  config.model_name = get_meta_string("general.name", "unknown");
  bool has_pre_key = gguf.metadata.count("tokenizer.ggml.pre");
  bool has_merges = !gguf.tokenizer_merges.empty();

  Logger::info("[parse_gguf_config] Architecture: " + config.architecture +
               ", Vocab Size: " + std::to_string(config.vocab_size) +
               ", Has Merges: " + (has_merges ? "Yes" : "No"));

  Logger::info("[parse_gguf_config] Identifying tokenizer family...");
  bool is_llama3_arch_hint = (config.architecture.find("llama3") != std::string::npos ||
                              config.architecture.find("Llama-3") != std::string::npos ||
                              config.architecture.find("Meta-Llama-3") != std::string::npos);
  bool is_llama3_vocab_size = (config.vocab_size == 128256);
  std::string ggml_tokenizer_model = get_meta_string("tokenizer.ggml.model", "");
  bool is_tiktoken_style_tokenizer_model = (ggml_tokenizer_model == "gpt2");

  Logger::info("[parse_gguf_config] L3 Hints: arch_hint=" + std::string(is_llama3_arch_hint ? "Y":"N") +
               ", vocab_size_match=" + std::string(is_llama3_vocab_size ? "Y":"N") +
               ", has_merges=" + std::string(has_merges ? "Y":"N") +
               ", ggml_tokenizer_model_key='" + ggml_tokenizer_model + "' (is_tiktoken_style: " + std::string(is_tiktoken_style_tokenizer_model ? "Y":"N") + ")");

  if (has_merges && is_llama3_vocab_size && is_tiktoken_style_tokenizer_model) {
    config.tokenizer_family = ModelConfig::TokenizerFamily::LLAMA3_TIKTOKEN;
    Logger::info("[parse_gguf_config] Result: Identified LLAMA3_TIKTOKEN (merges + vocab_size + ggml_tokenizer_model='gpt2'). Architecture string was: '" + config.architecture + "'");
    if (!is_llama3_arch_hint && config.architecture == "llama") {
      Logger::info("[parse_gguf_config] Note: Classified as Llama 3 based on tokenizer/vocab, but arch string was 'llama'.");
    }
    if (config.rope_theta == 10000.0f) {
      float llama3_rope_candidate = get_meta_value("llama.rope.freq_base", 500000.0f);
      if (llama3_rope_candidate > 10000.0f) {
        config.rope_theta = llama3_rope_candidate;
        Logger::info("[parse_gguf_config] Adjusted rope_theta to " + std::to_string(config.rope_theta) + " for Llama 3 model (was 10000.0).");
      }
    }
  } else if (config.architecture == "llama" || config.architecture.find("Llama-2") != std::string::npos || config.architecture.find("TinyLlama") != std::string::npos) {
    config.tokenizer_family = ModelConfig::TokenizerFamily::LLAMA_SENTENCEPIECE;
    Logger::info("[parse_gguf_config] Result: Identified LLAMA_SENTENCEPIECE based on architecture: '" + config.architecture + "'");
  } else {
    config.tokenizer_family = ModelConfig::TokenizerFamily::UNKNOWN;
    Logger::info("[parse_gguf_config] Result: UNKNOWN tokenizer family for architecture: '" + config.architecture + "'");
  }

  // Existing chat_template_type and pre_tokenizer_type logic based on architecture and pre_key.
  if (config.model_name.find("TinyLlama") != std::string::npos ||
      (config.architecture == "llama" && has_pre_key)) {
    config.chat_template_type = "tinyllama";
  } else if (config.architecture == "llama" && !has_pre_key) {
    config.chat_template_type = "llama2";
  } else {
    config.chat_template_type = "unknown";
    Logger::warning("Could not determine chat template type for arch='" +
                    config.architecture + "', name='" + config.model_name +
                    "'.");
  }

  if (has_pre_key) {
    config.pre_tokenizer_type =
        get_meta_string("tokenizer.ggml.pre", "unknown");
  } else if (config.architecture == "llama") {
    config.pre_tokenizer_type = "llama";
  } else {
    config.pre_tokenizer_type = "unknown";
  }
  Logger::info("Determined config: architecture='" + config.architecture +
               "', model_name='" + config.model_name + "', chat_template='" +
               config.chat_template_type + "', pre_tokenizer='" +
               config.pre_tokenizer_type + "'");

  if (config.model_name == "llama" && config.pre_tokenizer_type != "llama") {
    config.chat_template_type = "llama2";
    Logger::info(
        "Inferred chat_template_type='llama2' based on model_type and "
        "missing/different pre_tokenizer_type.");
  }

  auto template_it = gguf.metadata.find("tokenizer.chat_template");
  if (template_it != gguf.metadata.end() &&
      std::holds_alternative<std::string>(template_it->second)) {
    config.chat_template_string = std::get<std::string>(template_it->second);
    Logger::info("Found tokenizer.chat_template in metadata.");
  } else {
    Logger::info(
        "tokenizer.chat_template not found or not a string in metadata. Will "
        "use fallback logic.");
    config.chat_template_string = "";
  }
  if (config.chat_template_type == "unknown") {
    if (config.model_name == "llama" && config.pre_tokenizer_type != "llama") {
      config.chat_template_type = "llama2";
      Logger::info(
          "Inferred chat_template_type='llama2' based on model name and "
          "missing/different pre_tokenizer_type.");
    } else if (config.tokenizer_family == ModelConfig::TokenizerFamily::LLAMA3_TIKTOKEN) {
      Logger::info("Llama 3 model identified. Chat template will primarily rely on 'tokenizer.chat_template' from GGUF if present.");
      // Set a generic type for now; actual application will use the string.
      if (gguf.metadata.count("tokenizer.chat_template")) {
        config.chat_template_type = "llama3_gguf_direct";
      } else {
        config.chat_template_type = "llama3_fallback";  // Or some other indicator.
        Logger::warning("Llama 3 model detected, but 'tokenizer.chat_template' not found in GGUF metadata.");
      }
    }
  }

  Logger::info(std::string("[parse_gguf_config] Finished parsing. Returning config. Family: ") +
               (config.tokenizer_family == ModelConfig::TokenizerFamily::LLAMA3_TIKTOKEN ? "L3_TIKTOKEN" :
                (config.tokenizer_family == ModelConfig::TokenizerFamily::LLAMA_SENTENCEPIECE ? "L2_SPM" : "UNKNOWN")));
  return config;
}

References ModelConfig::architecture, ModelConfig::bos_token_id, ModelConfig::chat_template_string, ModelConfig::chat_template_type, ModelConfig::eos_token_id, ModelConfig::hidden_act, ModelConfig::hidden_size, Logger::info(), ModelConfig::intermediate_size, ModelConfig::LLAMA3_TIKTOKEN, ModelConfig::LLAMA_SENTENCEPIECE, ModelConfig::max_position_embeddings, GGUFData::metadata, ModelConfig::model_name, ModelConfig::num_attention_heads, ModelConfig::num_hidden_layers, ModelConfig::num_key_value_heads, ModelConfig::pad_token_id, ModelConfig::pre_tokenizer_type, ModelConfig::rms_norm_eps, ModelConfig::rope_theta, ModelConfig::tokenizer_family, GGUFData::tokenizer_merges, ModelConfig::unk_token_id, ModelConfig::UNKNOWN, ModelConfig::vocab_size, and Logger::warning().

Referenced by TinyLlamaModel::TinyLlamaModel().
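A standalone usage sketch; the GGUF loading call below is hypothetical, since only parse_model_config_from_gguf itself is declared in this header:

// load_gguf_file is a placeholder for whatever produces a GGUFData
// (the actual GGUF loader lives elsewhere in the project).
GGUFData gguf = load_gguf_file("tinyllama.Q8_0.gguf");  // illustrative file name
ModelConfig cfg = parse_model_config_from_gguf(gguf);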

◆ rmsnorm()

void rmsnorm ( const std::vector< float > &  x,
const std::vector< uint16_t > &  weight,
float  eps,
std::vector< float > &  out 
)
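The definition is not shown on this page. RMSNorm computes out[i] = x[i] / sqrt(mean(x^2) + eps) * w[i]; below is a scalar sketch of that standard formulation matching this signature, assuming the bf16 weights are widened per element (consistent with the other bf16 helpers here):

#include <cmath>
#include <cstdint>
#include <vector>

// Reference sketch of RMSNorm; not necessarily the project's exact code.
void rmsnorm_sketch(const std::vector<float>& x,
                    const std::vector<uint16_t>& weight, float eps,
                    std::vector<float>& out) {
    double ss = 0.0;
    for (float v : x) ss += static_cast<double>(v) * v;  // sum of squares
    float inv_rms = 1.0f / std::sqrt(static_cast<float>(ss / x.size()) + eps);
    out.resize(x.size());
    for (size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] * inv_rms * bfloat16_to_float32(weight[i]);
}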

◆ softmax()

void softmax ( std::vector< float > &  x)
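Likewise undefined on this page; a numerically stable in-place softmax consistent with the signature (subtracting the maximum before exponentiation is the standard overflow guard):

#include <algorithm>
#include <cmath>
#include <vector>

// Sketch; assumes x is non-empty.
void softmax_sketch(std::vector<float>& x) {
    float max_val = *std::max_element(x.begin(), x.end());
    float sum = 0.0f;
    for (float& v : x) {
        v = std::exp(v - max_val);  // shift by max for numerical stability
        sum += v;
    }
    for (float& v : x) v /= sum;
}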

◆ tensor_name_to_string()

static std::string tensor_name_to_string ( TensorName  tn)

Definition at line 49 of file model.h.

{
  switch (tn) {
    case TensorName::Q_PROJ:
      return "Q_PROJ";
    case TensorName::K_PROJ:
      return "K_PROJ";
    case TensorName::V_PROJ:
      return "V_PROJ";
    case TensorName::O_PROJ:
      return "O_PROJ";
    case TensorName::GATE_PROJ:
      return "GATE_PROJ";
    case TensorName::UP_PROJ:
      return "UP_PROJ";
    case TensorName::DOWN_PROJ:
      return "DOWN_PROJ";
    case TensorName::TOKEN_EMBD:
      return "TOKEN_EMBD";
    case TensorName::LM_HEAD:
      return "LM_HEAD";
    default:
      return "UNKNOWN";
  }
}

References DOWN_PROJ, GATE_PROJ, K_PROJ, LM_HEAD, O_PROJ, Q_PROJ, TOKEN_EMBD, UP_PROJ, and V_PROJ.

◆ uint8_vector_to_uint16_vector()

std::vector< uint16_t > uint8_vector_to_uint16_vector ( const std::vector< uint8_t > &  bytes,
size_t  numel 
)

Definition at line 176 of file utils.cpp.

{
  if (bytes.size() != numel * 2) {
    throw std::runtime_error("Byte vector size mismatch for uint16_t conversion");
  }
  std::vector<uint16_t> out(numel);
  std::memcpy(out.data(), bytes.data(), bytes.size());
  return out;
}
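This reinterprets raw tensor bytes (e.g. from a SafeTensors file) as bf16 words; the memcpy assumes the file's byte order matches the host's, which holds on the usual little-endian targets. For example:

std::vector<uint8_t> raw = {0x80, 0x3F};  // little-endian bytes of bf16 0x3F80 (1.0)
std::vector<uint16_t> words = uint8_vector_to_uint16_vector(raw, 1);
float f = bfloat16_to_float32(words[0]);  // 1.0f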