Files
llama.cpp/src/llama-io.cpp
Georgi Gerganov 0754b7b6fe server : avoid checkpoint data host copies (#22558)
* server : avoid checkpoint data host copies

* llama : refactor llama_io_read_i
2026-05-02 18:03:25 +03:00

21 lines
447 B
C++

#include "llama-io.h"
#include <vector>
// Serialize a string as a 32-bit little-endian-sized length prefix followed
// by the raw bytes (the counterpart of llama_io_read_i::read_string).
// NOTE: the length is intentionally truncated to 32 bits — strings of
// 4 GiB or more are not representable in this on-disk format; the explicit
// cast documents that narrowing rather than relying on a silent conversion.
void llama_io_write_i::write_string(const std::string & str) {
    const uint32_t str_size = static_cast<uint32_t>(str.size());

    write(&str_size, sizeof(str_size));
    write(str.data(), str_size);
}
// Deserialize a string written by write_string(): a 32-bit length prefix
// followed by that many raw bytes.
void llama_io_read_i::read_string(std::string & str) {
    uint32_t str_size;
    read(&str_size, sizeof(str_size));

    // Read directly into the string's own storage instead of staging the
    // bytes in a temporary std::vector<char> and copying them over — this
    // saves one heap allocation and one memcpy per string. std::string
    // storage is contiguous (C++11) and non-const data() is available in
    // C++17, which this codebase requires.
    str.resize(str_size);
    read(str.data(), str_size);
}