mirror of https://github.com/ggml-org/llama.cpp.git
synced 2026-05-09 02:24:17 +00:00

Compare commits: master-3d5 ... q4_3-range
26 Commits
Commits in this range (SHA1):

102cd98074
71e6ae3779
bd166f7ffc
4282f9b0f3
93c95fcc1b
b7e704658e
5d5f2b2efa
3698f79e6a
0e018fe008
857308d1e8
c50b628810
5f939498d5
36b4f7e064
10f19c1121
7e312f165c
872c365a91
955ef9a5d5
c5aa5e5777
e9a9cb0c54
b6e7f9b09e
50cb666b8a
25d7abbd1f
018f2279f5
9411288271
8687c1f258
.github/workflows/build.yml (vendored): 16 changes
@@ -12,7 +12,7 @@ on:
       - master
     paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp']
   pull_request:
-    types: [opened, synchronize, edited, reopened, review_requested, ready_for_review]
+    types: [opened, synchronize, reopened]
     paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.c', '**/*.cpp']

 env:
@@ -20,8 +20,6 @@ env:

 jobs:
   ubuntu-latest-make:
-    if: github.event.pull_request.draft == false
-
     runs-on: ubuntu-latest

     steps:
@@ -41,8 +39,6 @@ jobs:
           make

   ubuntu-latest-cmake:
-    if: github.event.pull_request.draft == false
-
     runs-on: ubuntu-latest

     steps:
@@ -71,8 +67,6 @@ jobs:
           ctest --verbose

   ubuntu-latest-cmake-sanitizer:
-    if: github.event.pull_request.draft == false
-
     runs-on: ubuntu-latest

     continue-on-error: true
@@ -108,8 +102,6 @@ jobs:
          ctest --verbose

   macOS-latest-make:
-    if: github.event.pull_request.draft == false
-
     runs-on: macos-latest

     steps:
@@ -128,8 +120,6 @@ jobs:
           make

   macOS-latest-cmake:
-    if: github.event.pull_request.draft == false
-
     runs-on: macOS-latest

     steps:
@@ -157,8 +147,6 @@ jobs:
           ctest --verbose

   windows-latest-cmake:
-    if: github.event.pull_request.draft == false
-
     runs-on: windows-latest

     strategy:
@@ -169,7 +157,7 @@ jobs:
           - build: 'avx'
             defines: '-DLLAMA_AVX2=OFF'
           - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
+            defines: '-DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'

     steps:
       - name: Clone
CMakeLists.txt

@@ -201,6 +201,10 @@ endif()

 if (MSVC)
     add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
+
+    if (BUILD_SHARED_LIBS)
+        set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
+    endif()
 endif()

 if (LLAMA_LTO)
@@ -307,7 +311,8 @@ add_library(ggml OBJECT

 target_include_directories(ggml PUBLIC .)
 target_compile_features(ggml PUBLIC c_std_11) # don't bump
-target_link_libraries(ggml PRIVATE Threads::Threads ${LLAMA_EXTRA_LIBS})
+target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+
 if (BUILD_SHARED_LIBS)
     set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
 endif()
@@ -320,6 +325,7 @@ add_library(llama
 target_include_directories(llama PUBLIC .)
 target_compile_features(llama PUBLIC cxx_std_11) # don't bump
 target_link_libraries(llama PRIVATE ggml ${LLAMA_EXTRA_LIBS})

 if (BUILD_SHARED_LIBS)
     set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
Makefile: 20 changes
@@ -74,13 +74,17 @@ endif
 # feel free to update the Makefile for your architecture and send a pull request or issue
 ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686))
 	# Use all CPU extensions that are available:
-	CFLAGS += -march=native -mtune=native
+	CFLAGS   += -march=native -mtune=native
+	CXXFLAGS += -march=native -mtune=native

 	# Usage AVX-only
 	#CFLAGS   += -mfma -mf16c -mavx
 	#CXXFLAGS += -mfma -mf16c -mavx
 endif
 ifneq ($(filter ppc64%,$(UNAME_M)),)
 	POWER9_M := $(shell grep "POWER9" /proc/cpuinfo)
 	ifneq (,$(findstring POWER9,$(POWER9_M)))
-		CFLAGS += -mcpu=power9
+		CFLAGS   += -mcpu=power9
+		CXXFLAGS += -mcpu=power9
 	endif
 	# Require c++23's std::byteswap for big-endian support.
@@ -101,18 +105,20 @@ ifdef LLAMA_OPENBLAS
 	LDFLAGS += -lopenblas
 endif
 ifdef LLAMA_CUBLAS
-	CFLAGS  += -DGGML_USE_CUBLAS -I/usr/local/cuda/include
-	LDFLAGS += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64
-	OBJS    += ggml-cuda.o
+	CFLAGS    += -DGGML_USE_CUBLAS -I/usr/local/cuda/include
+	LDFLAGS   += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64
+	OBJS      += ggml-cuda.o
+	NVCC      = nvcc
+	NVCCFLAGS = --forward-unknown-to-host-linker -arch=native
 ggml-cuda.o: ggml-cuda.cu ggml-cuda.h
-	nvcc -arch=native -c -o $@ $<
+	$(NVCC) $(NVCCFLAGS) $(CXXFLAGS) -c $< -o $@
 endif
 ifdef LLAMA_GPROF
 	CFLAGS   += -pg
 	CXXFLAGS += -pg
 endif
 ifneq ($(filter aarch64%,$(UNAME_M)),)
-	CFLAGS += -mcpu=native
+	CFLAGS   += -mcpu=native
+	CXXFLAGS += -mcpu=native
 endif
 ifneq ($(filter armv6%,$(UNAME_M)),)
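With NVCC factored into variables, a cuBLAS build is still produced the same way, e.g. `make LLAMA_CUBLAS=1`; and because `NVCCFLAGS` is now an ordinary make variable, it can be overridden on the command line, for example to replace `-arch=native` with an explicit compute architecture on machines where native detection fails.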
examples/alpaca.sh

@@ -7,4 +7,13 @@
 cd `dirname $0`
 cd ..

-./main -m ./models/ggml-alpaca-7b-q4.bin --color -f ./prompts/alpaca.txt --ctx_size 2048 -n -1 -ins -b 256 --top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7
+./main -m ./models/ggml-alpaca-7b-q4.bin \
+       --color \
+       -f ./prompts/alpaca.txt \
+       --ctx_size 2048 \
+       -n -1 \
+       -ins -b 256 \
+       --top_k 10000 \
+       --temp 0.2 \
+       --repeat_penalty 1.1 \
+       -t 7
examples/common.h

@@ -20,7 +20,7 @@ struct gpt_params {
     int32_t repeat_last_n = 64;  // last n tokens to penalize
     int32_t n_parts       = -1;  // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx         = 512; // context size
-    int32_t n_batch       = 8;   // batch size for prompt processing
+    int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep        = 0;   // number of tokens to keep from initial prompt

     // sampling parameters
examples/main/main.cpp

@@ -25,6 +25,7 @@
 #endif

 static console_state con_st;
+static llama_context ** g_ctx;

 static bool is_interacting = false;

@@ -36,6 +37,7 @@ void sigint_handler(int signo) {
     if (!is_interacting) {
         is_interacting = true;
     } else {
+        llama_print_timings(*g_ctx);
         _exit(130);
     }
 }
@@ -94,6 +96,7 @@ int main(int argc, char ** argv) {
 //bool is_prime(int n) {)";

     llama_context * ctx;
+    g_ctx = &ctx;

     // load the model
     {
@@ -264,7 +267,7 @@ int main(int argc, char ** argv) {
             // infinite text generation via context swapping
             // if we run out of context:
             // - take the n_keep first tokens from the original prompt (via n_past)
-            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch
+            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
             if (n_past + (int) embd.size() > n_ctx) {
                 const int n_left = n_past - params.n_keep;

@@ -282,13 +285,21 @@ int main(int argc, char ** argv) {
                 //printf("\n---\n");
             }

-            if (llama_eval(ctx, embd.data(), embd.size(), n_past, params.n_threads)) {
-                fprintf(stderr, "%s : failed to eval\n", __func__);
-                return 1;
+            // evaluate tokens in batches
+            // embd is typically prepared beforehand to fit within a batch, but not always
+            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
+                int n_eval = (int) embd.size() - i;
+                if (n_eval > params.n_batch) {
+                    n_eval = params.n_batch;
+                }
+                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
+                    fprintf(stderr, "%s : failed to eval\n", __func__);
+                    return 1;
+                }
+                n_past += n_eval;
             }
-
-            n_past += embd.size();
         }

         embd.clear();

         if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
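For reference, the context swap that the comment above describes looks roughly like this in full. This is a sketch reconstructed around the two lines shown in the hunk; the last_n_tokens bookkeeping is assumed from the surrounding file and is not part of this diff:

// Sketch: infinite generation via context swapping (names as in main.cpp).
if (n_past + (int) embd.size() > n_ctx) {
    const int n_left = n_past - params.n_keep;

    // always keep the first n_keep tokens of the original prompt
    n_past = params.n_keep;

    // re-feed half of the evicted tokens in front of the pending ones;
    // the batched eval loop above then recomputes their logits
    embd.insert(embd.begin(),
                last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(),
                last_n_tokens.end()   - embd.size());
}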
ggml-cuda.cu: 146 changes
@@ -1,5 +1,7 @@
 #include <stdint.h>
+#include <stdio.h>
 #include <cuda_fp16.h>
+#include <atomic>
 #include "ggml-cuda.h"

 typedef uint16_t ggml_fp16_t;
@@ -29,14 +31,12 @@ static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");

 #define QK4_3 16
 typedef struct {
-    __half  d;              // delta
-    __half  m;              // min
-    uint8_t qs[QK4_3 / 2];  // nibbles / quants
+    __half  d0;             // delta
+    __half  d1;             // delta
+    uint8_t qs[QK4_3 / 2];  // nibbles / quants
 } block_q4_3;
 static_assert(sizeof(block_q4_3) == 2 * sizeof(ggml_fp16_t) + QK4_3 / 2, "wrong q4_3 block size/padding");
-
-
 static __global__ void dequantize_block_q4_0(const void * vx, float * y) {
     const block_q4_0 * x = (const block_q4_0 *) vx;
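For orientation, a minimal CPU-side sketch of what the new two-delta layout stores (not part of the diff; it mirrors the CUDA kernel in the next hunk, and fp16_to_fp32 is a hypothetical half-to-float helper):

// Sketch only: dequantize one block_q4_3 under the new layout.
// The 8 bytes of qs hold 16 nibbles; the first QK4_3/2 values are scaled
// by d0, the second half by d1, and each nibble is recentered by -8.
void dequantize_block_q4_3_ref(const block_q4_3 * x, float * y) {
    const float d0 = fp16_to_fp32(x->d0); // hypothetical half->float helper
    const float d1 = fp16_to_fp32(x->d1);

    for (int l = 0; l < QK4_3/2; l += 2) {
        const uint8_t vi0 = x->qs[l/2];            // nibbles of the first half
        const uint8_t vi1 = x->qs[l/2 + QK4_3/4];  // nibbles of the second half

        y[l + 0]           = ((vi0 & 0xf) - 8)*d0;
        y[l + 1]           = ((vi0 >> 4)  - 8)*d0;
        y[l + 0 + QK4_3/2] = ((vi1 & 0xf) - 8)*d1;
        y[l + 1 + QK4_3/2] = ((vi1 >> 4)  - 8)*d1;
    }
}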
@@ -112,43 +112,127 @@ static __global__ void dequantize_block_q4_3(const void * vx, float * y) {

     const int i = blockIdx.x;

-    const float d = x[i].d;
-    const float m = x[i].m;
+    const float d0 = x[i].d0;
+    const float d1 = x[i].d1;

     const uint8_t * pp = x[i].qs;

-    for (int l = 0; l < QK4_3; l += 2) {
-        const uint8_t vi = pp[l/2];
+    for (int l = 0; l < QK4_3/2; l += 2) {
+        const uint8_t vi0 = pp[l/2];
+        const uint8_t vi1 = pp[l/2 + QK4_3/4];

-        const int8_t vi0 = vi & 0xf;
-        const int8_t vi1 = vi >> 4;
+        const int8_t vi0_0 = vi0 & 0xf;
+        const int8_t vi0_1 = vi0 >> 4;

-        const float v0 = vi0*d + m;
-        const float v1 = vi1*d + m;
+        const int8_t vi1_0 = vi1 & 0xf;
+        const int8_t vi1_1 = vi1 >> 4;

-        y[i*QK4_3 + l + 0] = v0;
-        y[i*QK4_3 + l + 1] = v1;
+        const float v0_0 = (vi0_0 - 8)*d0;
+        const float v0_1 = (vi0_1 - 8)*d0;
+
+        const float v1_0 = (vi1_0 - 8)*d1;
+        const float v1_1 = (vi1_1 - 8)*d1;
+
+        y[i*QK4_3 + l + 0] = v0_0;
+        y[i*QK4_3 + l + 1] = v0_1;
+
+        y[i*QK4_3 + l + 0 + QK4_3/2] = v1_0;
+        y[i*QK4_3 + l + 1 + QK4_3/2] = v1_1;
     }
 }

-extern "C" {
-__host__ void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
-    const int nb = k / QK4_0;
-    dequantize_block_q4_0<<<nb, 1, 0, stream>>>(vx, y);
-}
+void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
+    const int nb = k / QK4_0;
+    dequantize_block_q4_0<<<nb, 1, 0, stream>>>(vx, y);
+}

-__host__ void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
-    const int nb = k / QK4_1;
-    dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
-}
+void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
+    const int nb = k / QK4_1;
+    dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
+}

-__host__ void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
-    const int nb = k / QK4_2;
-    dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
-}
+void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
+    const int nb = k / QK4_2;
+    dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
+}

-__host__ void dequantize_row_q4_3_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
-    const int nb = k / QK4_3;
-    dequantize_block_q4_3<<<nb, 1, 0, stream>>>(vx, y);
-}
-}
+void dequantize_row_q4_3_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
+    const int nb = k / QK4_3;
+    dequantize_block_q4_3<<<nb, 1, 0, stream>>>(vx, y);
+}

+// buffer pool for cuda
+#define MAX_CUDA_BUFFERS 16
+
+struct scoped_spin_lock {
+    std::atomic_flag& lock;
+    scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
+        while (lock.test_and_set(std::memory_order_acquire)) {
+            ; // spin
+        }
+    }
+    ~scoped_spin_lock() {
+        lock.clear(std::memory_order_release);
+    }
+    scoped_spin_lock(const scoped_spin_lock&) = delete;
+    scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
+};
+
+struct cuda_buffer {
+    void * ptr = nullptr;
+    size_t size = 0;
+};
+
+static cuda_buffer g_cuda_buffer_pool[MAX_CUDA_BUFFERS];
+static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
+
+void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
+    scoped_spin_lock lock(g_cuda_pool_lock);
+
+    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
+        cuda_buffer& b = g_cuda_buffer_pool[i];
+        if (b.size >= size && b.ptr != nullptr) {
+            void * ptr = b.ptr;
+            *actual_size = b.size;
+            b.ptr = nullptr;
+            b.size = 0;
+            return ptr;
+        }
+    }
+    void * ptr;
+    CUDA_CHECK(cudaMalloc((void **) &ptr, size));
+    *actual_size = size;
+    return ptr;
+}
+
+void ggml_cuda_pool_free(void * ptr, size_t size) {
+    scoped_spin_lock lock(g_cuda_pool_lock);
+
+    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
+        cuda_buffer& b = g_cuda_buffer_pool[i];
+        if (b.ptr == nullptr) {
+            b.ptr = ptr;
+            b.size = size;
+            return;
+        }
+    }
+    fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
+    CUDA_CHECK(cudaFree(ptr));
+}
+
+cublasHandle_t g_cublasH = NULL;
+cudaStream_t g_cudaStream = NULL;
+
+void ggml_init_cublas(void) {
+    if (g_cublasH == NULL) {
+        // create cublas handle, bind a stream
+        CUBLAS_CHECK(cublasCreate(&g_cublasH));
+
+        CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStream, cudaStreamNonBlocking));
+
+        CUBLAS_CHECK(cublasSetStream(g_cublasH, g_cudaStream));
+
+        // configure logging to stdout
+        // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
+    }
+}
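Callers use the pool roughly as follows. This is a hedged sketch rather than part of the diff; d_quantized, n and the synchronization point stand in for the caller's own device buffer, size and schedule:

// Sketch: typical caller pattern, assuming ggml_init_cublas() has run.
// actual_size may be larger than requested when a pooled buffer is reused.
size_t actual_size = 0;
float * d_buf = (float *) ggml_cuda_pool_malloc(n * sizeof(float), &actual_size);

dequantize_row_q4_0_cuda(d_quantized, d_buf, n, g_cudaStream); // async launch on the shared stream

// ... cublasSgemm / other work queued on g_cudaStream ...

CUDA_CHECK(cudaStreamSynchronize(g_cudaStream));
ggml_cuda_pool_free(d_buf, actual_size); // return the buffer for reuse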
ggml-cuda.h: 29 changes
@@ -1,7 +1,36 @@
 #include <cublas_v2.h>
 #include <cuda_runtime.h>

+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+#define CUDA_CHECK(err)                                                               \
+    do {                                                                              \
+        cudaError_t err_ = (err);                                                     \
+        if (err_ != cudaSuccess) {                                                    \
+            fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
+                cudaGetErrorString(err_));                                            \
+            exit(1);                                                                  \
+        }                                                                             \
+    } while (0)
+
+#define CUBLAS_CHECK(err)                                                             \
+    do {                                                                              \
+        cublasStatus_t err_ = (err);                                                  \
+        if (err_ != CUBLAS_STATUS_SUCCESS) {                                          \
+            fprintf(stderr, "cuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__);  \
+            exit(1);                                                                  \
+        }                                                                             \
+    } while (0)
+
+extern cublasHandle_t g_cublasH;
+extern cudaStream_t g_cudaStream;
+
+void   ggml_init_cublas(void);
+void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size);
+void   ggml_cuda_pool_free(void * ptr, size_t size);
+
 void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream);
 void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream);
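With the extern "C" guards now in the header, these entry points can be called directly from C code such as ggml.c. A minimal sketch of that (the wrapper name is illustrative; q and out must be device pointers, e.g. obtained from ggml_cuda_pool_malloc):

// Sketch: plain-C caller, e.g. from ggml.c when GGML_USE_CUBLAS is defined.
#include "ggml-cuda.h"

static void example_dequantize_q4_0(const void * q, float * out, int k) {
    ggml_init_cublas();                                // one-time handle + stream setup
    dequantize_row_q4_0_cuda(q, out, k, g_cudaStream); // async kernel launch
    CUDA_CHECK(cudaStreamSynchronize(g_cudaStream));   // wait until out is ready
}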
llama.cpp: 131 changes
@@ -27,6 +27,7 @@
 #include <thread>
 #include <atomic>
 #include <mutex>
+#include <sstream>

 #define LLAMA_USE_SCRATCH
 #define LLAMA_MAX_SCRATCH_BUFFERS 16
@@ -67,7 +68,7 @@ static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
         { MODEL_65B, 512ull * MB },
     };
     return _MEM_REQ_SCRATCH1;
-};
+}

 // 2*n_embd*n_ctx*n_layer*sizeof(float16)
 static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
@@ -79,7 +80,7 @@ static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
         { MODEL_65B, 5120ull * MB },
     };
     return _MEM_REQ_KV_SELF;
-};
+}

 // this is mostly needed for temporary mul_mat buffers to dequantize the data
 // not actually needed if BLAS is disabled
@@ -92,7 +93,7 @@ static const std::map<e_model, size_t> & MEM_REQ_EVAL()
         { MODEL_65B, 1536ull * MB },
     };
     return _MEM_REQ_EVAL;
-};
+}

 // default hparams (LLaMA 7B)
 struct llama_hparams {
@@ -1787,7 +1788,7 @@ struct llama_context * llama_init_from_file(
         if (params.logits_all) {
             ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
         } else {
-            ctx->logits.reserve(hparams.n_ctx);
+            ctx->logits.reserve(hparams.n_vocab);
         }

         if (params.embedding){
@@ -2092,7 +2093,11 @@ void llama_set_kv_cache(
         int n_token_count) {
     // Make sure we have the same kv cache setup
     LLAMA_ASSERT(ctx->model.kv_self.buf.size == n_size);
+    void * k_data = ctx->model.kv_self.k->data; // remember data pointers
+    void * v_data = ctx->model.kv_self.v->data; // because their value is stored in buf and overwritten by memcpy
    memcpy(ctx->model.kv_self.buf.addr, kv_cache, n_size);
+    ctx->model.kv_self.k->data = k_data; // restore correct data pointers
+    ctx->model.kv_self.v->data = v_data;
     ctx->model.kv_self.n = n_token_count;
 }

@@ -2248,3 +2253,121 @@ const char * llama_print_system_info(void) {
 std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
     return ctx->model.tensors_by_name;
 }
+
+// Returns the size of the state
+size_t llama_get_state_size(struct llama_context * ctx) {
+    // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state.
+    // for reference, std::mt19937(1337) serializes to 6701 bytes.
+    const size_t s_rng_size        = sizeof(size_t);
+    const size_t s_rng             = 64*1024;
+    const size_t s_logits_capacity = sizeof(size_t);
+    const size_t s_logits_size     = sizeof(size_t);
+    const size_t s_logits          = ctx->logits.capacity() * sizeof(float);
+    const size_t s_embedding_size  = sizeof(size_t);
+    const size_t s_embedding       = ctx->embedding.size() * sizeof(float);
+    const size_t s_kv_size         = sizeof(size_t);
+    const size_t s_kv_ntok         = sizeof(int);
+    const size_t s_kv              = llama_get_kv_cache_size(ctx);
+    const size_t s_total = (
+        + s_rng_size
+        + s_rng
+        + s_logits_capacity
+        + s_logits_size
+        + s_logits
+        + s_embedding_size
+        + s_embedding
+        + s_kv_size
+        + s_kv_ntok
+        + s_kv
+    );
+    return s_total;
+}
+
+// Copies the state to the specified destination address
+size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dest) {
+    std::stringstream rng_ss;
+    rng_ss << ctx->rng;
+    const size_t rng_size = rng_ss.str().size();
+    char rng_buf[64*1024];
+    memset(&rng_buf[0], 0, 64*1024);
+    memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());
+    const size_t logits_capacity = ctx->logits.capacity();
+    const size_t logits_size     = ctx->logits.size();
+    const size_t embedding_size  = ctx->embedding.size();
+    const size_t kv_size         = llama_get_kv_cache_size(ctx);
+    const int    kv_ntok         = llama_get_kv_cache_token_count(ctx);
+
+    uint8_t * out = dest;
+    memcpy(out, &rng_size,   sizeof(size_t)); out += sizeof(size_t);
+    memcpy(out, &rng_buf[0], 64*1024);        out += 64*1024;
+    memcpy(out, &logits_capacity, sizeof(size_t)); out += sizeof(size_t);
+    memcpy(out, &logits_size,     sizeof(size_t)); out += sizeof(size_t);
+    if (logits_size) {
+        memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
+    }
+    out += logits_capacity * sizeof(float);
+    memcpy(out, &embedding_size, sizeof(size_t)); out += sizeof(size_t);
+    if (embedding_size) {
+        memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float)); out += embedding_size * sizeof(float);
+    }
+    memcpy(out, &kv_size, sizeof(size_t)); out += sizeof(size_t);
+    memcpy(out, &kv_ntok, sizeof(int));    out += sizeof(int);
+    if (kv_size) {
+        memcpy(out, llama_get_kv_cache(ctx), kv_size); out += kv_size;
+    }
+    const size_t written  = out - dest;
+    const size_t expected = llama_get_state_size(ctx);
+    LLAMA_ASSERT(written == expected);
+    return written;
+}
+
+// Sets the state reading from the specified source address
+size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
+    size_t rng_size;
+    char   rng_buf[64*1024];
+    std::stringstream rng_ss;
+
+    const uint8_t * in = src;
+    memcpy(&rng_size,   in, sizeof(size_t)); in += sizeof(size_t);
+    memcpy(&rng_buf[0], in, 64*1024);        in += 64*1024;
+    rng_ss.str(std::string(&rng_buf[0], rng_size));
+    rng_ss >> ctx->rng;
+    LLAMA_ASSERT(rng_ss.fail() == false);
+
+    size_t logits_capacity;
+    size_t logits_size;
+    size_t embedding_size;
+    size_t kv_size;
+    int    kv_ntok;
+
+    memcpy(&logits_capacity, in, sizeof(size_t)); in += sizeof(size_t);
+    memcpy(&logits_size,     in, sizeof(size_t)); in += sizeof(size_t);
+    LLAMA_ASSERT(ctx->logits.capacity() == logits_capacity);
+    if (logits_size) {
+        ctx->logits.resize(logits_size);
+        memcpy(ctx->logits.data(), in, logits_size * sizeof(float));
+    }
+    in += logits_capacity * sizeof(float);
+    memcpy(&embedding_size, in, sizeof(size_t)); in += sizeof(size_t);
+    LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);
+    if (embedding_size) {
+        memcpy(ctx->embedding.data(), in, embedding_size * sizeof(float));
+        in += embedding_size * sizeof(float);
+    }
+    memcpy(&kv_size, in, sizeof(size_t)); in += sizeof(size_t);
+    memcpy(&kv_ntok, in, sizeof(int));    in += sizeof(int);
+    if (kv_size) {
+        LLAMA_ASSERT(ctx->model.kv_self.buf.size == kv_size);
+        void * k_data = ctx->model.kv_self.k->data; // remember data pointers
+        void * v_data = ctx->model.kv_self.v->data; // because their value is stored in buf and overwritten by memcpy
+        memcpy(ctx->model.kv_self.buf.addr, in, kv_size);
+        ctx->model.kv_self.k->data = k_data; // restore correct data pointers
+        ctx->model.kv_self.v->data = v_data;
+        in += kv_size;
+    }
+    ctx->model.kv_self.n = kv_ntok;
+    const size_t nread    = in - src;
+    const size_t expected = llama_get_state_size(ctx);
+    LLAMA_ASSERT(nread == expected);
+    return nread;
+}
llama.h: 12 changes
@@ -129,6 +129,18 @@ extern "C" {
             size_t   n_size,
             int      n_token_count);

+    // Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
+    LLAMA_API size_t llama_get_state_size(struct llama_context * ctx);
+
+    // Copies the state to the specified destination address.
+    // Destination needs to have allocated enough memory.
+    // Returns the number of bytes copied
+    LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dest);
+
+    // Set the state reading from the specified address
+    // Returns the number of bytes read
+    LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src);
+
     // Run the llama inference to obtain the logits and probabilities for the next token.
     // tokens + n_tokens is the provided batch of new tokens to process
     // n_past is the number of tokens to use from previous eval calls
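Together these three calls support simple session persistence. A minimal sketch of saving and restoring a session to disk, using only the API above (the file handling and function names are illustrative, and the context must be created with the same parameters on both sides so the serialized capacities match):

// Sketch: persist a llama_context's state to a file and restore it later.
#include <cstdio>
#include <vector>

bool save_session(llama_context * ctx, const char * path) {
    const size_t n = llama_get_state_size(ctx);            // exact serialized size
    std::vector<uint8_t> buf(n);
    const size_t written = llama_copy_state_data(ctx, buf.data());

    FILE * f = fopen(path, "wb");
    if (!f) return false;
    const bool ok = fwrite(buf.data(), 1, written, f) == written;
    fclose(f);
    return ok;
}

bool load_session(llama_context * ctx, const char * path) {
    FILE * f = fopen(path, "rb");
    if (!f) return false;
    std::vector<uint8_t> buf(llama_get_state_size(ctx));
    const size_t n = fread(buf.data(), 1, buf.size(), f);
    fclose(f);
    return llama_set_state_data(ctx, buf.data()) == n;     // bytes read must match
}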
llama_util.h: 17 changes
@@ -21,6 +21,9 @@
 #if defined(_POSIX_MAPPED_FILES)
 #include <sys/mman.h>
 #endif
+#if defined(_POSIX_MEMLOCK_RANGE)
+#include <sys/resource.h>
+#endif
 #endif
 #endif

@@ -303,8 +306,18 @@ struct llama_mlock {
         if (!mlock(addr, size)) {
             return true;
         } else {
-            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n" MLOCK_SUGGESTION,
-                    size, this->size, std::strerror(errno));
+            char* errmsg = std::strerror(errno);
+            bool suggest = (errno == ENOMEM);
+
+            // Check if the resource limit is fine after all
+            struct rlimit lock_limit;
+            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
+                suggest = false;
+            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
+                suggest = false;
+
+            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
+                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
             return false;
         }
     }
pocs/vdot/CMakeLists.txt

@@ -2,3 +2,8 @@ set(TARGET vdot)
 add_executable(${TARGET} vdot.cpp)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+set(TARGET q8dot)
+add_executable(${TARGET} q8dot.cpp)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
pocs/vdot/q8dot.cpp: 172 changes (new file)
@@ -0,0 +1,172 @@
#include <cstdio>
#include <type_traits>
#include <vector>
#include <random>
#include <chrono>
#include <cstdlib>
#include <cmath>
#include <cassert>
#include <cstring>
#include <array>
#include <type_traits>

#include <ggml.h>

constexpr int kVecSize = 1 << 16;

// Copy-pasted from ggml.c
#define QK4_0 32
typedef struct {
    float   d;              // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
typedef struct {
    float   d;              // delta
    float   m;              // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");

// Copy-pasted from ggml.c
#define QK8_0 32
typedef struct {
    float  d;         // delta
    float  s;         // d * sum(qs[i])
    int8_t qs[QK8_0]; // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == 2*sizeof(float) + QK8_0, "wrong q8_0 block size/padding");

static_assert(QK4_1 == QK8_0, "QK4_1 and QK8_0 must be the same");
static_assert(QK4_0 == QK8_0, "QK4_0 and QK8_0 must be the same");

template <typename T>
void fillQ4blocks(std::vector<T>& blocks, std::mt19937& rndm) {
    for (auto& b : blocks) {
        b.d = 1;
        for (int i=0; i<QK4_1/2; ++i) {
            uint8_t v1 = rndm() >> 28;
            uint8_t v2 = rndm() >> 28;
            b.qs[i] = v1 | (v2 << 4);
        }
    }
}

void fillQ80blocks(std::vector<block_q8_0>& blocks, std::mt19937& rndm) {
    for (auto& b : blocks) {
        b.d = 1;
        int sum = 0;
        for (int i=0; i<QK8_0; ++i) {
            b.qs[i] = (rndm() >> 24) - 128;
            sum += b.qs[i];
        }
        b.s = b.d * sum;
    }
}

float simpleDot(const block_q4_0& x, const block_q8_0& y) {
    int s1 = 0; //, s2 = 0;
    for (int i=0; i<QK4_1/2; i+=2) {
        int v1 = x.qs[i+0] & 0xf;
        int v2 = x.qs[i+0] >> 4;
        int v3 = x.qs[i+1] & 0xf;
        int v4 = x.qs[i+1] >> 4;
        int j = 2*i;
        s1 += v1*y.qs[j] + v2*y.qs[j+1] + v3*y.qs[j+2] + v4*y.qs[j+3];
        //s2 += y.qs[j] + y.qs[j+1] + y.qs[j+2] + y.qs[j+3];
    }
    return y.d * x.d * s1 - 8 * x.d * y.s;
    //return y.d * x.d * (s1 - 8 * s2);
}

float simpleDot(const block_q4_1& x, const block_q8_0& y) {
    int s1 = 0; //, s2 = 0;
    for (int i=0; i<QK4_1/2; i+=2) {
        int v1 = x.qs[i+0] & 0xf;
        int v2 = x.qs[i+0] >> 4;
        int v3 = x.qs[i+1] & 0xf;
        int v4 = x.qs[i+1] >> 4;
        int j = 2*i;
        s1 += v1*y.qs[j] + v2*y.qs[j+1] + v3*y.qs[j+2] + v4*y.qs[j+3];
        //s2 += y.qs[j] + y.qs[j+1] + y.qs[j+2] + y.qs[j+3];
    }
    return y.d * x.d * s1 + y.s * x.m;
    //return y.d * (x.d * s1 + x.m * s2);
}

struct Stat {
    double sum = 0, sumt = 0, sumt2 = 0, maxt = 0;
    int nloop = 0;
    void addResult(double s, double t) {
        sum += s;
        sumt += t; sumt2 += t*t; maxt = std::max(maxt, t);
        ++nloop;
    }
    void reportResult(const char* title) const {
        if (nloop < 1) {
            printf("%s(%s): no result\n",__func__,title);
            return;
        }
        printf("============ %s\n",title);
        printf("<dot> = %g\n",sum/nloop);
        auto t = sumt/nloop, dt = sumt2/nloop - t*t;
        if (dt > 0) dt = sqrt(dt);
        printf("<time> = %g +/- %g us. Max. time = %g us.\n",t,dt,maxt);
    }
};


int main(int argc, char** argv) {

    int nloop = argc > 1 ? atoi(argv[1]) : 10;
    int type  = argc > 2 ? atoi(argv[2]) : 1;

    std::mt19937 rndm(1234);

    std::vector<block_q4_1> x41;
    std::vector<block_q4_0> x40;
    std::vector<block_q8_0> y(kVecSize);
    if (type == 0) x40.resize(kVecSize);
    else {
        x41.resize(kVecSize);
        for (auto& b : x41) b.m = 1;
    }

    auto ggml_type = type == 0 ? GGML_TYPE_Q4_0 : GGML_TYPE_Q4_1;

    auto funcs = ggml_internal_get_quantize_fn(ggml_type);

    Stat simple, ggml;

    for (int iloop=0; iloop<nloop; ++iloop) {

        if (type == 0) fillQ4blocks(x40, rndm);
        else fillQ4blocks(x41, rndm);
        fillQ80blocks(y, rndm);

        auto t1 = std::chrono::high_resolution_clock::now();
        double s = 0;
        if (type == 0) for (int i=0; i<kVecSize; ++i) s += simpleDot(x40[i], y[i]);
        else for (int i=0; i<kVecSize; ++i) s += simpleDot(x41[i], y[i]);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto t = 1e-3*std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t1).count();
        if (iloop > 3) simple.addResult(s, t);

        t1 = std::chrono::high_resolution_clock::now();
        float fs;
        if (type == 0) funcs.vec_dot_q(kVecSize * QK4_1, &fs, x40.data(), y.data());
        else funcs.vec_dot_q(kVecSize * QK4_1, &fs, x41.data(), y.data());
        t2 = std::chrono::high_resolution_clock::now();
        t = 1e-3*std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t1).count();
        if (iloop > 3) ggml.addResult(fs, t);

    }

    // Report the time (and the average of the dot products so the compiler does not come up with the idea
    // of optimizing away the function calls after figuring that the result is not used).
    simple.reportResult("Simple");
    ggml.reportResult("ggml");
    return 0;
}
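The closed-form returns and the commented-out s2 variants are the same identity written two ways: the constant offset (q4_0) or the minimum (q4_1) is factored out of the sum, using the precomputed s = d * sum(qs) carried by each block_q8_0. With s_1 = \sum_i q_i y_i and s_y = d_y \sum_i y_i:

\sum_i d_x (q_i - 8)\, d_y y_i = d_y d_x s_1 - 8\, d_x s_y
\qquad
\sum_i (d_x q_i + m_x)\, d_y y_i = d_y d_x s_1 + m_x s_y

which correspond exactly to `y.d * x.d * s1 - 8 * x.d * y.s` and `y.d * x.d * s1 + y.s * x.m`.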
tests/CMakeLists.txt

@@ -6,5 +6,6 @@ function(llama_add_test source)
 endfunction()

 # llama_add_test(test-double-float.c) # SLOW
-llama_add_test(test-quantize.c)
+llama_add_test(test-quantize-fns.cpp)
+llama_add_test(test-quantize-perf.cpp)
 llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
tests/test-quantize-fns.cpp: 154 changes (new file)
@@ -0,0 +1,154 @@
// Unit tests for quantization specific functions - quantize, dequantize and dot product

#include "ggml.h"

#undef NDEBUG
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string>
#include <vector>


const float MAX_QUANTIZATION_REFERENCE_ERROR = 0.0001;
const float MAX_QUANTIZATION_TOTAL_ERROR = 0.002;
const float MAX_DOT_PRODUCT_ERROR = 0.02;

const char* RESULT_STR[] = {"ok", "FAILED"};


// Generate synthetic data
void generate_data(float offset, size_t n, float * dst) {
    for (size_t i = 0; i < n; i++) {
        dst[i] = 0.1 + 2*cosf(i + offset);
    }
}

// Calculate RMSE between two float arrays
float array_rmse(const float * a1, const float * a2, size_t n) {
    double sum = 0;
    for (size_t i = 0; i < n; i++) {
        double diff = a1[i] - a2[i];
        sum += diff * diff;
    }
    return sqrtf(sum) / n;
}

// Total quantization error on test data
float total_quantization_error(quantize_fns_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(test_size);
    std::vector<float> tmp_out(test_size);

    qfns.quantize_row_q(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out.data(), test_size);
    return array_rmse(test_data, tmp_out.data(), test_size);
}

// Total quantization error on test data
float reference_quantization_error(quantize_fns_t & qfns, size_t test_size, const float * test_data) {
    std::vector<uint8_t> tmp_q(test_size);
    std::vector<float> tmp_out(test_size);
    std::vector<float> tmp_out_ref(test_size);

    qfns.quantize_row_q(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out.data(), test_size);

    qfns.quantize_row_q_reference(test_data, tmp_q.data(), test_size);
    qfns.dequantize_row_q(tmp_q.data(), tmp_out_ref.data(), test_size);

    return array_rmse(tmp_out.data(), tmp_out_ref.data(), test_size);
}

float dot_product(const float * a1, const float * a2, size_t test_size) {
    double sum = 0;
    for (size_t i = 0; i < test_size; i++) {
        sum += a1[i] * a2[i];
    }
    return sum;
}

// Total dot product error
float dot_product_error(quantize_fns_t & qfns, size_t test_size, const float * test_data1, const float *test_data2) {
    std::vector<uint8_t> tmp_q1(test_size);
    std::vector<uint8_t> tmp_q2(test_size*2);

    qfns.quantize_row_q(test_data1, tmp_q1.data(), test_size);
    qfns.quantize_row_q_dot(test_data2, tmp_q2.data(), test_size);

    float result = INFINITY;
    qfns.vec_dot_q(test_size, &result, tmp_q1.data(), tmp_q2.data());

    const float dot_ref = dot_product(test_data1, test_data2, test_size);

    return fabsf(result - dot_ref) / test_size;
}

int main(int argc, char * argv[]) {
    bool verbose = false;
    const size_t test_size = 32 * 128;

    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];

        if (arg == "-v") {
            verbose = true;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return 1;
        }
    }

    std::vector<float> test_data(test_size);
    std::vector<float> test_data2(test_size);

    generate_data(0.0, test_data.size(), test_data.data());
    generate_data(1.0, test_data2.size(), test_data2.data());

    // Initialize GGML, ensures float conversion tables are initialized
    struct ggml_init_params ggml_params = {
        /* .mem_size   = */ 1*1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    int num_failed = 0;
    bool failed = false;

    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
        ggml_type type = (ggml_type) i;
        quantize_fns_t qfns = ggml_internal_get_quantize_fn(i);

        if (qfns.quantize_row_q && qfns.dequantize_row_q) {
            const float total_error = total_quantization_error(qfns, test_size, test_data.data());
            failed = !(total_error < MAX_QUANTIZATION_TOTAL_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s absolute quantization error:    %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], total_error);
            }

            const float reference_error = reference_quantization_error(qfns, test_size, test_data.data());
            failed = !(reference_error < MAX_QUANTIZATION_REFERENCE_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s reference implementation error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], reference_error);
            }

            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data2.data());
            failed = !(vec_dot_error < MAX_DOT_PRODUCT_ERROR);
            num_failed += failed;
            if (failed || verbose) {
                printf("%5s dot product error:              %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], vec_dot_error);
            }
        }
    }

    if (num_failed || verbose) {
        printf("%d tests failed\n", num_failed);
    }

    ggml_free(ctx);

    return num_failed > 0;
}
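With the CMake change above, this runs under `ctest --verbose` in the workflows. It can also be run directly, and passing `-v` prints the per-type errors even when every check passes (the binary location depends on the build setup, e.g. `./bin/test-quantize-fns` in a typical CMake build tree).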
tests/test-quantize-perf.cpp: 310 changes (new file)
@@ -0,0 +1,310 @@
// Benchmark quantization specific functions on synthetic data

#include "ggml.h"

#undef NDEBUG
#include <algorithm>
#include <assert.h>
#include <functional>
#include <inttypes.h>
#include <math.h>
#include <memory>
#include <stdio.h>
#include <string>
#include <vector>

#define MAX_ALIGNMENT 64
#define QK 32
#define WARMUP 5
#define ITERATIONS 10

#define L1_SIZE      32*128
#define L2_SIZE     32*2048
#define L3_SIZE    32*20480
#define MEM_SIZE 32*2048000

struct quantize_perf_params {
    std::vector<std::string> include_types;
    std::vector<size_t> test_sizes;
    size_t alignment_offset = 0;
    bool op_quantize_row_q_reference = false;
    bool op_quantize_row_q = false;
    bool op_dequantize_row_q = false;
    bool op_quantize_row_q_dot = false;
    bool op_vec_dot_q = false;
};


#if defined(__x86_64__) || defined(__i386__)

#include <x86intrin.h>
inline int64_t cpu_cycles() {
// Rough way to detect new-ish CPUs
#ifdef __POPCNT__
    unsigned int dummy;
    return __rdtscp(&dummy);
#else
    return __rdtsc();
#endif
}

#else

#define cpu_cycles() 0

#endif


// Generate synthetic data
void generate_data(float offset, size_t n, float * dst) {
    for (size_t i = 0; i < n; i++) {
        dst[i] = 0.1 + 2*cosf(i + offset);
    }
}

float gigabytes_per_second(size_t bytes, int64_t usecs) {
    return bytes / (float) usecs * 1000000 / (1024*1024*1024);
}

void * align_with_offset(void * ptr, int offset) {
    size_t dummy_size = MAX_ALIGNMENT * 4;
    return (char *) std::align(MAX_ALIGNMENT, MAX_ALIGNMENT, ptr, dummy_size) + offset;
}

void benchmark_function(size_t size, size_t q_size, std::function<size_t(void)> function) {
    int64_t min_time_us = INT64_MAX;
    int64_t total_time_us = 0;
    int64_t min_time_cycles = INT64_MAX;
    int64_t total_time_cycles = 0;

    for (int i = 0; i < WARMUP; i++) {
        function();
    }


    for (int i = 0; i < ITERATIONS; i++) {
        const int64_t start_time = ggml_time_us();
        const int64_t start_cycles = cpu_cycles();

        function();

        const int64_t end_cycles = cpu_cycles();
        const int64_t end_time = ggml_time_us();

        total_time_cycles += end_cycles - start_cycles;
        min_time_cycles = std::min(min_time_cycles, end_cycles - start_cycles);
        total_time_us += end_time - start_time;
        min_time_us = std::min(min_time_us, end_time - start_time);
    }

    printf("      min cycles/%d vals   : %9.2f\n", QK, QK * min_time_cycles / (float) size);
    printf("      avg cycles/%d vals   : %9.2f\n", QK, QK * total_time_cycles / (float) (size * ITERATIONS));
    printf("      float32 throughput   : %9.2f GB/s\n", gigabytes_per_second(4 * size * ITERATIONS, total_time_us));
    printf("      quantized throughput : %9.2f GB/s\n", gigabytes_per_second(q_size * ITERATIONS, total_time_us));
}

int main(int argc, char * argv[]) {
    quantize_perf_params params {};

    // read command line

    bool invalid_param = false;
    std::string arg;
    for (int i = 1; i < argc; i++) {
        arg = argv[i];

        if (arg == "--size") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            size_t size = std::stoi(argv[i]);
            if (size % 32 != 0) {
                fprintf(stderr, "error: size %zu not divisible by 32\n", size);
                invalid_param = true;
                break;
            }
            params.test_sizes.push_back(size);
        } else if (arg == "-3") {
            // quick select sizes that probably fit in CPU caches
            params.test_sizes.push_back(L1_SIZE);
            params.test_sizes.push_back(L2_SIZE);
            params.test_sizes.push_back(L3_SIZE);
        } else if (arg == "-4") {
            // quick select cache sizes + memory
            params.test_sizes.push_back(L1_SIZE);
            params.test_sizes.push_back(L2_SIZE);
            params.test_sizes.push_back(L3_SIZE);
            params.test_sizes.push_back(MEM_SIZE);
        } else if (arg == "--op") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            std::string op {argv[i]};
            if (op == "quantize_row_q_reference") {
                params.op_quantize_row_q_reference = true;
            } else if (op == "quantize_row_q") {
                params.op_quantize_row_q = true;
            } else if (op == "dequantize_row_q") {
                params.op_dequantize_row_q = true;
            } else if (op == "quantize_row_q_dot") {
                params.op_quantize_row_q_dot = true;
            } else if (op == "vec_dot_q") {
                params.op_vec_dot_q = true;
            } else {
                invalid_param = true;
                break;
            }
        } else if (arg == "--type") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            params.include_types.push_back(argv[i]);
        } else if (arg == "--alignment-offset") {
            if (++i >= argc) {
                invalid_param = true;
                break;
            }
            int alignment = std::stoi(argv[i]);
            if (alignment < 0 || alignment > MAX_ALIGNMENT) {
                fprintf(stderr, "error: alignment-offset must be less than %d\n", MAX_ALIGNMENT);
                invalid_param = true;
                break;
            }
            params.alignment_offset = alignment;
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return 1;
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
        return 1;
    }

    if (params.test_sizes.empty()) {
        params.test_sizes.push_back(L1_SIZE);
    }
    if (!(params.op_quantize_row_q_reference || params.op_quantize_row_q || params.op_dequantize_row_q || params.op_quantize_row_q_dot || params.op_vec_dot_q)) {
        params.op_quantize_row_q_reference = params.op_quantize_row_q = params.op_dequantize_row_q = params.op_quantize_row_q_dot = params.op_vec_dot_q = true;
    }

    std::sort(params.test_sizes.begin(), params.test_sizes.end());
    size_t largest = params.test_sizes.back();

    std::vector<uint8_t> test_data1_v(largest*4 + MAX_ALIGNMENT*2);
    std::vector<uint8_t> test_data2_v(largest*4 + MAX_ALIGNMENT*2);
    std::vector<uint8_t> test_q1_v   (largest*4 + MAX_ALIGNMENT*2);
    std::vector<uint8_t> test_q2_v   (largest*4 + MAX_ALIGNMENT*2);
    std::vector<uint8_t> test_out_v  (largest*4 + MAX_ALIGNMENT*2);

    float * test_data1 = (float *) align_with_offset(test_data1_v.data(), params.alignment_offset);
    float * test_data2 = (float *) align_with_offset(test_data2_v.data(), params.alignment_offset);
    float * test_q1    = (float *) align_with_offset(test_q1_v.data(),    params.alignment_offset);
    float * test_q2    = (float *) align_with_offset(test_q2_v.data(),    params.alignment_offset);
    float * test_out   = (float *) align_with_offset(test_out_v.data(),   params.alignment_offset);

    generate_data(0, largest, test_data1);
    generate_data(1, largest, test_data2);


    // Initialize GGML, ensures float conversion tables are initialized
    struct ggml_init_params ggml_params = {
        /* .mem_size   = */ 1*1024,
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true,
    };
    struct ggml_context * ctx = ggml_init(ggml_params);

    for (int i = 0; i < GGML_TYPE_COUNT; i++) {
        ggml_type type = (ggml_type) i;
        quantize_fns_t qfns = ggml_internal_get_quantize_fn(i);
        if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), ggml_type_name(type)) == params.include_types.end()) {
            continue;
        }

        if (qfns.quantize_row_q && qfns.dequantize_row_q) {
            printf("%s\n", ggml_type_name(type));

            if (params.op_quantize_row_q_reference) {
                printf("  quantize_row_q_reference\n");
                for (size_t size : params.test_sizes) {
                    printf("    %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
                    auto quantize_fn = [&](void) {
                        qfns.quantize_row_q_reference(test_data1, test_q1, size);
                        return test_q1[0];
                    };
                    size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type);
                    benchmark_function(size, quantized_size, quantize_fn);
                }
                printf("\n");
            }

            if (params.op_quantize_row_q) {
                printf("  quantize_row_q\n");
                for (size_t size : params.test_sizes) {
                    printf("    %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
                    auto quantize_fn = [&](void) {
                        qfns.quantize_row_q(test_data1, test_q1, size);
                        return test_q1[0];
                    };
                    size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type);
                    benchmark_function(size, quantized_size, quantize_fn);
                }
                printf("\n");
            }

            if (params.op_dequantize_row_q) {
                printf("  dequantize_row_q\n");
                qfns.quantize_row_q(test_data1, test_q1, largest);
                for (size_t size : params.test_sizes) {
                    printf("    %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
                    auto quantize_fn = [&](void) {
                        qfns.dequantize_row_q(test_q1, test_out, size);
                        return test_out[0];
                    };
                    size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type);
                    benchmark_function(size, quantized_size, quantize_fn);
                }
                printf("\n");
            }

            if (params.op_quantize_row_q_dot) {
                printf("  quantize_row_q_dot\n");
                for (size_t size : params.test_sizes) {
                    printf("    %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
                    auto quantize_fn = [&](void) {
                        qfns.quantize_row_q_dot(test_data1, test_q1, size);
                        return test_q1[0];
                    };
                    size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type);
                    benchmark_function(size, quantized_size, quantize_fn);
                }
                printf("\n");
            }

            if (params.op_vec_dot_q) {
                printf("  vec_dot_q\n");
                qfns.quantize_row_q(test_data1, test_q1, largest);
                qfns.quantize_row_q(test_data2, test_q2, largest);
                for (size_t size : params.test_sizes) {
                    printf("    %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
                    auto quantize_fn = [&](void) {
                        float result;
                        qfns.vec_dot_q(size, &result, test_q1, test_q2);
                        return result;
                    };
                    size_t quantized_size = size / ggml_blck_size(type) * ggml_type_size(type);
                    benchmark_function(size, quantized_size, quantize_fn);
                }
                printf("\n");
            }
        }
    }

    ggml_free(ctx);

    return 0;
}
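Although registered with CTest via llama_add_test above, the benchmark is mainly meant to be run by hand. Illustrative invocations (binary path assumed from a typical CMake build tree): `./bin/test-quantize-perf -3` sweeps all ops over the L1/L2/L3-sized buffers, while `./bin/test-quantize-perf --type q4_0 --op vec_dot_q --size 4096 --alignment-offset 1` narrows to a single type and op and measures the cost of misaligned inputs.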
tests/test-quantize.c: 42 changes (deleted, replaced by the tests above)

@@ -1,42 +0,0 @@
-#include "ggml.h"
-#undef NDEBUG
-#include <assert.h>
-#include <math.h>
-
-int main(void) {
-    #define QK 32
-    float src[QK];
-    uint8_t dst[24];
-    int64_t hist[16];
-
-    for (int i = 0; i < QK; i++) {
-        src[i] = (float)(i + 1);
-    }
-
-    size_t size = ggml_quantize_q4_0(src, dst, QK, QK, hist);
-    assert(size == 20);
-    float max_result = ((float *)dst)[0];
-    float max_expected = src[31] / ((1 << 3) - 1);
-    assert(max_result == max_expected);
-    for (int i = 0; i < QK; i++) {
-        uint8_t q4_result = (i % 2) ? (dst[sizeof(float) + i/2] >> 4) : (dst[sizeof(float) + i/2] & 0xF);
-        uint8_t q4_expected = roundf(src[i] / max_expected) + 8;
-        assert(q4_result == q4_expected);
-    }
-
-    size = ggml_quantize_q4_1(src, dst, QK, QK, hist);
-    assert(size == 24);
-    float delta_result = ((float *)dst)[0];
-    float delta_expected = (src[31] - src[0]) / ((1 << 4) - 1);
-    assert(delta_result == delta_expected);
-    float min_result = ((float *)dst)[1];
-    float min_expected = src[0];
-    assert(min_result == min_expected);
-    for (int i = 0; i < QK; i++) {
-        uint8_t q4_result = (i % 2) ? (dst[sizeof(float)*2 + i/2] >> 4) : (dst[sizeof(float)*2 + i/2] & 0xF);
-        uint8_t q4_expected = roundf((src[i] - min_expected) / delta_expected);
-        assert(q4_result == q4_expected);
-    }
-
-    return 0;
-}