Mirror of https://github.com/ggml-org/llama.cpp.git (synced 2026-05-02 15:14:06 +00:00)

Compare commits

12 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d34633d8db | |
| | 4f56458d34 | |
| | 6efb8eb30e | |
| | 36e5a08b20 | |
| | 4dccb38d9a | |
| | 9a818f7c42 | |
| | 18adb4e9bb | |
| | d9653894df | |
| | 128de3585b | |
| | 8c58330318 | |
| | 18c2e1752c | |
| | 8f900abfc0 | |
@@ -137,6 +137,7 @@ as the main playground for developing new features for the [ggml](https://github

- [semperai/amica](https://github.com/semperai/amica)
- [psugihara/FreeChat](https://github.com/psugihara/FreeChat)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [iohub/collama](https://github.com/iohub/coLLaMA)

---
969 convert.py
File diff suppressed because it is too large
@@ -126,24 +126,7 @@ static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::str
}

static std::string get_ftype(int ftype) {
    switch (ftype) {
    case 0:
        return "f32";
    case 1:
        return "f16";
    case 2:
        return "q4_0";
    case 3:
        return "q4_1";
    case 6:
        return "q5_0";
    case 7:
        return "q5_1";
    case 8:
        return "q8_0";
    default:
        throw std::runtime_error(format("%s: Unrecognized file type: %d\n", __func__, ftype));
    }
    return ggml_type_name(static_cast<ggml_type>(ftype));
}

//

@@ -533,6 +516,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
    buffer_size += n_tensors * 128 /* CLIP PADDING */;

    clip_ctx * new_clip = new clip_ctx;

#ifdef GGML_USE_CUBLAS
    new_clip->backend = ggml_backend_cuda_init(0);
    printf("%s: CLIP using CUDA backend\n", __func__);

@@ -543,6 +527,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
    printf("%s: CLIP using Metal backend\n", __func__);
#endif

    if (!new_clip->backend) {
        new_clip->backend = ggml_backend_cpu_init();
        printf("%s: CLIP using CPU backend\n", __func__);

@@ -931,26 +916,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i

    ggml_type type = GGML_TYPE_Q4_1;

    switch (itype) {
    case 2:
        type = GGML_TYPE_Q4_0;
        break;
    case 3:
        type = GGML_TYPE_Q4_1;
        break;
    case 6:
        type = GGML_TYPE_Q5_0;
        break;
    case 7:
        type = GGML_TYPE_Q5_1;
        break;
    case 8:
        type = GGML_TYPE_Q8_0;
        break;
    default:
        fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype);
        return false;
    };
    assert(itype < GGML_TYPE_COUNT);
    type = static_cast<ggml_type>(itype);

    auto * ctx_clip = clip_model_load(fname_inp, 2);

@@ -1010,6 +977,10 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i

    if (quantize) {
        new_type = type;
        if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) {
            new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type
            // fprintf(stderr, "%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type));
        }
        const size_t n_elms = ggml_nelements(cur);
        float * f32_data;

@@ -1054,6 +1025,21 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
            case GGML_TYPE_Q8_0: {
                new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            case GGML_TYPE_Q2_K: {
                new_size = ggml_quantize_q2_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            case GGML_TYPE_Q3_K: {
                new_size = ggml_quantize_q3_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            case GGML_TYPE_Q4_K: {
                new_size = ggml_quantize_q4_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            case GGML_TYPE_Q5_K: {
                new_size = ggml_quantize_q5_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            case GGML_TYPE_Q6_K: {
                new_size = ggml_quantize_q6_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
            } break;
            default: {
                fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
                return false;
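Net effect of the clip.cpp changes above: the quantization type index is no longer translated through hand-written switch statements but is bounds-checked and interpreted directly as a `ggml_type`, which is what makes the k-quant cases reachable. A hedged Python sketch of the before/after mapping (illustrative only; `GGML_TYPE_NAMES` stands in for ggml's internal type table):

```python
# Illustrative sketch of how clip.cpp's itype handling changed (not part of the diff).
GGML_TYPE_NAMES = ["f32", "f16", "q4_0", "q4_1", None, None, "q5_0", "q5_1",
                   "q8_0", "q8_1", "q2_K", "q3_K", "q4_K", "q5_K", "q6_K"]

def resolve_type_old(itype: int) -> str:
    # Old behavior: explicit whitelist, k-quants rejected.
    allowed = {2: "q4_0", 3: "q4_1", 6: "q5_0", 7: "q5_1", 8: "q8_0"}
    if itype not in allowed:
        raise ValueError(f"invalid quantization type {itype}")
    return allowed[itype]

def resolve_type_new(itype: int) -> str:
    # New behavior: bounds-check, then trust the ggml_type enum value directly.
    assert itype < len(GGML_TYPE_NAMES)
    return GGML_TYPE_NAMES[itype]
```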
@@ -243,6 +243,9 @@ int main(int argc, char ** argv) {
    }

    auto image_embed = load_image(ctx_llava, &params);
    if (!image_embed) {
        return 1;
    }

    // process the prompt
    process_prompt(ctx_llava, image_embed, &params, params.prompt);
@@ -23,6 +23,7 @@ Command line options:

- `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
- `--port`: Set the port to listen. Default: `8080`.
- `--path`: Set the path from which to serve static files. Default: `examples/server/public`.
- `--api-key`: Set an api key for request authorization. By default the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token.
- `--embedding`: Enable embedding extraction. Default: disabled.
- `-np N`, `--parallel N`: Set the number of slots for processing requests (default: 1)
- `-cb`, `--cont-batching`: enable continuous batching (a.k.a dynamic batching) (default: disabled)
@@ -174,35 +175,44 @@ node index.js
|
||||
|
||||
`system_prompt`: Change the system prompt (initial prompt of all slots), this is useful for chat applications. [See more](#change-system-prompt-on-runtime)
|
||||
|
||||
*Result JSON:*
|
||||
### Result JSON:
|
||||
|
||||
Note: When using streaming mode (`stream`) only `content` and `stop` will be returned until end of completion.
|
||||
* Note: When using streaming mode (`stream`) only `content` and `stop` will be returned until end of completion.
|
||||
|
||||
`content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, will contain the next token as a string.
|
||||
|
||||
`stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options)
|
||||
- `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has the following structure:
|
||||
|
||||
`generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`
|
||||
```
|
||||
{
|
||||
"content": "<the token selected by the model>",
|
||||
"probs": [
|
||||
{
|
||||
"prob": float,
|
||||
"tok_str": "<most likely token>"
|
||||
},
|
||||
{
|
||||
"prob": float,
|
||||
"tok_str": "<second most likely tonen>"
|
||||
},
|
||||
...
|
||||
]
|
||||
},
|
||||
```
|
||||
Notice that each `probs` is an array of length `n_probs`.
|
||||
|
||||
`model`: The path to the model loaded with `-m`
|
||||
|
||||
`prompt`: The provided `prompt`
|
||||
|
||||
`stopped_eos`: Indicating whether the completion has stopped because it encountered the EOS token
|
||||
|
||||
`stopped_limit`: Indicating whether the completion stopped because `n_predict` tokens were generated before stop words or EOS was encountered
|
||||
|
||||
`stopped_word`: Indicating whether the completion stopped due to encountering a stopping word from `stop` JSON array provided
|
||||
|
||||
`stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word)
|
||||
|
||||
`timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second`
|
||||
|
||||
`tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion (`n_past`)
|
||||
|
||||
`tokens_evaluated`: Number of tokens evaluated in total from the prompt
|
||||
|
||||
`truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`)
|
||||
- `content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, will contain the next token as a string.
|
||||
- `stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options)
|
||||
- `generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`
|
||||
- `model`: The path to the model loaded with `-m`
|
||||
- `prompt`: The provided `prompt`
|
||||
- `stopped_eos`: Indicating whether the completion has stopped because it encountered the EOS token
|
||||
- `stopped_limit`: Indicating whether the completion stopped because `n_predict` tokens were generated before stop words or EOS was encountered
|
||||
- `stopped_word`: Indicating whether the completion stopped due to encountering a stopping word from `stop` JSON array provided
|
||||
- `stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word)
|
||||
- `timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second`
|
||||
- `tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion (`n_past`)
|
||||
- `tokens_evaluated`: Number of tokens evaluated in total from the prompt
|
||||
- `truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`)
|
||||
|
||||
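As an illustration of these fields, a minimal client walk-through (a hedged sketch: the endpoint and field names are from the docs above, but the prompt, model, and key values are made up; the `Authorization` header is only needed when the server was started with `--api-key`):

```python
import json
import urllib.request

# Hypothetical local server, e.g. started with: ./server -m model.gguf --api-key secret
req = urllib.request.Request(
    "http://127.0.0.1:8080/completion",
    data=json.dumps({"prompt": "Building a website can be done in",
                     "n_predict": 8, "n_probs": 2}).encode(),
    headers={"Content-Type": "application/json", "Authorization": "Bearer secret"},
)
with urllib.request.urlopen(req) as resp:
    result = json.load(resp)

print(result["content"])          # the generated text
print(result["stopped_limit"])    # True if n_predict tokens were generated
for item in result.get("completion_probabilities", []):
    top = item["probs"][0]        # most likely token at this position (n_probs entries total)
    print(item["content"], "<-", top["tok_str"], top["prob"])
```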
- **POST** `/tokenize`: Tokenize a given text.
333 ggml-cuda.cu
@@ -116,6 +116,7 @@
#include "ggml.h"
#include "ggml-backend-impl.h"

#define CC_PASCAL 600
#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
#define CC_VOLTA 700
#define CC_OFFSET_AMD 1000000

@@ -556,11 +557,12 @@ static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};

struct cuda_device_capabilities {
    int cc;                  // compute capability
    size_t smpb;             // max. shared memory per block
    bool vmm;                // virtual memory support
    size_t vmm_granularity;  // granularity of virtual memory
};

static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} };
static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, 0, false, 0} };

static void * g_scratch_buffer = nullptr;
static size_t g_scratch_size = 0; // disabled by default

@@ -593,6 +595,19 @@ static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) {
    return a;
}

static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
#if __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
    (void) a;
    bad_arch();
#else
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32));
    }
    return a;
#endif // __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
}

static __device__ __forceinline__ float warp_reduce_max(float x) {
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {

@@ -601,6 +616,19 @@ static __device__ __forceinline__ float warp_reduce_max(float x) {
    return x;
}

static __device__ __forceinline__ half2 warp_reduce_max(half2 x) {
#if __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
    (void) x;
    bad_arch();
#else
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        x = __hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32));
    }
    return x;
#endif // __CUDA_ARCH__ < CC_PASCAL || (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
}

static __device__ __forceinline__ float op_repeat(const float a, const float b) {
    return b;
    GGML_UNUSED(a);
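The two new `half2` reductions above implement a standard XOR butterfly across the 32 lanes of a warp: in each round every lane exchanges with the lane whose index differs in one bit, so after log2(32) = 5 rounds every lane holds the full reduction. A minimal NumPy sketch of the same access pattern (illustrative only; `__shfl_xor_sync` is simulated by index arithmetic):

```python
import numpy as np

def warp_reduce_sum(vals: np.ndarray) -> np.ndarray:
    # vals: one value per lane of a 32-lane warp.
    assert vals.shape == (32,)
    out = vals.astype(np.float32).copy()
    mask = 16
    while mask > 0:
        # Lane i reads the value of lane (i XOR mask), like __shfl_xor_sync.
        partner = np.arange(32) ^ mask
        out = out + out[partner]
        mask >>= 1
    return out  # every lane now holds the sum of all 32 inputs

lanes = np.arange(32, dtype=np.float32)
assert np.allclose(warp_reduce_sum(lanes), lanes.sum())
```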
@@ -5385,75 +5413,233 @@ static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int
|
||||
dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX;
|
||||
}
|
||||
|
||||
static __global__ void soft_max_f32(const float * x, const float * y, float * dst, const int ncols, const int nrows_y, const float scale) {
|
||||
template <bool vals_smem, int ncols_template, int block_size_template, bool need_check>
|
||||
static __global__ void soft_max_f16(const float * x, const float * y, float * dst, const int ncols_par, const int nrows_y, const float scale) {
|
||||
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
|
||||
const int ncols_data = ncols_template == 0 ? ncols_par : ncols_template;
|
||||
const int ncols_smem = GGML_PAD(ncols_data, 2*WARP_SIZE)/2;
|
||||
|
||||
const int tid = threadIdx.x;
|
||||
const int rowx = blockIdx.x;
|
||||
const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension
|
||||
|
||||
const int block_size = blockDim.x;
|
||||
const int block_size = block_size_template == 0 ? blockDim.x : block_size_template;
|
||||
|
||||
const int warp_id = threadIdx.x / WARP_SIZE;
|
||||
const int lane_id = threadIdx.x % WARP_SIZE;
|
||||
|
||||
__shared__ float buf[CUDA_SOFT_MAX_BLOCK_SIZE/WARP_SIZE];
|
||||
extern __shared__ half data_soft_max_f16[];
|
||||
half * buf_iw = data_soft_max_f16 + 0; // shared memory buffer for inter-warp communication
|
||||
// (shared memory) buffer to cache values between iterations:
|
||||
half2 * vals = vals_smem ? (half2 *) (buf_iw + WARP_SIZE) : (half2 *) (dst + rowx*ncols_data);
|
||||
// if the buffer is larger than max. shared memory per block, use dst as temp. buffer instead
|
||||
// in that case col_smem == col_data must be enforced to avoid race conditions
|
||||
|
||||
float max_val = -INFINITY;
|
||||
half2 max_val = make_half2(-INFINITY, -INFINITY);
|
||||
|
||||
for (int col = tid; col < ncols; col += block_size) {
|
||||
const int ix = rowx*ncols + col;
|
||||
const int iy = rowy*ncols + col;
|
||||
max_val = max(max_val, x[ix]*scale + (y ? y[iy] : 0.0f));
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols_smem; col0 += block_size) {
|
||||
const int col_data = 2*col0 + 2*WARP_SIZE*warp_id + lane_id;
|
||||
const int col_smem = vals_smem ? col0 + tid : col_data;
|
||||
|
||||
const int ix = rowx*ncols_data + col_data;
|
||||
const int iy = rowy*ncols_data + col_data;
|
||||
|
||||
half2 val;
|
||||
if (need_check && col_data + 0 >= ncols_data) {
|
||||
val.x = -INFINITY;
|
||||
} else {
|
||||
val.x = x[ix + 0]*scale + (y ? y[iy + 0] : 0.0f);
|
||||
}
|
||||
if (need_check && col_data + WARP_SIZE >= ncols_data) {
|
||||
val.y = -INFINITY;
|
||||
} else {
|
||||
val.y = x[ix + WARP_SIZE]*scale + (y ? y[iy + WARP_SIZE] : 0.0f);
|
||||
}
|
||||
if (!need_check || col_smem < (vals_smem ? ncols_smem : ncols_data)) {
|
||||
vals[col_smem] = val;
|
||||
}
|
||||
max_val = __hmax2(max_val, val);
|
||||
}
|
||||
|
||||
// find the max value in the block
|
||||
max_val = warp_reduce_max(max_val);
|
||||
if (block_size > WARP_SIZE) {
|
||||
if (warp_id == 0) {
|
||||
buf[lane_id] = -INFINITY;
|
||||
buf_iw[lane_id] = -INFINITY;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
if (lane_id == 0) {
|
||||
buf[warp_id] = max_val;
|
||||
buf_iw[warp_id] = __hmax(max_val.x, max_val.y);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
max_val = buf[lane_id];
|
||||
max_val = __half2half2(buf_iw[lane_id]);
|
||||
max_val = warp_reduce_max(max_val);
|
||||
} else {
|
||||
max_val = __half2half2(__hmax(max_val.x, max_val.y));
|
||||
}
|
||||
|
||||
float tmp = 0.f;
|
||||
half2 tmp = make_half2(0.0f, 0.0f); // partial sums
|
||||
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols_smem; col0 += block_size) {
|
||||
const int col_smem = vals_smem ? col0 + tid : 2*col0 + 2*warp_id*WARP_SIZE + lane_id;
|
||||
|
||||
if (ncols_template == 0 && col_smem >= (vals_smem ? ncols_smem : ncols_data)) {
|
||||
break;
|
||||
}
|
||||
|
||||
const half2 val = h2exp(vals[col_smem] - max_val);
|
||||
|
||||
for (int col = tid; col < ncols; col += block_size) {
|
||||
const int ix = rowx*ncols + col;
|
||||
const int iy = rowy*ncols + col;
|
||||
const float val = expf((x[ix]*scale + (y ? y[iy] : 0.0f)) - max_val);
|
||||
tmp += val;
|
||||
dst[ix] = val;
|
||||
vals[col_smem] = val;
|
||||
}
|
||||
|
||||
// find the sum of exps in the block
|
||||
tmp = warp_reduce_sum(tmp);
|
||||
if (block_size > WARP_SIZE) {
|
||||
if (warp_id == 0) {
|
||||
buf[lane_id] = 0.f;
|
||||
buf_iw[lane_id] = 0.0f;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
if (lane_id == 0) {
|
||||
buf[warp_id] = tmp;
|
||||
buf_iw[warp_id] = tmp.x + tmp.y;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
tmp = buf[lane_id];
|
||||
tmp = __half2half2(buf_iw[lane_id]);
|
||||
tmp = warp_reduce_sum(tmp);
|
||||
} else {
|
||||
tmp = __half2half2(tmp.x + tmp.y);
|
||||
}
|
||||
|
||||
const half2 inv_sum = make_half2(1.0f, 1.0f) / tmp;
|
||||
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols_smem; col0 += block_size) {
|
||||
const int col_data = 2*col0 + 2*WARP_SIZE*warp_id + lane_id;
|
||||
const int col_smem = vals_smem ? col0 + tid : col_data;
|
||||
|
||||
const int idst = rowx*ncols_data + col_data;
|
||||
const half2 result = vals[col_smem] * inv_sum;
|
||||
|
||||
if (need_check && col_data + 0 >= ncols_data) {
|
||||
return;
|
||||
}
|
||||
dst[idst] = result.x;
|
||||
|
||||
if (need_check && col_data + WARP_SIZE >= ncols_data) {
|
||||
return;
|
||||
}
|
||||
|
||||
dst[idst + WARP_SIZE] = result.y;
|
||||
}
|
||||
#else
|
||||
(void) x; (void) y; (void) dst; (void) ncols_par; (void) nrows_y; (void) scale;
|
||||
bad_arch();
|
||||
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL
|
||||
}
|
||||
|
||||
template <bool vals_smem, int ncols_template, int block_size_template>
|
||||
static __global__ void soft_max_f32(const float * x, const float * y, float * dst, const int ncols_par, const int nrows_y, const float scale) {
|
||||
const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
|
||||
|
||||
const int tid = threadIdx.x;
|
||||
const int rowx = blockIdx.x;
|
||||
const int rowy = rowx % nrows_y; // broadcast the mask (y) in the row dimension
|
||||
|
||||
const int block_size = block_size_template == 0 ? blockDim.x : block_size_template;
|
||||
|
||||
const int warp_id = threadIdx.x / WARP_SIZE;
|
||||
const int lane_id = threadIdx.x % WARP_SIZE;
|
||||
|
||||
extern __shared__ float data_soft_max_f32[];
|
||||
float * buf_iw = data_soft_max_f32; // shared memory buffer for inter-warp communication
|
||||
// shared memory buffer to cache values between iterations:
|
||||
float * vals = vals_smem ? buf_iw + WARP_SIZE : dst + rowx*ncols;
|
||||
|
||||
float max_val = -INFINITY;
|
||||
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols; col0 += block_size) {
|
||||
const int col = col0 + tid;
|
||||
|
||||
if (ncols_template == 0 && col >= ncols) {
|
||||
break;
|
||||
}
|
||||
|
||||
const int ix = rowx*ncols + col;
|
||||
const int iy = rowy*ncols + col;
|
||||
|
||||
const float val = x[ix]*scale + (y ? y[iy] : 0.0f);
|
||||
vals[col] = val;
|
||||
max_val = max(max_val, val);
|
||||
}
|
||||
|
||||
// find the max value in the block
|
||||
max_val = warp_reduce_max(max_val);
|
||||
if (block_size > WARP_SIZE) {
|
||||
if (warp_id == 0) {
|
||||
buf_iw[lane_id] = -INFINITY;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
if (lane_id == 0) {
|
||||
buf_iw[warp_id] = max_val;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
max_val = buf_iw[lane_id];
|
||||
max_val = warp_reduce_max(max_val);
|
||||
}
|
||||
|
||||
float tmp = 0.0f; // partial sum
|
||||
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols; col0 += block_size) {
|
||||
const int col = col0 + tid;
|
||||
|
||||
if (ncols_template == 0 && col >= ncols) {
|
||||
break;
|
||||
}
|
||||
|
||||
const float val = expf(vals[col] - max_val);
|
||||
tmp += val;
|
||||
vals[col] = val;
|
||||
}
|
||||
|
||||
// find the sum of exps in the block
|
||||
tmp = warp_reduce_sum(tmp);
|
||||
if (block_size > WARP_SIZE) {
|
||||
if (warp_id == 0) {
|
||||
buf_iw[lane_id] = 0.0f;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
if (lane_id == 0) {
|
||||
buf_iw[warp_id] = tmp;
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
tmp = buf_iw[lane_id];
|
||||
tmp = warp_reduce_sum(tmp);
|
||||
}
|
||||
|
||||
const float inv_tmp = 1.f / tmp;
|
||||
const float inv_sum = 1.0f / tmp;
|
||||
|
||||
for (int col = tid; col < ncols; col += block_size) {
|
||||
const int i = rowx*ncols + col;
|
||||
dst[i] *= inv_tmp;
|
||||
#pragma unroll
|
||||
for (int col0 = 0; col0 < ncols; col0 += block_size) {
|
||||
const int col = col0 + tid;
|
||||
|
||||
if (ncols_template == 0 && col >= ncols) {
|
||||
return;
|
||||
}
|
||||
|
||||
const int idst = rowx*ncols + col;
|
||||
dst[idst] = vals[col] * inv_sum;
|
||||
}
|
||||
}
|
||||
|
||||
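Both the f16 and the rewritten f32 kernel compute the same thing: a numerically stable softmax over each row of `x`, with an optional additive mask `y` (broadcast across rows) and a `scale` applied to the logits. A minimal NumPy sketch of that computation, useful as a mental model or checking oracle (illustrative only, not part of the diff):

```python
import numpy as np

def soft_max_ref(x: np.ndarray, y, scale: float) -> np.ndarray:
    """Rowwise scaled, optionally masked, numerically stable softmax.

    x: (nrows_x, ncols) logits; y: (nrows_y, ncols) additive mask or None,
    broadcast row-wise exactly like rowy = rowx % nrows_y in the kernels.
    """
    vals = x * scale
    if y is not None:
        rows = np.arange(x.shape[0]) % y.shape[0]
        vals = vals + y[rows]
    vals = vals - vals.max(axis=1, keepdims=True)  # subtract row max for stability
    e = np.exp(vals)
    return e / e.sum(axis=1, keepdims=True)
```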
@@ -6752,12 +6938,90 @@ static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols
    diag_mask_inf_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x, rows_per_channel, n_past);
}

static void soft_max_f16_cuda(const float * x, const float * y, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, cudaStream_t stream) {
    int nth = WARP_SIZE;
    while (nth < ncols_x/2 && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
    const dim3 block_dims(nth,     1, 1);
    const dim3 block_nums(nrows_x, 1, 1);
    const size_t shmem = (GGML_PAD(ncols_x, 2*WARP_SIZE) + WARP_SIZE)*sizeof(half);
    static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");
    if (shmem <= g_device_caps[g_main_device].smpb) {
        switch (ncols_x) {
            case 32:
                soft_max_f16<true, 32, 32, true><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 64:
                soft_max_f16<true, 64, 32, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 128:
                soft_max_f16<true, 128, 64, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 256:
                soft_max_f16<true, 256, 128, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 512:
                soft_max_f16<true, 512, 256, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 1024:
                soft_max_f16<true, 1024, 512, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 2048:
                soft_max_f16<true, 2048, 1024, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 4096:
                soft_max_f16<true, 4096, 1024, false><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            default:
                soft_max_f16<true, 0, 0, true><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
        }
    } else {
        const size_t shmem_low = WARP_SIZE*sizeof(half);
        soft_max_f16<false, 0, 0, true><<<block_nums, block_dims, shmem_low, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
    }
}

static void soft_max_f32_cuda(const float * x, const float * y, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, cudaStream_t stream) {
    int nth = WARP_SIZE;
    while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
    const dim3 block_dims(nth,     1, 1);
    const dim3 block_nums(nrows_x, 1, 1);
    soft_max_f32<<<block_nums, block_dims, 0, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
    const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
    static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");
    if (shmem < g_device_caps[g_main_device].smpb) {
        switch (ncols_x) {
            case 32:
                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 64:
                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 128:
                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 256:
                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 512:
                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 1024:
                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 2048:
                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            case 4096:
                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
            default:
                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
                break;
        }
    } else {
        const size_t shmem_low = WARP_SIZE*sizeof(float);
        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, y, dst, ncols_x, nrows_y, scale);
    }
}

static void im2col_f32_f16_cuda(const float* x, half* dst,
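The launch logic above picks a fully specialized template instantiation when the row length is a known power of two between 32 and 4096, and falls back to the generic kernel otherwise; when the cached row would exceed the device's shared memory per block, it switches to the low-shared-memory variant that caches values through `dst` instead. A hedged Python sketch of that dispatch decision for the f32 path (`smpb` stands for the device's max shared memory per block; illustrative only):

```python
WARP_SIZE = 32
CUDA_SOFT_MAX_BLOCK_SIZE = 1024

def ggml_pad(x: int, n: int) -> int:
    return ((x + n - 1) // n) * n

def soft_max_f32_dispatch(ncols_x: int, smpb: int) -> dict:
    # Block size: smallest power of two covering the row, capped at 1024.
    nth = WARP_SIZE
    while nth < ncols_x and nth < CUDA_SOFT_MAX_BLOCK_SIZE:
        nth *= 2
    # Shared memory: padded row cache plus one warp-sized inter-warp buffer.
    shmem = (ggml_pad(ncols_x, WARP_SIZE) + WARP_SIZE) * 4  # sizeof(float)
    if shmem < smpb:
        specialized = ncols_x in (32, 64, 128, 256, 512, 1024, 2048, 4096)
        return {"vals_smem": True, "ncols_template": ncols_x if specialized else 0,
                "block_size": nth, "shmem": shmem}
    # Row too long for shared memory: cache values in dst instead.
    return {"vals_smem": False, "ncols_template": 0,
            "block_size": nth, "shmem": WARP_SIZE * 4}

# e.g. on a device with 48 KiB of shared memory per block:
print(soft_max_f32_dispatch(4096, 48 * 1024))
```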
@@ -7072,6 +7336,7 @@ void ggml_init_cublas() {
#else
        g_device_caps[id].cc = 100*prop.major + 10*prop.minor;
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
        g_device_caps[id].smpb = prop.sharedMemPerBlock;
    }
    for (int id = 0; id < g_device_count; ++id) {
        g_tensor_split[id] /= total_vram;

@@ -8087,7 +8352,21 @@ static void ggml_cuda_op_soft_max(
    float scale = 1.0f;
    memcpy(&scale, dst->op_params, sizeof(float));

    soft_max_f32_cuda(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, main_stream);
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
    const bool use_f16_soft_max = false;
#else
#ifdef GGML_CUDA_F16
    const bool use_f16_soft_max = true;
#else
    const bool use_f16_soft_max = false;
#endif // GGML_CUDA_F16
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)

    if (use_f16_soft_max) {
        soft_max_f16_cuda(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, main_stream);
    } else {
        soft_max_f32_cuda(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, main_stream);
    }

    (void) dst;
}
@@ -3841,8 +3841,8 @@ void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg
    uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4];
    int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2)
                              : (scale_2&kmask2) | ((scale_1&kmask1) << 4);
    half dl = il<8 ? d_all * (dl_int - 32.h) : d_all * (dl_int / 16.h - 32.h);
    const half ml = 4.h * dl;
    float dl = il<8 ? d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f);
    const float ml = 4.f * dl;

    il = (il/2) & 3;
    const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);

@@ -3909,7 +3909,7 @@ void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg
    uint8_t ul = 1 << (il/2);
    il = il & 3;
    const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales);
    const float d = il < 2 ? xb->d : xb->d / 16.h;
    const float d = il < 2 ? xb->d : xb->d / 16.f;
    const float min = xb->dmin;
    const float dl = d * sc[0];
    const float ml = min * sc[1];

@@ -3942,17 +3942,17 @@ void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg
#if QK_K == 256
    ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1);
    qh = qh + 32*(il/8) + 16*(il&1);
    half sc = scales[(il%2) + 2 * ((il/2))];
    float sc = scales[(il%2) + 2 * ((il/2))];
    il = (il/2) & 3;
#else
    ql = ql + 16 * (il&1);
    half sc = scales[il];
    float sc = scales[il];
#endif
    const uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
    const uint16_t kmask2 = il>1 ? 0xF0 : 0x0F;
    const half coef = il>1 ? 1.f/16.h : 1.h;
    const half ml = d_all * sc * 32.h;
    const half dl = d_all * sc * coef;
    const float coef = il>1 ? 1.f/16.f : 1.f;
    const float ml = d_all * sc * 32.f;
    const float dl = d_all * sc * coef;
    for (int i = 0; i < 16; ++i) {
        const half q = il&1 ? ((ql[i] & kmask2) | ((qh[i] & kmask1) << 2))
                            : ((ql[i] & kmask2) | ((qh[i] & kmask1) << 4));
@@ -7250,9 +7250,9 @@ void ggml_vec_dot_iq2_xxs_q8_K(const int n, float * restrict s, const void * res
    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

    int8x16x4_t q2u;
    int8x16x4_t q2s;
    int8x16x4_t q8b;
    ggml_int8x16x4_t q2u;
    ggml_int8x16x4_t q2s;
    ggml_int8x16x4_t q8b;

    float sumf = 0;
    for (int i = 0; i < nb; ++i) {

@@ -7261,7 +7261,7 @@ void ggml_vec_dot_iq2_xxs_q8_K(const int n, float * restrict s, const void * res
        const int8_t * restrict q8 = y[i].qs;
        float sumf1 = 0, sumf2 = 0;
        for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
            q8b = vld1q_s8_x4(q8); q8 += 64;
            q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
            memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
            q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
            q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
356 scripts/compare-llama-bench.py (new executable file)
@@ -0,0 +1,356 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import argparse
|
||||
import heapq
|
||||
import sys
|
||||
import os
|
||||
from glob import glob
|
||||
import sqlite3
|
||||
|
||||
try:
|
||||
import git
|
||||
from tabulate import tabulate
|
||||
except ImportError:
|
||||
print("ERROR: the following Python libraries are required: GitPython, tabulate.")
|
||||
sys.exit(1)
|
||||
|
||||
# Properties by which to differentiate results per commit:
|
||||
KEY_PROPERTIES = [
|
||||
"cuda", "opencl", "metal", "gpu_blas", "blas", "cpu_info", "gpu_info", "model_filename",
|
||||
"model_type", "model_size", "model_n_params", "n_batch", "n_threads", "type_k", "type_v",
|
||||
"n_gpu_layers", "main_gpu", "no_kv_offload", "mul_mat_q", "tensor_split", "n_prompt", "n_gen"
|
||||
]
|
||||
|
||||
# Properties that are boolean and are converted to Yes/No for the table:
|
||||
BOOL_PROPERTIES = ["cuda", "opencl", "metal", "gpu_blas", "blas"]
|
||||
|
||||
# Header names for the table:
|
||||
PRETTY_NAMES = {
|
||||
"cuda": "CUDA", "opencl": "OpenCL", "metal": "Metal", "gpu_blas": "GPU BLAS", "blas": "BLAS",
|
||||
"cpu_info": "CPU", "gpu_info": "GPU", "model_filename": "File", "model_type": "Model",
|
||||
"model_size": "Model Size [GiB]", "model_n_params": "Num. of Parameters",
|
||||
"n_batch": "Batch size", "n_threads": "Threads", "type_k": "K type", "type_v": "V type",
|
||||
"n_gpu_layers": "GPU layers", "main_gpu": "Main GPU", "no_kv_offload": "NKVO",
|
||||
"mul_mat_q": "MMQ", "tensor_split": "Tensor split"
|
||||
}
|
||||
|
||||
DEFAULT_SHOW = ["model_type"] # Always show these properties by default.
|
||||
DEFAULT_HIDE = ["model_filename"] # Always hide these properties by default.
|
||||
GPU_NAME_STRIP = ["NVIDIA GeForce ", "Tesla ", "AMD Radeon "] # Strip prefixes for smaller tables.
|
||||
|
||||
DESCRIPTION = """Creates tables from llama-bench data written to an SQLite database. Example usage (Linux):
|
||||
|
||||
$ git checkout master
|
||||
$ make clean && make llama-bench
|
||||
$ ./llama-bench -o sql | sqlite3 llama-bench.sqlite
|
||||
$ git checkout some_branch
|
||||
$ make clean && make llama-bench
|
||||
$ ./llama-bench -o sql | sqlite3 llama-bench.sqlite
|
||||
$ ./scripts/compare-llama-bench.py
|
||||
|
||||
Performance numbers from multiple runs per commit are averaged WITHOUT being weighted by the --repetitions parameter of llama-bench.
|
||||
"""
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=DESCRIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
help_b = (
|
||||
"The baseline commit to compare performance to. "
|
||||
"Accepts either a branch name, tag name, or commit hash. "
|
||||
"Defaults to latest master commit with data."
|
||||
)
|
||||
parser.add_argument("-b", "--baseline", help=help_b)
|
||||
help_c = (
|
||||
"The commit whose performance is to be compared to the baseline. "
|
||||
"Accepts either a branch name, tag name, or commit hash. "
|
||||
"Defaults to the non-master commit for which llama-bench was run most recently."
|
||||
)
|
||||
parser.add_argument("-c", "--compare", help=help_c)
|
||||
help_i = (
|
||||
"Input SQLite file for comparing commits. "
|
||||
"Defaults to 'llama-bench.sqlite' in the current working directory. "
|
||||
"If no such file is found and there is exactly one .sqlite file in the current directory, "
|
||||
"that file is instead used as input."
|
||||
)
|
||||
parser.add_argument("-i", "--input", help=help_i)
|
||||
help_o = (
|
||||
"Output format for the table. "
|
||||
"Defaults to 'pipe' (GitHub compatible). "
|
||||
"Also supports e.g. 'latex' or 'mediawiki'. "
|
||||
"See tabulate documentation for full list."
|
||||
)
|
||||
parser.add_argument("-o", "--output", help=help_o, default="pipe")
|
||||
help_s = (
|
||||
"Columns to add to the table. "
|
||||
"Accepts a comma-separated list of values. "
|
||||
f"Legal values: {', '.join(KEY_PROPERTIES[:-2])}. "
|
||||
"Defaults to model name (model_type) and CPU and/or GPU name (cpu_info, gpu_info) "
|
||||
"plus any column where not all data points are the same. "
|
||||
"If the columns are manually specified, then the results for each unique combination of the "
|
||||
"specified values are averaged WITHOUT weighing by the --repetitions parameter of llama-bench."
|
||||
)
|
||||
parser.add_argument("-s", "--show", help=help_s)
|
||||
|
||||
known_args, unknown_args = parser.parse_known_args()
|
||||
|
||||
if unknown_args:
|
||||
print(f"ERROR: Received unknown args: {unknown_args}.")
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
input_file = known_args.input
|
||||
if input_file is None and os.path.exists("./llama-bench.sqlite"):
|
||||
input_file = "llama-bench.sqlite"
|
||||
if input_file is None:
|
||||
sqlite_files = glob("*.sqlite")
|
||||
if len(sqlite_files) == 1:
|
||||
input_file = sqlite_files[0]
|
||||
|
||||
if input_file is None:
|
||||
print("ERROR: Cannot find a suitable input file, please provide one.")
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
connection = sqlite3.connect(input_file)
|
||||
cursor = connection.cursor()
|
||||
builds = cursor.execute("SELECT DISTINCT build_commit FROM test;").fetchall()
|
||||
|
||||
try:
|
||||
repo = git.Repo(".", search_parent_directories=True)
|
||||
except git.exc.InvalidGitRepositoryError:
|
||||
repo = None
|
||||
|
||||
|
||||
def find_parent_in_data(commit):
|
||||
"""Helper function to find the most recent parent measured in number of commits for which there is data."""
|
||||
heap = [(0, commit)]
|
||||
seen_hexsha8 = set()
|
||||
while heap:
|
||||
depth, current_commit = heapq.heappop(heap)
|
||||
current_hexsha8 = commit.hexsha[:8]
|
||||
if (current_hexsha8,) in builds:
|
||||
return current_hexsha8
|
||||
for parent in commit.parents:
|
||||
parent_hexsha8 = parent.hexsha[:8]
|
||||
if parent_hexsha8 not in seen_hexsha8:
|
||||
seen_hexsha8.add(parent_hexsha8)
|
||||
heapq.heappush(heap, (depth + 1, parent))
|
||||
return None
|
||||
|
||||
|
||||
def get_all_parent_hexsha8s(commit):
|
||||
"""Helper function to recursively get hexsha8 values for all parents of a commit."""
|
||||
unvisited = [commit]
|
||||
visited = []
|
||||
|
||||
while unvisited:
|
||||
current_commit = unvisited.pop(0)
|
||||
visited.append(current_commit.hexsha[:8])
|
||||
for parent in current_commit.parents:
|
||||
if parent.hexsha[:8] not in visited:
|
||||
unvisited.append(parent)
|
||||
|
||||
return visited
|
||||
|
||||
|
||||
def get_commit_name(hexsha8):
|
||||
"""Helper function to find a human-readable name for a commit if possible."""
|
||||
if repo is None:
|
||||
return hexsha8
|
||||
for h in repo.heads:
|
||||
if h.commit.hexsha[:8] == hexsha8:
|
||||
return h.name
|
||||
for t in repo.tags:
|
||||
if t.commit.hexsha[:8] == hexsha8:
|
||||
return t.name
|
||||
return hexsha8
|
||||
|
||||
|
||||
def get_commit_hexsha8(name):
|
||||
"""Helper function to search for a commit given a human-readable name."""
|
||||
if repo is None:
|
||||
return None
|
||||
for h in repo.heads:
|
||||
if h.name == name:
|
||||
return h.commit.hexsha[:8]
|
||||
for t in repo.tags:
|
||||
if t.name == name:
|
||||
return t.commit.hexsha[:8]
|
||||
return None
|
||||
|
||||
|
||||
hexsha8_baseline = name_baseline = None
|
||||
|
||||
# If the user specified a baseline, try to find a commit for it:
|
||||
if known_args.baseline is not None:
|
||||
if (known_args.baseline,) in builds:
|
||||
hexsha8_baseline = known_args.baseline
|
||||
if hexsha8_baseline is None:
|
||||
hexsha8_baseline = get_commit_hexsha8(known_args.baseline)
|
||||
name_baseline = known_args.baseline
|
||||
if hexsha8_baseline is None:
|
||||
print(f"ERROR: cannot find data for baseline={known_args.baseline}.")
|
||||
sys.exit(1)
|
||||
# Otherwise, search for the most recent parent of master for which there is data:
|
||||
elif repo is not None:
|
||||
hexsha8_baseline = find_parent_in_data(repo.heads.master.commit)
|
||||
|
||||
if hexsha8_baseline is None:
|
||||
print("ERROR: No baseline was provided and did not find data for any master branch commits.")
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
else:
|
||||
print(
|
||||
"ERROR: No baseline was provided and the current working directory "
|
||||
"is not part of a git repository from which a baseline could be inferred."
|
||||
)
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
name_baseline = get_commit_name(hexsha8_baseline)
|
||||
|
||||
hexsha8_compare = name_compare = None
|
||||
|
||||
# If the user has specified a compare value, try to find a corresponding commit:
|
||||
if known_args.compare is not None:
|
||||
if (known_args.compare,) in builds:
|
||||
hexsha8_compare = known_args.compare
|
||||
if hexsha8_compare is None:
|
||||
hexsha8_compare = get_commit_hexsha8(known_args.compare)
|
||||
name_compare = known_args.compare
|
||||
if hexsha8_compare is None:
|
||||
print(f"ERROR: cannot find data for baseline={known_args.compare}.")
|
||||
sys.exit(1)
|
||||
# Otherwise, search for the commit for llama-bench was most recently run
|
||||
# and that is not a parent of master:
|
||||
elif repo is not None:
|
||||
hexsha8s_master = get_all_parent_hexsha8s(repo.heads.master.commit)
|
||||
builds_timestamp = cursor.execute(
|
||||
"SELECT build_commit, test_time FROM test ORDER BY test_time;").fetchall()
|
||||
for (hexsha8, _) in reversed(builds_timestamp):
|
||||
if hexsha8 not in hexsha8s_master:
|
||||
hexsha8_compare = hexsha8
|
||||
break
|
||||
|
||||
if hexsha8_compare is None:
|
||||
print("ERROR: No compare target was provided and did not find data for any non-master commits.")
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
else:
|
||||
print(
|
||||
"ERROR: No compare target was provided and the current working directory "
|
||||
"is not part of a git repository from which a compare target could be inferred."
|
||||
)
|
||||
print()
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
name_compare = get_commit_name(hexsha8_compare)
|
||||
|
||||
|
||||
def get_rows(properties):
|
||||
"""
|
||||
Helper function that gets table rows for some list of properties.
|
||||
Rows are created by combining those where all provided properties are equal.
|
||||
The resulting rows are then grouped by the provided properties and the t/s values are averaged.
|
||||
The returned rows are unique in terms of property combinations.
|
||||
"""
|
||||
select_string = ", ".join(
|
||||
[f"tb.{p}" for p in properties] + ["tb.n_prompt", "tb.n_gen", "AVG(tb.avg_ts)", "AVG(tc.avg_ts)"])
|
||||
equal_string = " AND ".join(
|
||||
[f"tb.{p} = tc.{p}" for p in KEY_PROPERTIES] + [
|
||||
f"tb.build_commit = '{hexsha8_baseline}'", f"tc.build_commit = '{hexsha8_compare}'"]
|
||||
)
|
||||
group_order_string = ", ".join([f"tb.{p}" for p in properties] + ["tb.n_gen", "tb.n_prompt"])
|
||||
query = (f"SELECT {select_string} FROM test tb JOIN test tc ON {equal_string} "
|
||||
f"GROUP BY {group_order_string} ORDER BY {group_order_string};")
|
||||
return cursor.execute(query).fetchall()
|
||||
|
||||
|
||||
# If the user provided columns to group the results by, use them:
|
||||
if known_args.show is not None:
|
||||
show = known_args.show.split(",")
|
||||
unknown_cols = []
|
||||
for prop in show:
|
||||
if prop not in KEY_PROPERTIES[:-2]: # Last two values are n_prompt, n_gen.
|
||||
unknown_cols.append(prop)
|
||||
if unknown_cols:
|
||||
print(f"ERROR: Unknown values for --show: {', '.join(unknown_cols)}")
|
||||
print()
|
||||
parser.print_usage()
|
||||
sys.exit(1)
|
||||
rows_show = get_rows(show)
|
||||
# Otherwise, select those columns where the values are not all the same:
|
||||
else:
|
||||
rows_full = get_rows(KEY_PROPERTIES)
|
||||
properties_different = []
|
||||
for i, kp_i in enumerate(KEY_PROPERTIES):
|
||||
if kp_i in DEFAULT_SHOW or kp_i == "n_prompt" or kp_i == "n_gen":
|
||||
continue
|
||||
for row_full in rows_full:
|
||||
if row_full[i] != rows_full[0][i]:
|
||||
properties_different.append(kp_i)
|
||||
break
|
||||
|
||||
show = []
|
||||
# Show CPU and/or GPU by default even if the hardware for all results is the same:
|
||||
if "gpu_blas" not in properties_different and "n_gpu_layers" not in properties_different:
|
||||
gpu_blas = bool(rows_full[0][KEY_PROPERTIES.index("gpu_blas")])
|
||||
ngl = int(rows_full[0][KEY_PROPERTIES.index("n_gpu_layers")])
|
||||
|
||||
if not gpu_blas or ngl != 99 and "cpu_info" not in properties_different:
|
||||
show.append("cpu_info")
|
||||
if gpu_blas and "gpu_info" not in properties_different:
|
||||
show.append("gpu_info")
|
||||
|
||||
show += DEFAULT_SHOW
|
||||
show += properties_different
|
||||
for prop in DEFAULT_HIDE:
|
||||
try:
|
||||
show.remove(prop)
|
||||
except ValueError:
|
||||
pass
|
||||
rows_show = get_rows(show)
|
||||
|
||||
table = []
|
||||
for row in rows_show:
|
||||
n_prompt = int(row[-4])
|
||||
n_gen = int(row[-3])
|
||||
assert n_prompt == 0 or n_gen == 0
|
||||
test_name = f"tg{n_gen}" if n_prompt == 0 else f"pp{n_prompt}"
|
||||
# Regular columns test name avg t/s values Speedup
|
||||
# VVVVVVVVVVVVV VVVVVVVVV VVVVVVVVVVVVVV VVVVVVV
|
||||
table.append(list(row[:-4]) + [test_name] + list(row[-2:]) + [float(row[-1]) / float(row[-2])])
|
||||
|
||||
# Some a-posteriori fixes to make the table contents prettier:
|
||||
for bool_property in BOOL_PROPERTIES:
|
||||
if bool_property in show:
|
||||
ip = show.index(bool_property)
|
||||
for row_table in table:
|
||||
row_table[ip] = "Yes" if int(row_table[ip]) == 1 else "No"
|
||||
|
||||
if "model_size" in show:
|
||||
ip = show.index("model_size")
|
||||
for row_table in table:
|
||||
row_table[ip] = float(row_table[ip]) / 1024 ** 3
|
||||
|
||||
if "gpu_info" in show:
|
||||
ip = show.index("gpu_info")
|
||||
for gns in GPU_NAME_STRIP:
|
||||
for row_table in table:
|
||||
row_table[ip] = row_table[ip].replace(gns, "")
|
||||
|
||||
headers = [PRETTY_NAMES[p] for p in show]
|
||||
headers += ["Test", f"t/s {name_baseline}", f"t/s {name_compare}", "Speedup"]
|
||||
|
||||
print(tabulate(
|
||||
table,
|
||||
headers=headers,
|
||||
floatfmt=".2f",
|
||||
tablefmt=known_args.output
|
||||
))
|
||||
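For intuition, the self-join that `get_rows` assembles for, say, `properties = ["model_type"]` has roughly this shape (a sketch built from the select/equal/group strings above; the long `ON` condition, which equates every entry of KEY_PROPERTIES between `tb` and `tc`, is abbreviated):

```python
EXAMPLE_QUERY = """
SELECT tb.model_type, tb.n_prompt, tb.n_gen, AVG(tb.avg_ts), AVG(tc.avg_ts)
FROM test tb JOIN test tc
  ON tb.model_type = tc.model_type  -- ...one equality per key property...
 AND tb.build_commit = '<hexsha8_baseline>'
 AND tc.build_commit = '<hexsha8_compare>'
GROUP BY tb.model_type, tb.n_gen, tb.n_prompt
ORDER BY tb.model_type, tb.n_gen, tb.n_prompt;
"""
```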
70 scripts/get-pg.sh (new executable file)
@@ -0,0 +1,70 @@
|
||||
#!/bin/bash
|
||||
|
||||
function usage {
|
||||
echo "usage: <n>$0"
|
||||
echo "note: n is the number of essays to download"
|
||||
echo "for specific n, the resulting pg.txt file will have the following number of tokens:"
|
||||
echo "n | tokens"
|
||||
echo "--- | ---"
|
||||
echo "1 | 6230"
|
||||
echo "2 | 23619"
|
||||
echo "5 | 25859"
|
||||
echo "10 | 36888"
|
||||
echo "15 | 50188"
|
||||
echo "20 | 59094"
|
||||
echo "25 | 88764"
|
||||
echo "30 | 103121"
|
||||
echo "32 | 108338"
|
||||
echo "35 | 113403"
|
||||
echo "40 | 127699"
|
||||
echo "45 | 135896"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function has_cmd {
|
||||
if ! [ -x "$(command -v $1)" ]; then
|
||||
echo "error: $1 is not available" >&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# check for: curl, html2text, tail, sed, fmt
|
||||
has_cmd curl
|
||||
has_cmd html2text
|
||||
has_cmd tail
|
||||
has_cmd sed
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
n=$1
|
||||
|
||||
# get urls
|
||||
urls="$(curl http://www.aaronsw.com/2002/feeds/pgessays.rss | grep html | sed -e "s/.*http/http/" | sed -e "s/html.*/html/" | head -n $n)"
|
||||
|
||||
printf "urls:\n%s\n" "$urls"
|
||||
|
||||
if [ -f pg.txt ]; then
|
||||
rm pg.txt
|
||||
fi
|
||||
|
||||
c=1
|
||||
for url in $urls; do
|
||||
echo "processing $url"
|
||||
|
||||
cc=$(printf "%03d" $c)
|
||||
|
||||
curl -L $url | html2text | tail -n +4 | sed -E "s/^[[:space:]]+//g" | fmt -w 80 >> pg-$cc-one.txt
|
||||
cat pg-$cc-one.txt >> pg.txt
|
||||
|
||||
cp -v pg.txt pg-$cc-all.txt
|
||||
c=$((c+1))
|
||||
|
||||
# don't flood the server
|
||||
sleep 1
|
||||
done
|
||||
|
||||
echo "done. data in pg.txt"
|
||||
|
||||
exit 0
|
||||
@@ -450,7 +450,7 @@ struct test_case {

        double err = nmse(f1.data(), f2.data(), f1.size());
        if (err > ud->max_err) {
            printf("[%s] NMSE = %f ", ggml_op_desc(t1), err);
            printf("[%s] NMSE = %.9f > %.9f ", ggml_op_desc(t1), err, ud->max_err);
            //for (int i = 0; i < (int) f1.size(); i++) {
            //    printf("%5d %9.6f %9.6f, diff = %9.6f\n", i, f1[i], f2[i], f1[i] - f2[i]);
            //}

@@ -1449,6 +1449,7 @@ struct test_moe : public test_case {

static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
    std::vector<std::unique_ptr<test_case>> test_cases;
    std::default_random_engine rng(0);

    const ggml_type all_types[] = {
        GGML_TYPE_F32, GGML_TYPE_F16,

@@ -1583,7 +1584,19 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 1}, 5));
    test_cases.emplace_back(new test_diag_mask_inf(GGML_TYPE_F32, {10, 10, 10, 10}, 5));

    test_cases.emplace_back(new test_soft_max());
    std::uniform_int_distribution<> dist_ne1(1, 50);
    int exponent = 1;
    while (exponent < (1 << 17)) {
        std::uniform_int_distribution<> dist_ne0(exponent, 2*exponent);

        for (int n = 0; n < 10; ++n) {
            int64_t ne0 = dist_ne0(rng);
            int64_t ne1 = dist_ne1(rng);
            test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}));
        }

        exponent <<= 1;
    }

    for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
        test_cases.emplace_back(new test_rope(type, {128, 32, 10, 1}, 128, 0, 512)); // llama 7B
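The new soft_max tests sweep row lengths log-uniformly: for every power of two `e` from 1 up to 2^16, ten random `ne0` values are drawn from `[e, 2e]`, so both the specialized power-of-two kernel instantiations and the generic fallback (including its `need_check` edge handling) get exercised. A quick Python sketch of the sampled shapes (illustrative only):

```python
import random

random.seed(0)
shapes = []
exponent = 1
while exponent < (1 << 17):
    for _ in range(10):
        ne0 = random.randint(exponent, 2 * exponent)  # row length (softmax dimension)
        ne1 = random.randint(1, 50)                   # number of rows
        shapes.append((ne0, ne1))
    exponent <<= 1
print(len(shapes), "test shapes, ne0 up to", max(s[0] for s in shapes))
```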