From 3f3d62ffec73fc3f4bd07baf99723e369c794fce Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Wed, 15 Apr 2026 11:44:00 +0000
Subject: [PATCH] semver: add proper semantic versioning and ABI check workflow
 for libllama

- Add LLAMA_VERSION_MAJOR/MINOR variables to CMakeLists.txt (both default 0)
  replacing the hard-coded 0.0.{build_number} scheme
- Use GenerateExportHeader in src/CMakeLists.txt to generate llama_export.h
  and replace the manual LLAMA_API visibility macro dance in include/llama.h
- Set SOVERSION to LLAMA_VERSION_MAJOR so the .so symlink tracks the major
  ABI version (libllama.so.0 -> libllama.so.0.MINOR.PATCH)
- Install the generated llama_export.h alongside llama.h as a public header
- Add scripts/libllama.abi: committed baseline of exported llama_* symbols
  (233 symbols extracted from the current build)
- Add .github/workflows/libllama-abi-check.yml: CI workflow that builds
  libllama, extracts symbols with nm, and compares against the baseline to
  determine whether a MAJOR (symbols removed) or MINOR (symbols added)
  version bump is required

Agent-Logs-Url: https://github.com/ggml-org/llama.cpp/sessions/e9059c50-ffff-4233-a16d-13a7214f7b98
Co-authored-by: ggerganov <1991296+ggerganov@users.noreply.github.com>
---
 .github/workflows/libllama-abi-check.yml | 102 ++++++++++
 CMakeLists.txt                           |  11 +-
 include/llama.h                          |  14 +-
 scripts/libllama.abi                     | 233 +++++++++++++++++++++++
 src/CMakeLists.txt                       |  17 +-
 5 files changed, 358 insertions(+), 19 deletions(-)
 create mode 100644 .github/workflows/libllama-abi-check.yml
 create mode 100644 scripts/libllama.abi

diff --git a/.github/workflows/libllama-abi-check.yml b/.github/workflows/libllama-abi-check.yml
new file mode 100644
index 0000000000..102707ca83
--- /dev/null
+++ b/.github/workflows/libllama-abi-check.yml
@@ -0,0 +1,102 @@
+name: libllama ABI check
+
+# Checks exported symbols of libllama against a committed baseline and
+# determines whether a major (symbols removed/changed) or minor (symbols
+# added) version bump is required.
+
+on:
+  workflow_dispatch:
+
+  push:
+    branches:
+      - master
+    paths:
+      - 'include/llama.h'
+      - 'include/llama-cpp.h'
+      - 'src/**'
+      - 'scripts/libllama.abi'
+      - 'CMakeLists.txt'
+      - '.github/workflows/libllama-abi-check.yml'
+
+  pull_request:
+    types: [opened, synchronize, reopened]
+    paths:
+      - 'include/llama.h'
+      - 'include/llama-cpp.h'
+      - 'src/**'
+      - 'scripts/libllama.abi'
+      - 'CMakeLists.txt'
+      - '.github/workflows/libllama-abi-check.yml'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  abi-check:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v6
+
+      - name: Configure
+        run: |
+          cmake -B build \
+            -DBUILD_SHARED_LIBS=ON \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DLLAMA_BUILD_EXAMPLES=OFF \
+            -DLLAMA_TOOLS_INSTALL=OFF \
+            -DGGML_METAL=OFF \
+            -DGGML_CUDA=OFF \
+            -DCMAKE_BUILD_TYPE=Release
+
+      - name: Build libllama
+        run: cmake --build build --target llama -j$(nproc)
+
+      - name: Extract exported symbols
+        run: |
+          nm -D --format=posix build/bin/libllama.so \
+            | awk '$2 == "T" || $2 == "W" {print $1}' \
+            | grep '^llama_' \
+            | sort > /tmp/current.abi
+
+      - name: Compare with baseline
+        id: compare
+        run: |
+          baseline=scripts/libllama.abi
+          current=/tmp/current.abi
+
+          removed=$(comm -23 "$baseline" "$current")
+          added=$(comm -13 "$baseline" "$current")
+
+          if [ -n "$removed" ]; then
+            echo "bump=major" >> "$GITHUB_OUTPUT"
+            echo "### :boom: MAJOR version bump required" >> "$GITHUB_STEP_SUMMARY"
+            echo "" >> "$GITHUB_STEP_SUMMARY"
+            echo "The following exported symbols were **removed** from libllama:" >> "$GITHUB_STEP_SUMMARY"
+            echo '```' >> "$GITHUB_STEP_SUMMARY"
+            echo "$removed" >> "$GITHUB_STEP_SUMMARY"
+            echo '```' >> "$GITHUB_STEP_SUMMARY"
+          elif [ -n "$added" ]; then
+            echo "bump=minor" >> "$GITHUB_OUTPUT"
+            echo "### :sparkles: MINOR version bump required" >> "$GITHUB_STEP_SUMMARY"
+            echo "" >> "$GITHUB_STEP_SUMMARY"
+            echo "The following new symbols were **added** to libllama:" >> "$GITHUB_STEP_SUMMARY"
+            echo '```' >> "$GITHUB_STEP_SUMMARY"
+            echo "$added" >> "$GITHUB_STEP_SUMMARY"
+            echo '```' >> "$GITHUB_STEP_SUMMARY"
+          else
+            echo "bump=patch" >> "$GITHUB_OUTPUT"
+            echo "### :white_check_mark: No ABI change – PATCH version bump only" >> "$GITHUB_STEP_SUMMARY"
+          fi
+
+          if [ -n "$removed" ] || [ -n "$added" ]; then
+            echo "" >> "$GITHUB_STEP_SUMMARY"
+            echo "Update \`scripts/libllama.abi\` and increment \`LLAMA_VERSION_MAJOR\`/\`LLAMA_VERSION_MINOR\` in \`CMakeLists.txt\` accordingly." >> "$GITHUB_STEP_SUMMARY"
+          fi
+
+      - name: Fail on unacknowledged ABI break
+        if: steps.compare.outputs.bump == 'major' || steps.compare.outputs.bump == 'minor'
+        run: |
+          echo "ABI change detected. Update scripts/libllama.abi and bump LLAMA_VERSION_MAJOR/LLAMA_VERSION_MINOR in CMakeLists.txt."
+          exit 1
diff --git a/CMakeLists.txt b/CMakeLists.txt
index caea48c506..3027514d7a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -127,7 +127,13 @@ endif()
 if (NOT DEFINED LLAMA_BUILD_COMMIT)
     set(LLAMA_BUILD_COMMIT ${BUILD_COMMIT})
 endif()
-set(LLAMA_INSTALL_VERSION 0.0.${LLAMA_BUILD_NUMBER})
+if (NOT DEFINED LLAMA_VERSION_MAJOR)
+    set(LLAMA_VERSION_MAJOR 0)
+endif()
+if (NOT DEFINED LLAMA_VERSION_MINOR)
+    set(LLAMA_VERSION_MINOR 0)
+endif()
+set(LLAMA_INSTALL_VERSION ${LLAMA_VERSION_MAJOR}.${LLAMA_VERSION_MINOR}.${LLAMA_BUILD_NUMBER})
 
 # override ggml options
 set(GGML_ALL_WARNINGS ${LLAMA_ALL_WARNINGS})
@@ -241,7 +247,8 @@ set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location o
 
 set(LLAMA_PUBLIC_HEADERS
     ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h
-    ${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h)
+    ${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h
+    ${CMAKE_CURRENT_BINARY_DIR}/src/llama_export.h)
 
 set_target_properties(llama
     PROPERTIES
diff --git a/include/llama.h b/include/llama.h
index ac267b5089..c305cc027e 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -12,19 +12,7 @@
 #include <stdio.h>
 #include <stdbool.h>
 
-#ifdef LLAMA_SHARED
-#    if defined(_WIN32) && !defined(__MINGW32__)
-#        ifdef LLAMA_BUILD
-#            define LLAMA_API __declspec(dllexport)
-#        else
-#            define LLAMA_API __declspec(dllimport)
-#        endif
-#    else
-#        define LLAMA_API __attribute__ ((visibility ("default")))
-#    endif
-#else
-#    define LLAMA_API
-#endif
+#include "llama_export.h"
 
 #ifdef __GNUC__
 #    define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
diff --git a/scripts/libllama.abi b/scripts/libllama.abi
new file mode 100644
index 0000000000..6b8bf7d880
--- /dev/null
+++ b/scripts/libllama.abi
@@ -0,0 +1,233 @@
+llama_adapter_get_alora_invocation_tokens
+llama_adapter_get_alora_n_invocation_tokens
+llama_adapter_lora_free
+llama_adapter_lora_init
+llama_adapter_meta_count
+llama_adapter_meta_key_by_index
+llama_adapter_meta_val_str
+llama_adapter_meta_val_str_by_index
+llama_add_bos_token
+llama_add_eos_token
+llama_attach_threadpool
+llama_backend_free
+llama_backend_init
+llama_batch_free
+llama_batch_get_one
+llama_batch_init
+llama_chat_apply_template
+llama_chat_builtin_templates
+llama_context_default_params
+llama_copy_state_data
+llama_decode
+llama_detach_threadpool
+llama_detokenize
+llama_encode
+llama_flash_attn_type_name
+llama_free
+llama_free_model
+llama_get_embeddings
+llama_get_embeddings_ith
+llama_get_embeddings_seq
+llama_get_logits
+llama_get_logits_ith
+llama_get_memory
+llama_get_model
+llama_get_sampled_candidates_count_ith
+llama_get_sampled_candidates_ith
+llama_get_sampled_logits_count_ith
+llama_get_sampled_logits_ith
+llama_get_sampled_probs_count_ith
+llama_get_sampled_probs_ith
+llama_get_sampled_token_ith
+llama_get_state_size
+llama_init_from_model
+llama_load_model_from_file
+llama_load_session_file
+llama_log_get
+llama_log_set
+llama_max_devices
+llama_max_parallel_sequences
+llama_max_tensor_buft_overrides
+llama_memory_breakdown_print
+llama_memory_can_shift
+llama_memory_clear
+llama_memory_seq_add
+llama_memory_seq_cp
+llama_memory_seq_div
+llama_memory_seq_keep
+llama_memory_seq_pos_max
+llama_memory_seq_pos_min
+llama_memory_seq_rm
+llama_model_chat_template
+llama_model_cls_label
+llama_model_decoder_start_token
+llama_model_default_params
+llama_model_desc
+llama_model_free
+llama_model_get_vocab
+llama_model_has_decoder
+llama_model_has_encoder
+llama_model_init_from_user
+llama_model_is_diffusion
+llama_model_is_hybrid
+llama_model_is_recurrent
+llama_model_load_from_file
+llama_model_load_from_file_ptr
+llama_model_load_from_splits
+llama_model_meta_count
+llama_model_meta_key_by_index
+llama_model_meta_key_str
+llama_model_meta_val_str
+llama_model_meta_val_str_by_index
+llama_model_n_cls_out
+llama_model_n_ctx_train
+llama_model_n_embd
+llama_model_n_embd_inp
+llama_model_n_embd_out
+llama_model_n_head
+llama_model_n_head_kv
+llama_model_n_layer
+llama_model_n_params
+llama_model_n_swa
+llama_model_quantize
+llama_model_quantize_default_params
+llama_model_rope_freq_scale_train
+llama_model_rope_type
+llama_model_save_to_file
+llama_model_size
+llama_n_batch
+llama_n_ctx
+llama_n_ctx_seq
+llama_n_ctx_train
+llama_n_embd
+llama_n_head
+llama_n_layer
+llama_n_seq_max
+llama_n_threads
+llama_n_threads_batch
+llama_n_ubatch
+llama_n_vocab
+llama_new_context_with_model
+llama_numa_init
+llama_opt_epoch
+llama_opt_init
+llama_opt_param_filter_all
+llama_params_fit
+llama_perf_context
+llama_perf_context_print
+llama_perf_context_reset
+llama_perf_sampler
+llama_perf_sampler_print
+llama_perf_sampler_reset
+llama_pooling_type
+llama_print_system_info
+llama_sampler_accept
+llama_sampler_apply
+llama_sampler_chain_add
+llama_sampler_chain_default_params
+llama_sampler_chain_get
+llama_sampler_chain_init
+llama_sampler_chain_n
+llama_sampler_chain_remove
+llama_sampler_clone
+llama_sampler_free
+llama_sampler_get_seed
+llama_sampler_init
+llama_sampler_init_adaptive_p
+llama_sampler_init_dist
+llama_sampler_init_dry
+llama_sampler_init_grammar
+llama_sampler_init_grammar_lazy
+llama_sampler_init_grammar_lazy_patterns
+llama_sampler_init_greedy
+llama_sampler_init_infill
+llama_sampler_init_logit_bias
+llama_sampler_init_min_p
+llama_sampler_init_mirostat
+llama_sampler_init_mirostat_v2
+llama_sampler_init_penalties
+llama_sampler_init_temp
+llama_sampler_init_temp_ext
+llama_sampler_init_top_k
+llama_sampler_init_top_n_sigma
+llama_sampler_init_top_p
+llama_sampler_init_typical
+llama_sampler_init_xtc
+llama_sampler_name
+llama_sampler_reset
+llama_sampler_sample
+llama_save_session_file
+llama_set_abort_callback
+llama_set_adapter_cvec
+llama_set_adapters_lora
+llama_set_causal_attn
+llama_set_embeddings
+llama_set_n_threads
+llama_set_sampler
+llama_set_state_data
+llama_set_warmup
+llama_split_path
+llama_split_prefix
+llama_state_get_data
+llama_state_get_size
+llama_state_load_file
+llama_state_save_file
+llama_state_seq_get_data
+llama_state_seq_get_data_ext
+llama_state_seq_get_size
+llama_state_seq_get_size_ext
+llama_state_seq_load_file
+llama_state_seq_save_file
+llama_state_seq_set_data
+llama_state_seq_set_data_ext
+llama_state_set_data
+llama_supports_gpu_offload
+llama_supports_mlock
+llama_supports_mmap
+llama_supports_rpc
+llama_synchronize
+llama_time_us
+llama_token_bos
+llama_token_cls
+llama_token_eos
+llama_token_eot
+llama_token_fim_mid
+llama_token_fim_pad
+llama_token_fim_pre
+llama_token_fim_rep
+llama_token_fim_sep
+llama_token_fim_suf
+llama_token_get_attr
+llama_token_get_score
+llama_token_get_text
+llama_token_is_control
+llama_token_is_eog
+llama_token_nl
+llama_token_pad
+llama_token_sep
+llama_token_to_piece
+llama_tokenize
+llama_vocab_bos
+llama_vocab_cls
+llama_vocab_eos
+llama_vocab_eot
+llama_vocab_fim_mid
+llama_vocab_fim_pad
+llama_vocab_fim_pre
+llama_vocab_fim_rep
+llama_vocab_fim_sep
+llama_vocab_fim_suf
+llama_vocab_get_add_bos
+llama_vocab_get_add_eos
+llama_vocab_get_add_sep
+llama_vocab_get_attr
+llama_vocab_get_score
+llama_vocab_get_text
+llama_vocab_is_control
+llama_vocab_is_eog
+llama_vocab_mask
+llama_vocab_n_tokens
+llama_vocab_nl
+llama_vocab_pad
+llama_vocab_sep
+llama_vocab_type
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 121c21fed9..11403e6c58 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -153,18 +153,27 @@ add_library(llama
 set_target_properties(llama
     PROPERTIES
         VERSION ${LLAMA_INSTALL_VERSION}
-        SOVERSION 0
+        SOVERSION ${LLAMA_VERSION_MAJOR}
         MACHO_CURRENT_VERSION 0  # keep macOS linker from seeing oversized version number
     )
 
+include(GenerateExportHeader)
+generate_export_header(llama
+    BASE_NAME LLAMA
+    EXPORT_MACRO_NAME LLAMA_API
+    NO_EXPORT_MACRO_NAME LLAMA_NO_EXPORT
+    EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/llama_export.h
+    STATIC_DEFINE LLAMA_STATIC_DEFINE)
+
 target_include_directories(llama PRIVATE .)
-target_include_directories(llama PUBLIC ../include)
+target_include_directories(llama PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
 target_compile_features (llama PRIVATE cxx_std_17) # don't bump
 
 target_link_libraries(llama PUBLIC ggml)
 
 if (BUILD_SHARED_LIBS)
     set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
-    target_compile_definitions(llama PRIVATE LLAMA_BUILD)
-    target_compile_definitions(llama PUBLIC LLAMA_SHARED)
 endif()
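
The CI check above can be reproduced locally before pushing. The commands below are a sketch, not part of the patch: they assume a Linux host with binutils nm (matching the ubuntu-latest runner) and the default build directory name "build"; adjust paths and CMake options for other setups.

    # configure and build only the shared library
    cmake -B build -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release
    cmake --build build --target llama -j"$(nproc)"

    # extract the exported llama_* symbols, same pipeline as the workflow
    nm -D --format=posix build/bin/libllama.so \
        | awk '$2 == "T" || $2 == "W" {print $1}' \
        | grep '^llama_' \
        | sort > /tmp/current.abi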
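
From there, the same comm comparison as the workflow's compare step classifies the change; /tmp/current.abi is only the path used above, any location works.

    # symbols present only in the committed baseline were removed -> MAJOR bump
    comm -23 scripts/libllama.abi /tmp/current.abi

    # symbols present only in the new build were added            -> MINOR bump
    comm -13 scripts/libllama.abi /tmp/current.abi

    # no output from either command                               -> PATCH bump only

    # if the change is intentional, refresh the committed baseline and bump
    # LLAMA_VERSION_MAJOR or LLAMA_VERSION_MINOR in CMakeLists.txt
    cp /tmp/current.abi scripts/libllama.abi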