Skip to content

Commit 25e6f21

Browse files
authored
chore(deps): bump llama.cpp to 4ccea213bc629c4eef7b520f7f6c59ce9bbdaca0 (#5143)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
1 parent b4df1c9 commit 25e6f21

File tree

2 files changed

+2
-2
lines changed

2 files changed

+2
-2
lines changed

Makefile

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@ BINARY_NAME=local-ai
66
DETECT_LIBS?=true
77

88
# llama.cpp versions
-CPPLLAMA_VERSION?=916c83bfe7f8b08ada609c3b8e583cf5301e594b
+CPPLLAMA_VERSION?=4ccea213bc629c4eef7b520f7f6c59ce9bbdaca0
1010

1111
# whisper.cpp version
1212
WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp

backend/cpp/llama/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
88
TARGET?=--target grpc-server
99

1010
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
+CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
1212

1313
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
1414
ifeq ($(BUILD_TYPE),cublas)

0 commit comments

Comments (0)