Commit

Cleanup header and other files
0cc4m committed Jan 25, 2024
1 parent 3a15a01 commit 82ce1c4
Showing 5 changed files with 8 additions and 42 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -408,7 +408,7 @@ if (LLAMA_VULKAN)
     message(STATUS "Vulkan found")
 
     add_library(ggml-vulkan STATIC ggml-vulkan.cpp ggml-vulkan.h)
-    target_link_libraries(ggml-vulkan PUBLIC Vulkan::Vulkan)
+    target_link_libraries(ggml-vulkan PRIVATE Vulkan::Vulkan)
 
     add_compile_definitions(GGML_USE_VULKAN)
 
8 changes: 3 additions & 5 deletions Makefile
@@ -449,14 +449,12 @@ ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
 endif # LLAMA_CLBLAST
 
 ifdef LLAMA_VULKAN
-	CFLAGS += -DGGML_USE_VULKAN
-	CXXFLAGS += -DGGML_USE_VULKAN
-	LDFLAGS += -lvulkan
+	MK_CPPFLAGS += -DGGML_USE_VULKAN
+	MK_LDFLAGS += -lvulkan
 	OBJS += ggml-vulkan.o
 
 ifdef LLAMA_VULKAN_CHECK_RESULTS
-	CFLAGS += -DGGML_VULKAN_CHECK_RESULTS
-	CXXFLAGS += -DGGML_VULKAN_CHECK_RESULTS
+	MK_CPPFLAGS += -DGGML_VULKAN_CHECK_RESULTS
 endif
 
 ggml-vulkan.o: ggml-vulkan.cpp ggml-vulkan.h
26 changes: 4 additions & 22 deletions ggml-vulkan.cpp
@@ -1254,7 +1254,7 @@ static vk_buffer ggml_vk_create_buffer_temp(size_t size) {
     return buf;
 }
 
-void* ggml_vk_host_malloc(size_t size) {
+static void * ggml_vk_host_malloc(size_t size) {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_host_malloc(" << size << ")" << std::endl;
 #endif
@@ -1278,7 +1278,7 @@ void* ggml_vk_host_malloc(size_t size) {
     return buf.ptr;
 }
 
-void ggml_vk_host_free(void* ptr) {
+static void ggml_vk_host_free(void* ptr) {
     if (ptr == nullptr) {
         return;
     }
@@ -2500,7 +2500,7 @@ static void ggml_vk_mul_mat_vec_nc_f16_f32(vk_context& ctx, const ggml_tensor *
     }
 }
 
-bool ggml_vk_can_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * dst) {
+static bool ggml_vk_can_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * dst) {
     const uint64_t ne10 = src1->ne[0];
 
     const uint64_t ne0 = dst->ne[0];
@@ -3532,24 +3532,6 @@ static ggml_tensor_extra_gpu * ggml_vk_tensor_create_extra(ggml_tensor * tensor)
     return extra;
 }
 
-void ggml_vk_prepare_tensor(ggml_tensor * tensor) {
-#ifdef VK_DEBUG
-    std::cerr << "ggml_vk_prepare_tensor(" << tensor << " (" << tensor->name << ", " << ggml_op_name(tensor->op) << "))" << std::endl;
-#endif
-    tensor->backend = GGML_BACKEND_GPU;
-
-    // recursively prepare buffers until a compute tensor is found
-    if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
-        const ggml_op src0_op = tensor->src[0]->op;
-        if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
-            ggml_vk_prepare_tensor(tensor->src[0]);
-        }
-    }
-    if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
-        ggml_vk_prepare_tensor(tensor->src[1]);
-    }
-}
-
 // TODO: Still needed?
 static void ggml_vk_tensor_stride_order(const ggml_tensor * tensor, std::array<int, 4>& order) {
     order = {-1, -1, -1, -1};
@@ -4120,7 +4102,7 @@ void ggml_vk_graph_cleanup() {
     vk_gc.contexts.clear();
 }
 
-void ggml_vk_cleanup() {
+static void ggml_vk_cleanup() {
 #ifdef VK_DEBUG
     std::cerr << "ggml_vk_cleanup()" << std::endl;
 #endif
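The changes in this file all follow one pattern: functions used only inside ggml-vulkan.cpp (ggml_vk_host_malloc, ggml_vk_host_free, ggml_vk_can_mul_mat, ggml_vk_cleanup) gain the static qualifier. At namespace scope in C++, static gives a function internal linkage: the symbol is private to its translation unit, needs no declaration in the public header, and cannot collide with names in other files. A minimal sketch of the idiom, with hypothetical names:

// mylib.cpp -- hypothetical translation unit, not part of this commit
#include <cstdio>

// Internal linkage: visible only inside this file; no header declaration
// is needed, and the symbol is not part of the library's public interface.
static void helper() {
    std::puts("internal helper");
}

// External linkage: this is the kind of function a public header declares.
void mylib_do_work() {
    helper();
}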
12 changes: 0 additions & 12 deletions ggml-vulkan.h
@@ -16,22 +16,10 @@ GGML_API void ggml_vk_preallocate_buffers(void);
 GGML_API void ggml_vk_build_graph(struct ggml_tensor * node, bool last_node);
 GGML_API bool ggml_vk_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
 #ifdef GGML_VULKAN_CHECK_RESULTS
-void ggml_vk_check_results_0(struct ggml_compute_params * params, struct ggml_tensor * tensor);
-void ggml_vk_check_results_1(struct ggml_compute_params * params, struct ggml_tensor * tensor);
 #endif
 GGML_API void ggml_vk_graph_cleanup(void);
 
-GGML_API void * ggml_vk_host_malloc(size_t size);
-GGML_API void ggml_vk_host_free(void * ptr);
-
-GGML_API void ggml_vk_transform_tensor_temporary(const void * data, struct ggml_tensor * tensor);
-GGML_API void ggml_vk_transform_tensor_static(const void * data, struct ggml_tensor * tensor);
-GGML_API void ggml_vk_assign_buffer(struct ggml_tensor * tensor);
-GGML_API void ggml_vk_prepare_tensor(struct ggml_tensor * tensor);
-GGML_API void ggml_vk_cleanup(void);
-
-GGML_API bool ggml_vk_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
 
 // backend API
 GGML_API GGML_CALL ggml_backend_t ggml_backend_vk_init(void);
 
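With the internal helpers removed, the header's remaining surface is the backend API. A minimal usage sketch, assuming the usual ggml backend entry points (ggml_backend_free comes from ggml-backend.h, not from this header):

#include "ggml-vulkan.h"
#include "ggml-backend.h"  // assumed: provides ggml_backend_free()

int main() {
    // The trimmed header funnels initialization through one entry point.
    ggml_backend_t backend = ggml_backend_vk_init();
    if (backend == NULL) {
        return 1;
    }
    // ... build and run a ggml graph through the backend interface ...
    ggml_backend_free(backend);
    return 0;
}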
2 changes: 0 additions & 2 deletions llama.cpp
@@ -1,8 +1,6 @@
 #define LLAMA_API_INTERNAL
 #include "llama.h"
 
-#include <iostream>
-
 #include "unicode.h"
 
 #include "ggml.h"
