This repository has been archived by the owner on Sep 30, 2023. It is now read-only.

Commit

Merge pull request #55 from ravenscroftj/feature/gpu_layers
WIP: Integrate more direct GPU support
ravenscroftj authored Aug 26, 2023
2 parents d4989b5 + a00de2a commit 2b27760
Showing 18 changed files with 519 additions and 20 deletions.
16 changes: 8 additions & 8 deletions .github/workflows/docker-image.yml
@@ -53,14 +53,14 @@ jobs:
runtime_base: nvidia/cuda:12.2.0-runtime-ubuntu22.04
cmake_args: -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc

- tag: -clblast
dockerfile: ./Dockerfile.default
platforms: linux/amd64
build_base: ubuntu:22.04
runtime_base: ubuntu:22.04
runtime_deps: libclblast1
extra_deps: libclblast-dev
cmake_args: -DGGML_CLBLAST=On
# - tag: -clblast
# dockerfile: ./Dockerfile.default
# platforms: linux/amd64
# build_base: ubuntu:22.04
# runtime_base: ubuntu:22.04
# runtime_deps: libclblast1
# extra_deps: libclblast-dev
# cmake_args: -DGGML_CLBLAST=On


steps:
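With the clblast entry commented out of the build matrix, CI now only publishes the default and CUDA images. The GGML_CLBLAST option added to CMakeLists.txt further down still allows a local OpenCL build; a rough sketch, assuming the same dependency the removed matrix entry installed:

    # install the CLBlast development package (as the old matrix entry did)
    sudo apt-get install -y libclblast-dev
    # configure and build with the OpenCL backend enabled
    cmake -B build -DGGML_CLBLAST=On
    cmake --build build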
2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
build/
models/
5 changes: 4 additions & 1 deletion .gitmodules
@@ -1,9 +1,12 @@
[submodule "ggml"]
path = extern/ggml
url = git@github.com:ravenscroftj/ggml.git
url = git@github.com:ggerganov/ggml.git
[submodule "extern/argparse"]
path = extern/argparse
url = https://github.com/p-ranav/argparse.git
[submodule "extern/sbdlog"]
path = extern/spdlog
url = https://github.com/gabime/spdlog.git
[submodule "extern/ggml"]
path = extern/ggml
url = https://github.com/ggerganov/ggml
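Since the ggml submodule now points at the upstream ggerganov/ggml repository instead of the fork, existing checkouts will likely need their submodule URLs re-synced, for example:

    # refresh submodule URLs from the updated .gitmodules and pull the new commits
    git submodule sync --recursive
    git submodule update --init --recursive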
20 changes: 20 additions & 0 deletions .vscode/c_cpp_properties.json
@@ -0,0 +1,20 @@
{
"configurations": [
{
"name": "Linux",
"includePath": [
"${workspaceFolder}/**",
"${workspaceFolder}/extern/crow/include",
"${workspaceFolder}/include",
"${workspaceFolder}/include"
],
"defines": [],
"compilerPath": "/usr/bin/gcc",
"cStandard": "c17",
"cppStandard": "gnu++17",
"intelliSenseMode": "linux-gcc-x64",
"configurationProvider": "ms-vscode.cmake-tools"
}
],
"version": 4
}
70 changes: 70 additions & 0 deletions .vscode/launch.json
@@ -0,0 +1,70 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "(gdb) Launch TBP",
"type": "cppdbg",
"request": "launch",
"program": "/home/james/workspace/rafael-llm/turbopilot/build/bin/turbopilot",
"args": [
//TBP ARGS
"-v",
"-f",
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
"-m",
"replit",
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
},
{
"name": "(gdb) Launch Replut",
"type": "cppdbg",
"request": "launch",
"program": "/home/james/workspace/rafael-llm/turbopilot/extern/ggml/build/bin/replit",
"args": [
// REPLIT ARGS
"-m",
"/home/james/Downloads/replit-code-v1-3b-q4_0.bin",
"-f",
"/home/james/workspace/rafael-llm/turbopilot/test.txt"
],
"stopAtEntry": false,
"cwd": "${workspaceFolder}",
"environment": [],
"externalConsole": false,
"MIMode": "gdb",
"setupCommands": [
{
"description": "Enable pretty-printing for gdb",
"text": "-enable-pretty-printing",
"ignoreFailures": true
},
{
"description": "Set Disassembly Flavor to Intel",
"text": "-gdb-set disassembly-flavor intel",
"ignoreFailures": true
}
]
},

]
}
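The first debug configuration mirrors how the server is normally started from a terminal; an equivalent command line (the model path is the author's local one, shown only for illustration) would be roughly:

    # verbose mode, local quantized model file, replit model family
    ./build/bin/turbopilot -v -f ~/Downloads/replit-code-v1-3b-q4_0.bin -m replit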
28 changes: 28 additions & 0 deletions .vscode/tasks.json
@@ -0,0 +1,28 @@
{
"tasks": [
{
"type": "cppbuild",
"label": "C/C++: g++ build active file",
"command": "/usr/bin/g++",
"args": [
"-fdiagnostics-color=always",
"-g",
"${file}",
"-o",
"${fileDirname}/${fileBasenameNoExtension}"
],
"options": {
"cwd": "${fileDirname}"
},
"problemMatcher": [
"$gcc"
],
"group": {
"kind": "build",
"isDefault": true
},
"detail": "Task generated by Debugger."
}
],
"version": "2.0.0"
}
19 changes: 16 additions & 3 deletions CMakeLists.txt
@@ -15,6 +15,11 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS "on")
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib")

option(GGML_CLBLAST "ggml: use clBLAST" OFF)
option(GGML_CUBLAS "ggml: use cuBLAS" OFF)



if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
message(STATUS "ARM detected")
if (MSVC)
@@ -48,13 +53,21 @@ if (GGML_STATIC)
SET(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
SET(BUILD_SHARED_LIBS OFF)
SET(CMAKE_EXE_LINKER_FLAGS "-static")
endif()

# if(GGML_OPENBLAS)
# set(BLA_STATIC ON)
# endif()
if (GGML_CUBLAS)
cmake_minimum_required(VERSION 3.17)

find_package(CUDAToolkit)
if (CUDAToolkit_FOUND)
add_compile_definitions(GGML_USE_CUBLAS)
else()
message(WARNING "cuBLAS not found")
endif()
endif()



add_subdirectory(src)

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
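The new options are plain CMake switches; a minimal sketch of a cuBLAS-enabled configure and build (the nvcc path matches the one used in the Docker workflow above and may differ on other systems):

    cmake -B build -DGGML_CUBLAS=ON -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc
    cmake --build build --config Release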
2 changes: 1 addition & 1 deletion extern/ggml
Submodule ggml updated 61 files
+19 −0 .editorconfig
+23 −1 .github/workflows/ci.yml
+14 −2 .gitignore
+26 −0 CMakeLists.txt
+6 −2 README.md
+29 −13 build.zig
+260 −0 ci/run.sh
+1 −0 examples/CMakeLists.txt
+71 −17 examples/common.cpp
+20 −1 examples/common.h
+7 −7 examples/dolly-v2/main.cpp
+7 −7 examples/gpt-2/main.cpp
+8 −8 examples/gpt-j/main.cpp
+4 −4 examples/gpt-neox/README.md
+7 −7 examples/gpt-neox/main.cpp
+6 −1 examples/mnist/README.md
+4 −2 examples/mnist/main-cpu.cpp
+31 −19 examples/mnist/main-mtl.m
+3 −0 examples/mnist/main.cpp
+7 −7 examples/mpt/main.cpp
+115 −0 examples/python/README.md
+14 −0 examples/python/api.h
+25 −0 examples/python/example_add_quant.py
+68 −0 examples/python/example_test_all_quants.py
+58 −0 examples/python/ggml/__init__.py
+2,431 −0 examples/python/ggml/__init__.pyi
+11 −0 examples/python/ggml/cffi.py
+7 −0 examples/python/ggml/ffi/__init__.pyi
+182 −0 examples/python/ggml/utils.py
+42 −0 examples/python/regenerate.py
+128 −0 examples/python/stubs.py
+258 −0 examples/python/test_tensor.py
+7 −7 examples/replit/main.cpp
+13 −0 examples/sam/CMakeLists.txt
+89 −0 examples/sam/README.md
+134 −0 examples/sam/convert-pth-to-ggml.py
+2,158 −0 examples/sam/main.cpp
+18 −0 examples/starcoder/CMakeLists.txt
+3 −1 examples/starcoder/convert-hf-to-ggml.py
+7 −8 examples/starcoder/main.cpp
+1,126 −0 examples/starcoder/starcoder-mmap.cpp
+7,987 −0 examples/stb_image.h
+1,724 −0 examples/stb_image_write.h
+26 −0 include/ggml/ggml-alloc.h
+450 −54 include/ggml/ggml.h
+4 −2 scripts/sync-llama.sh
+63 −5 src/CMakeLists.txt
+579 −0 src/ggml-alloc.c
+3,542 −493 src/ggml-cuda.cu
+19 −22 src/ggml-cuda.h
+13 −0 src/ggml-metal.h
+334 −210 src/ggml-metal.m
+1,090 −972 src/ggml-metal.metal
+3,239 −1,378 src/ggml.c
+75 −20 tests/CMakeLists.txt
+175 −0 tests/test-conv-transpose.c
+223 −0 tests/test-customop.c
+408 −99 tests/test-grad0.cpp
+11 −10 tests/test-opt.cpp
+84 −0 tests/test-rel-pos.c
+87 −0 tests/test-xpos.c
4 changes: 3 additions & 1 deletion include/turbopilot/model.hpp
@@ -44,6 +44,7 @@ struct ModelConfig
int32_t seed = -1; // RNG seed
int32_t n_ctx = 512; // context size
int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_gpu_layers = 0;
};

class TurbopilotModel
@@ -67,4 +68,5 @@ class TurbopilotModel
std::mutex model_lock;
};

#endif //__TURBOPILOT_MODEL_H

#endif //__TURBOPILOT_MODEL_H
7 changes: 5 additions & 2 deletions run.sh
@@ -1,3 +1,6 @@
#!/bin/sh

/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
if [ -z "$GPU_LAYERS" ]; then
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL
else
/app/turbopilot -t $THREADS -m $MODEL_TYPE -f $MODEL --ngl $GPU_LAYERS
fi
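GPU_LAYERS is simply forwarded to the new --ngl option; inside the published container (where /app/turbopilot exists) the script could be driven like this, with the paths and layer count being illustrative values, and with the original CPU-only command still used when GPU_LAYERS is unset:

    THREADS=6 MODEL_TYPE=replit MODEL=/models/replit-code-v1-3b-q4_0.bin GPU_LAYERS=20 ./run.sh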
52 changes: 52 additions & 0 deletions src/gptj.cpp
@@ -6,6 +6,14 @@
#include <iostream>
#include <fstream>


#ifdef GGML_USE_CLBLAST
#include "ggml-opencl.h"
#endif
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
@@ -455,6 +463,9 @@ bool GPTJModel::load_model(std::string fname) {
}
}




// key + value memory
{
const auto & hparams = model->hparams;
@@ -553,6 +564,47 @@ bool GPTJModel::load_model(std::string fname) {

fin.close();



#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)

if(config.n_gpu_layers > 0){
size_t vram_total = 0;
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);

for(int i=0; i < gpu_layers; i++) {
const auto & layer = model->layers[i];
layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_k_proj_w->backend = GGML_BACKEND_GPU;
layer.c_attn_v_proj_w->backend = GGML_BACKEND_GPU;

layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;

#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cl_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cl_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_q_proj_w->data,layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_k_proj_w->data,layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_v_proj_w->data,layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
}

spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
}

#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)

return true;
}

43 changes: 42 additions & 1 deletion src/gptneox.cpp
@@ -3,6 +3,13 @@

#include <ggml/ggml.h>

#ifdef GGML_USE_CLBLAST
#include "ggml-opencl.h"
#endif
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#endif

#include <cinttypes>

#include <iostream>
@@ -50,6 +57,7 @@ ggml_tensor * gpt_neox_ff(
}



// evaluate the transformer
//
// - model: the model
@@ -612,9 +620,42 @@ bool GPTNEOXModel::load_model(std::string fname) {

printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, n_tensors);
}

fin.close();

#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)


if(config.n_gpu_layers > 0){
size_t vram_total = 0;
int gpu_layers = std::min(config.n_gpu_layers, model->hparams.n_layer);
spdlog::info("Attempting to offload {} layers to GPU", gpu_layers);


for(int i=0; i < gpu_layers; i++) {
const auto & layer = model->layers[i];
layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
layer.c_attn_proj_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_fc_w->backend = GGML_BACKEND_GPU;
layer.c_mlp_proj_w->backend = GGML_BACKEND_GPU;

#if defined(GGML_USE_CLBLAST)
ggml_cl_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cl_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cl_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cl_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#else
ggml_cuda_transform_tensor(layer.c_attn_attn_w->data,layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
ggml_cuda_transform_tensor(layer.c_attn_proj_w->data,layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
ggml_cuda_transform_tensor(layer.c_mlp_fc_w->data,layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
#endif
}

spdlog::info("{}: [GPU] total VRAM used: {} MB\n", __func__, vram_total / 1024 / 1024);
}

#endif // defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)

return true;
}

