
(improvement)(launcher)Introduce supersonic-env.sh to incorporate LLM-related configs.
jerryjzhang committed May 20, 2024
1 parent cbafff0 commit 5421212
Showing 7 changed files with 63 additions and 169 deletions.
4 changes: 2 additions & 2 deletions assembly/bin/supersonic-build.sh
@@ -15,7 +15,7 @@ fi
function buildJavaService {
model_name=$1
echo "starting building supersonic-${model_name} service"
mvn -f $projectDir/launchers/${model_name} clean package -DskipTests
mvn -f $projectDir clean package -DskipTests
if [ $? -ne 0 ]; then
echo "Failed to build backend Java modules."
exit 1
@@ -72,7 +72,7 @@ elif [ "$service" == "webapp" ]; then
buildWebapp
target_path=$projectDir/launchers/$STANDALONE_SERVICE/target/classes
tar xvf $projectDir/webapp/supersonic-webapp.tar.gz -C $target_path
mv $target_path/supersonic_webapp $target_path/webapp
mv $target_path/supersonic-webapp $target_path/webapp
else
buildJavaService $service
buildWebapp
5 changes: 4 additions & 1 deletion assembly/bin/supersonic-daemon.sh
@@ -1,9 +1,12 @@
#!/usr/bin/env bash

sbinDir=$(cd "$(dirname "$0")"; pwd)
chmod +x $sbinDir/supersonic-common.sh
source $sbinDir/supersonic-common.sh

set -a
source $sbinDir/../conf/supersonic-env.sh
set +a

command=$1
service=$2
if [ -z "$service" ]; then
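The `set -a` / `set +a` pair around the `source` call marks every variable assigned in supersonic-env.sh for export, so processes started afterwards by the daemon script inherit them as ordinary environment variables. A minimal sketch, assuming a launched Python service reads them via `os.environ` (the fallback values here are illustrative):

```python
import os

# Variables assigned in supersonic-env.sh are exported by the surrounding
# `set -a` / `set +a`, so a child process simply finds them in its environment.
api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")
api_key = os.environ.get("OPENAI_API_KEY", "demo")
model_name = os.environ.get("OPENAI_MODEL_NAME", "gpt-3.5-turbo")

print(f"Using {model_name} at {api_base}")
```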
15 changes: 9 additions & 6 deletions headless/python/config/config_parse.py
@@ -1,5 +1,4 @@
# -*- coding:utf-8 -*-
import os
import configparser

import os
@@ -8,6 +7,12 @@
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

class EnvInterpolation(configparser.BasicInterpolation):
"""Interpolation which expands environment variables in values."""

def before_get(self, parser, section, option, value, defaults):
value = super().before_get(parser, section, option, value, defaults)
return os.path.expandvars(value)

def type_convert(input_str: str):
try:
@@ -16,13 +21,13 @@ def type_convert(input_str: str):
return input_str


PROJECT_DIR_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_DIR_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
config_dir = "config"
CONFIG_DIR_PATH = os.path.join(PROJECT_DIR_PATH, config_dir)
CONFIG_DIR_PATH = os.path.join(PROJECT_DIR_PATH, config_dir)
config_file = "run_config.ini"
config_path = os.path.join(CONFIG_DIR_PATH, config_file)

config = configparser.ConfigParser()
config = configparser.ConfigParser(interpolation=EnvInterpolation())
config.read(config_path)

log_dir = "log"
@@ -77,5 +82,3 @@ def type_convert(input_str: str):
print(f"ACT_MIN_WINDOWN_SIZE: {ACT_MIN_WINDOWN_SIZE}")
print(f"ACT_MAX_WINDOWN_SIZE: {ACT_MAX_WINDOWN_SIZE}")
print(f"LOG_FILE_PATH: {LOG_FILE_PATH}")


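`EnvInterpolation` first applies configparser's normal interpolation via `BasicInterpolation.before_get` and then runs the result through `os.path.expandvars`, so `${VAR}` references in run_config.ini are replaced with environment-variable values when an option is read. A self-contained sketch of the same mechanism (the in-memory config text and the exported value are illustrative only):

```python
import configparser
import os


class EnvInterpolation(configparser.BasicInterpolation):
    """Interpolation which expands environment variables in values."""

    def before_get(self, parser, section, option, value, defaults):
        value = super().before_get(parser, section, option, value, defaults)
        return os.path.expandvars(value)


os.environ["OPENAI_MODEL_NAME"] = "gpt-4o-mini"  # illustrative value

config = configparser.ConfigParser(interpolation=EnvInterpolation())
config.read_string("[LLMModel]\nMODEL_NAME = ${OPENAI_MODEL_NAME}\n")

print(config.get("LLMModel", "MODEL_NAME"))  # -> gpt-4o-mini
```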
10 changes: 4 additions & 6 deletions headless/python/config/run_config.ini
@@ -17,13 +17,11 @@ ACT_MAX_WINDOWN_SIZE = 10
[Text2Vec]
HF_TEXT2VEC_MODEL_NAME = GanymedeNil/text2vec-large-chinese


[LLMProvider]
LLM_PROVIDER_NAME = openai


[LLMModel]
MODEL_NAME = gpt-3.5-turbo
OPENAI_API_KEY = YOUR_API_KEY
OPENAI_API_BASE = http://YOUR_API_BASE
TEMPERATURE = 0.0
OPENAI_API_KEY = ${OPENAI_API_KEY}
OPENAI_API_BASE = ${OPENAI_API_BASE}
MODEL_NAME = ${OPENAI_MODEL_NAME}
TEMPERATURE = ${OPENAI_TEMPERATURE}
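One subtlety of expanding values with `os.path.expandvars`: a reference to a variable that is not set in the environment is left in the value verbatim rather than becoming empty, which is why supersonic-env.sh (added below) ships a default for every key referenced here. A quick illustration (the key value is hypothetical):

```python
import os

os.environ.pop("OPENAI_API_KEY", None)            # simulate a missing variable
print(os.path.expandvars("${OPENAI_API_KEY}"))    # -> ${OPENAI_API_KEY}, left as-is

os.environ["OPENAI_API_KEY"] = "sk-test"          # hypothetical value
print(os.path.expandvars("${OPENAI_API_KEY}"))    # -> sk-test
```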
66 changes: 34 additions & 32 deletions launchers/standalone/src/main/resources/application-local.yaml
@@ -1,3 +1,10 @@
server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*

spring:
h2:
console:
@@ -12,15 +19,8 @@ spring:
username: root
password: semantic

demo:
enabled: true

server:
port: 9080
compression:
enabled: true
min-response-size: 1024
mime-types: application/javascript,application/json,application/xml,text/html,text/xml,text/plain,text/css,image/*
mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml

authentication:
enable: true
@@ -31,6 +31,16 @@ authentication:
header:
key: Authorization

demo:
enabled: true

query:
optimizer:
enable: true

multi:
turn: false

time:
threshold: 100

@@ -39,49 +49,48 @@ dimension:
metric:
topn: 20

mybatis:
mapper-locations=classpath:mappers/custom/*.xml,classpath*:/mappers/*.xml

corrector:
additional:
information: true

pyllm:
url: http://127.0.0.1:9092

llm:
parser:
url: ${pyllm.url}

embedding:
url: ${pyllm.url}

functionCall:
url: ${pyllm.url}

text2sql:
example:
num: 1

#langchain4j config
s2:
langchain4j:
#1.chat-model
chat-model:
provider: open_ai
openai:
# Replace with your LLM configs
# Note: Below API key `demo` is provided by langchain4j community which limits 1000 tokens per request.
base-url: https://api.openai.com/v1
api-key: demo
model-name: gpt-3.5-turbo
temperature: 0.0
timeout: PT60S
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
base-url: ${OPENAI_API_BASE:https://api.openai.com/v1}
api-key: ${OPENAI_API_KEY:demo}
model-name: ${OPENAI_MODEL_NAME:gpt-3.5-turbo}
temperature: ${OPENAI_TEMPERATURE:0.0}
timeout: ${OPENAI_TIMEOUT:PT60S}
#2.embedding-model
#2.1 in_memory(default)
embedding-model:
provider: in_process
# inProcess:
# modelPath: /data/model.onnx
# vocabularyPath: /data/onnx_vocab.txt
# shibing624/text2vec-base-chinese
# inProcess:
# modelPath: /data/model.onnx
# vocabularyPath: /data/onnx_vocab.txt
# shibing624/text2vec-base-chinese
#2.2 open_ai
# embedding-model:
# provider: open_ai
@@ -105,11 +114,4 @@ logging:

inMemoryEmbeddingStore:
persistent:
path: /tmp

query:
optimizer:
enable: true
multi:
turn: false
num: 5
path: /tmp
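In the Spring configuration, placeholders such as `${OPENAI_API_BASE:https://api.openai.com/v1}` are resolved against the Spring Environment, which includes OS environment variables, and fall back to the text after the colon when the variable is absent. The variables exported by supersonic-env.sh therefore configure the Java service and the Python service from a single place. A minimal Python sketch of that fallback rule, purely for illustration (Spring performs the real resolution):

```python
import os
import re

# Illustrative re-implementation of the `${NAME:default}` rule used in
# application-local.yaml; real resolution is done by Spring.
PLACEHOLDER = re.compile(r"\$\{([^:}]+)(?::([^}]*))?\}")


def resolve(value: str) -> str:
    def repl(match: re.Match) -> str:
        name, default = match.group(1), match.group(2) or ""
        return os.environ.get(name, default)

    return PLACEHOLDER.sub(repl, value)


print(resolve("${OPENAI_MODEL_NAME:gpt-3.5-turbo}"))  # -> gpt-3.5-turbo when unset
```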
122 changes: 0 additions & 122 deletions launchers/standalone/src/main/resources/rewrite_examplar.json

This file was deleted.

10 changes: 10 additions & 0 deletions launchers/standalone/src/main/resources/supersonic-env.sh
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

# Replace with your LLM configs
# Note: The default API key `demo` is provided by langchain4j community
# which limits 1000 tokens per request.
OPENAI_API_BASE=https://api.openai.com/v1
OPENAI_API_KEY=demo
OPENAI_MODEL_NAME=gpt-3.5-turbo
OPENAI_TEMPERATURE=0.0
OPENAI_TIMEOUT=PT60S
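Because supersonic-daemon.sh sources this file inside `set -a` / `set +a`, editing these assignments is enough to point both the Java service (via the Spring placeholders above) and the Python service (via the run_config.ini interpolation) at a different LLM endpoint.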
