Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor/deploy #42

Open
wants to merge 22 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
97 changes: 97 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# 가상환경
poetry.lock
pyproject.toml

# Data
data/

# Git
.git
.github
.gitignore
.gitattributes
*.md

# CI
.codeclimate.yml
.travis.yml
.taskcluster.yml

# Docker
docker-compose.yml
Dockerfile
.docker
.dockerignore

# Byte-compiled / optimized / DLL files
**/__pycache__/
**/*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.cache
nosetests.xml
coverage.xml

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Virtual environment
.env
.venv/
venv/

# PyCharm
.idea

# Python mode for VIM
.ropeproject
**/.ropeproject

# Vim swap files
**/*.swp

# VS Code
.vscode/
17 changes: 17 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
FROM python:3.10-slim

WORKDIR /app

# Copy requirements first so the pip layer is cached unless deps change.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Create a startup script: run the FastAPI backend in the background and
# Streamlit in the foreground as the container's main process.
# printf is used instead of `echo '...\n...'` because echo's handling of
# backslash escapes is shell-dependent (dash vs bash); printf is portable.
# `exec` makes streamlit replace the shell so it receives stop signals.
RUN printf '#!/bin/bash\npython main.py &\nexec streamlit run app.py\n' > start.sh

RUN chmod +x start.sh

CMD ["/bin/bash", "start.sh"]
2 changes: 1 addition & 1 deletion back/kakao.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import secrets
from typing import Optional

from config import CLIENT_ID, CLIENT_SECRET, OUTSIDE_IP, PORT, STREAMLIT_PORT
from config import CLIENT_ID, CLIENT_SECRET, OUTSIDE_IP, PORT
from fastapi import APIRouter, Depends, Header, HTTPException, status
from fastapi.responses import RedirectResponse
from fastapi_oauth_client import OAuthClient
Expand Down
5 changes: 4 additions & 1 deletion back/managers/mongo_config.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import os
from pymongo import MongoClient
from gridfs import GridFSBucket
from dotenv import load_dotenv

load_dotenv(override=True)

# 환경 변수로부터 MongoDB 설정 읽기
username = os.getenv("MONGO_USERNAME", "admin")
Expand All @@ -11,4 +14,4 @@
client = MongoClient(MONGO_URL)
database = client["database"]
collection = database["users"]
fs_bucket = GridFSBucket(database)
fs_bucket = GridFSBucket(database)
47 changes: 30 additions & 17 deletions config.py
Original file line number Diff line number Diff line change
@@ -1,29 +1,42 @@
import os
import yaml
import socket
import requests
from dotenv import load_dotenv

def get_public_ip():
    """Return this host's public IP address as a string.

    Queries https://checkip.amazonaws.com, which responds with the caller's
    IP followed by a newline.

    Raises:
        requests.RequestException: on network failure, timeout, or a
            non-2xx HTTP response.
    """
    # Timeout is essential: this runs at module import time (config.py),
    # so a hung request would block the whole app from starting.
    response = requests.get("https://checkip.amazonaws.com", timeout=5)
    # Without this check a 4xx/5xx error page body would be returned
    # as if it were an IP address.
    response.raise_for_status()
    return response.text.strip()

def get_private_ip():
    """Return this host's private (LAN) IP address as a string.

    Opens a UDP socket "toward" 8.8.8.8 — no packet is actually sent for a
    UDP connect — so the OS selects the outbound interface, then reads the
    local address from the socket. Falls back to resolving the hostname if
    that fails (which may yield 127.0.0.1 depending on /etc/hosts).
    """
    try:
        # Context manager guarantees the socket is closed even when
        # connect() raises; the original leaked the fd on that path.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
    except Exception:
        # Best-effort fallback, mirroring the original behavior.
        return socket.gethostbyname(socket.gethostname())


path = os.getcwd() # 상위 폴더에서 실행된 경우 -> secret_key.yaml이 상위 폴더에 있음
# path = os.path.dirname(os.path.abspath(__file__)) # 현재 폴더에서 실행된 경우 -> secret_key.yaml이 현재 폴더에 있음

with open(os.path.join(path, "secret_key.yaml"), "r") as yaml_file:
cfg = yaml.safe_load(yaml_file)

OPENAI_API_KEY = cfg["OPENAI_API_KEY"]
COHERE_API_KEY = cfg["COHERE_API_KEY"]

INSIDE_IP = cfg["IP"]["INSIDE_IP"]
OUTSIDE_IP = cfg["IP"]["OUTSIDE_IP"]
load_dotenv(override=True)

REST_API_KEY = cfg["Kakaologin"]["REST_API_KEY"]
REDIRECT_URI = f"http://{OUTSIDE_IP}:{cfg['PORT']}/auth"
MODEL_NAME = os.getenv("MODEL_NAME", "gpt-4o-mini")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
COHERE_API_KEY = os.getenv("COHERE_API_KEY")

PORT = cfg["PORT"]
STREAMLIT_PORT = cfg["STREAMLIT"]["PORT"]
INSIDE_IP = get_private_ip()
OUTSIDE_IP = get_public_ip()

KEY_FILE = cfg["SSL"]["KEY_FILE"]
CERT_FILE = cfg["SSL"]["CERT_FILE"]
PORT = 8001
STREAMLIT_PORT = 8501

CLIENT_ID = cfg["CLIENT_ID"]
CLIENT_SECRET = cfg["CLIENT_SECRET"]
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")

DATA_DIR = os.path.join(path, "data")
IMG_PATH = os.path.join(path, "data", "images")
Expand Down
56 changes: 56 additions & 0 deletions docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# NOTE: '3.10' is NOT a valid Compose file format version (3.x ends at 3.9;
# '3.10' was likely confused with the Python image tag). Older docker-compose
# binaries reject unknown versions, so pin a real one.
version: "3.8"

services:
  # nginx:
  #   image: nginx:alpine
  #   ports:
  #     - "8080:80"
  #     - "443:443"
  #   volumes:
  #     - /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
  #     - /etc/ssl/merged_certificate.crt:/etc/ssl/merged_certificate.crt:ro
  #     - /etc/ssl/private/private.key:/etc/ssl/private/private.key:ro
  #   depends_on:
  #     - app
  #   networks:
  #     - app-network

  app:
    image: kooqooo/hello-jobits:latest
    ports:
      - "8501:8501"  # Streamlit front-end
      - "8001:8001"  # FastAPI back-end
    volumes:
      - ~/data:/app/data
    environment:
      - CLIENT_ID=${CLIENT_ID}
      - CLIENT_SECRET=${CLIENT_SECRET}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - COHERE_API_KEY=${COHERE_API_KEY}
      - MODEL_NAME=${MODEL_NAME}
      - MONGO_USERNAME=${MONGO_USERNAME}
      - MONGO_PASSWORD=${MONGO_PASSWORD}
    depends_on:
      - mongo
    networks:
      - app-network

  mongo:
    image: mongo:latest
    # NOTE(review): publishing 27017 exposes MongoDB to the host network;
    # if only `app` needs it, this mapping can be dropped (the bridge
    # network already provides container-to-container access) — confirm.
    ports:
      - "27017:27017"
    volumes:
      - mongo-data:/data/db
    environment:
      - MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME}
      - MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD}
    networks:
      - app-network

networks:
  app-network:
    driver: bridge

volumes:
  mongo-data:
    driver: local
1 change: 0 additions & 1 deletion pages/1_home.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@
print("user_id : ", st.session_state["user_id"])

if "openai_api_key" not in st.session_state:
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
st.session_state.openai_api_key = OPENAI_API_KEY

if "FAV_IMAGE_PATH" not in st.session_state:
Expand Down
6 changes: 3 additions & 3 deletions pages/2_user.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
sys.path.append("./")
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from config import DATA_DIR, IMG_PATH, path
from config import DATA_DIR, IMG_PATH, path, CSS_PATH
from loguru import logger as _logger
from src.logger import DevConfig
from src.util import (check_essential, get_image_base64, local_css,
Expand Down Expand Up @@ -48,7 +48,7 @@

st.title("안녕하세요, " + st.session_state.nickname + "님!") # 사용자 이름을 받아서 화면에 출력합니다.

local_css(os.path.join(path, "front", "css", "background.css"))
local_css(os.path.join(CSS_PATH, "background.css"))


# local_css("css/1_user.css")
Expand Down Expand Up @@ -237,7 +237,7 @@


## read job info tb
job_info, JOBS = read_job_info_tb(path + "/data/samples/job_info_tb.parquet")
job_info, JOBS = read_job_info_tb(os.path.join(DATA_DIR, "samples", "job_info_tb.parquet"))
st.session_state.job_info = job_info
st.session_state.logger.info("read job tb")
st.session_state.logger.info(f"job info is ... {JOBS}")
Expand Down
20 changes: 7 additions & 13 deletions pages/3_gene_question.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
from src.rule_based import list_extend_questions_based_on_keywords
from src.util import local_css, read_prompt_from_txt
from src.semantic_search import faiss_inference, reranker
from config import OPENAI_API_KEY, DATA_DIR, IMG_PATH, CSS_PATH, PORT
from config import DATA_DIR, IMG_PATH, CSS_PATH, PORT, MODEL_NAME

st.session_state["FAV_IMAGE_PATH"] = os.path.join(IMG_PATH, "favicon.png")
st.set_page_config(
Expand Down Expand Up @@ -80,8 +80,6 @@
</style>
""",unsafe_allow_html=True)

## set variables
MODEL_NAME = "gpt-3.5-turbo-16k"

## set save dir
USER_RESUME_SAVE_DIR = os.path.join(st.session_state["save_dir"], "2_generate_question_user_resume.pdf")
Expand Down Expand Up @@ -139,16 +137,14 @@

### JD 사용하여 JD 추출용 프롬프트 만들기
st.session_state.logger.info("prompt JD start")
prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_JD_template.txt"))
prompt_template_jd = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_JD_template.txt"))
st.session_state.prompt_JD = create_prompt_with_jd(prompt_template_jd)

# prompt_JD 생성완료
st.session_state.logger.info("create prompt JD object")

### 모델 세팅 그대로
llm = ChatOpenAI(temperature=st.session_state.temperature,
model_name=MODEL_NAME,
openai_api_key=OPENAI_API_KEY)
llm = ChatOpenAI(temperature=st.session_state.temperature, model_name=MODEL_NAME)

st.session_state.logger.info("create llm object")

Expand All @@ -167,7 +163,7 @@
# prompt_qa_template #######################################

st.session_state.logger.info("prompt resume start")
prompt_template_resume = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_resume_template.txt"))
prompt_template_resume = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_resume_template.txt"))

st.session_state.logger.info("create prompt resume template")
st.session_state.prompt_resume = create_prompt_with_resume(prompt_template_resume)
Expand All @@ -178,9 +174,7 @@
st.session_state.logger.info("user_resume chunk OpenAIEmbeddings ")

### STEP 2 를 위한 새 모델 호출
llm2 = ChatOpenAI(temperature=0.0,
model_name=MODEL_NAME,
openai_api_key=OPENAI_API_KEY)
llm2 = ChatOpenAI(temperature=0.0, model_name=MODEL_NAME)

st.session_state.chain_type_kwargs = {"prompt": st.session_state.prompt_resume}

Expand All @@ -198,12 +192,12 @@

## step3 :
st.session_state.logger.info("prompt question start")
prompt_template_question = read_prompt_from_txt(os.path.join(DATA_DIR, "test/prompt_question_template.txt"))
prompt_template_question = read_prompt_from_txt(os.path.join(DATA_DIR, "prompts", "prompt_question_template.txt"))

st.session_state.logger.info("create prompt question template")
st.session_state.prompt_question = create_prompt_with_question(prompt_template_question)

llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME, openai_api_key=OPENAI_API_KEY)
llm3 = ChatOpenAI(temperature=0, model_name=MODEL_NAME)
st.session_state.chain = LLMChain(llm=llm3, prompt=st.session_state.prompt_question)
st.session_state.main_question = st.session_state.chain.invoke({"jd": st.session_state.job_description, "resume": st.session_state.resume})['text']
#################
Expand Down
Loading