Commit
Merge pull request #58 from boostcampaitech5/dev
[Etc] Cleaning Branch
Showing 65 changed files with 14,178 additions and 483 deletions.
@@ -0,0 +1,4 @@
if __name__ == "__main__":
    import uvicorn

    uvicorn.run("app.main:app", host="0.0.0.0", port=30007, reload=True)
@@ -0,0 +1,77 @@
from fastapi import FastAPI, UploadFile, File
from fastapi.param_functions import Depends
from pydantic import BaseModel, Field
from uuid import UUID, uuid4
from typing import List, Union, Optional, Dict, Any

from datetime import datetime

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

app = FastAPI()

MODEL_PATH = "/opt/ml/input/model-roberta_large-sota_trainer"
tokenizer = AutoTokenizer.from_pretrained("klue/roberta-large")
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)


@app.get("/")
def hello_world():
    return {"hello": "world"}


def predict_sentiment(text):
    model.eval()
    with torch.no_grad():
        # Tokenize the input sentences as a padded batch.
        temp = tokenizer(
            text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=100,
            # stride=stride,
            # return_overflowing_tokens=True,
            return_offsets_mapping=False,
        )

        predicted_label = model(
            input_ids=temp["input_ids"],
            token_type_ids=temp["token_type_ids"],
        )

        # Softmax over the two logits: index 0 = negative, index 1 = positive.
        results = torch.nn.Softmax(dim=-1)(predicted_label.logits)

        answer = []
        for result in results:
            if result[0] >= result[1]:
                answer.append("부정")  # negative
            else:
                answer.append("긍정")  # positive

    return answer


class FinanaceSentiment(BaseModel):
    corpus_list: list = []
    title: str = "title"
    company: str = "삼성전자"  # Samsung Electronics
    result: Optional[List]


@app.post("/classify_sentiment", description="Classifies the sentiment of each sentence.")
async def classify_sentiment(finance: FinanaceSentiment):
    # Run the model over the sentences received in the request.
    predictions = predict_sentiment(finance.corpus_list)

    # Build and return the result.
    result = {
        "title": finance.title,
        # "input_text": finance.corpus,
        "sentiment": predictions,
    }

    return result
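For context, the endpoint above can be exercised once the server has been started by the entry-point module earlier in this commit (host and port taken from that uvicorn.run call). The following client sketch is illustrative only: the use of the requests package and the sample sentences are assumptions, not part of the commit.

# Illustrative client for the /classify_sentiment endpoint defined above.
# Assumes the server is running locally on port 30007 (see the uvicorn entry
# point earlier in this commit) and that the `requests` package is installed.
import requests

payload = {
    "corpus_list": ["주가가 크게 올랐다.", "실적이 기대에 못 미쳤다."],  # sample sentences (illustrative)
    "title": "sample-article",
    "company": "삼성전자",
}

resp = requests.post("http://localhost:30007/classify_sentiment", json=payload)
resp.raise_for_status()
print(resp.json())  # the endpoint's JSON response, e.g. a title plus per-sentence sentiment labels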
@@ -0,0 +1,4 @@
SECRET_KEY=secret
DEBUG=True
MODEL_PATH=./ml/model/
MODEL_NAME=model.pkl
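This file is presumably the project's .env (the .gitignore added below excludes .env from version control), so its values have to be loaded at runtime. A minimal sketch of one way to do that follows, assuming pydantic v1's BaseSettings with python-dotenv installed for env_file support; the Settings class and its defaults mirror the keys above and are illustrative, not part of the commit.

# Illustrative loader for the .env keys above (pydantic v1 BaseSettings; assumed, not in the commit).
from pydantic import BaseSettings


class Settings(BaseSettings):
    SECRET_KEY: str = "secret"
    DEBUG: bool = True
    MODEL_PATH: str = "./ml/model/"
    MODEL_NAME: str = "model.pkl"

    class Config:
        env_file = ".env"  # read overrides from the .env file in the working directory


settings = Settings()
print(settings.MODEL_PATH, settings.MODEL_NAME)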
@@ -0,0 +1,92 @@
# Cookiecutter specific
data/*

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# DotEnv configuration
.env

# Database
*.db
*.rdb

# Pycharm
.idea

# VS Code
.vscode/

# Spyder
.spyproject/

# Jupyter NB Checkpoints
.ipynb_checkpoints/

# exclude data from source control by default
/data/

# Mac OS-specific storage files
.DS_Store

# vim
*.swp
*.swo

# Mypy cache
.mypy_cache/