-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathmain.py
53 lines (48 loc) · 2.28 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import argparse
import os
from dotenv import load_dotenv
from utils.diagram import DiagramHandler
from utils.chatgpt import OpenAIHandler
from utils.local_llm.llama import LlamaHandler
def handle_arguments(argv=None):
    """Parse the Dragon-GPT command-line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            which makes argparse read ``sys.argv[1:]`` — existing callers
            are therefore unaffected; passing a list enables testing.

    Returns:
        argparse.Namespace with: filename (positional), api_key, model,
        output, use_local_llm, n_ctx.
    """
    parser = argparse.ArgumentParser(description=
        "Dragon-GPT is a CLI program that makes an automatic threat analysis "
        "using Chat-GPT on a given scenario produced using OWASP Threat Dragon.")
    parser.add_argument("filename", help="Path to the diagram (json format expected)")
    parser.add_argument("--api_key", "-k", help="Pass the key as a parameter or set it in .env file")
    parser.add_argument("--model", "-m", default="gpt-3.5-turbo", help="AI Model to be used by OpenAI API (default: gpt-3.5-turbo)")
    parser.add_argument("--output", "-o", default="output.txt", help="Export the response from OpenAI to a txt file")
    parser.add_argument("--use_local_llm", "-l", default=False, action="store_true", help="Set to true if you want to use a local LLM")
    parser.add_argument("--n_ctx", "-c", help="(Recommended when using local LLM) Number of tokens the LLM uses for context, generally high numbers (>2048) gives longer responses but takes more time.")
    return parser.parse_args(argv)
if __name__ == "__main__":
    args = handle_arguments()

    # Convert the Threat Dragon JSON diagram into a natural-language scenario.
    diagram = DiagramHandler(args.filename)
    sentence = diagram.make_sentence()
    print(sentence)

    response = ""
    if args.use_local_llm:
        # Local inference path: no OpenAI key required.
        local_llm = LlamaHandler(args.n_ctx)
        response = local_llm.do_threat_modeling(sentence)
    else:
        load_dotenv()
        openai_key = os.getenv("OPENAI_KEY")
        # BUG FIX: os.getenv returns None (not "") when OPENAI_KEY is unset,
        # so the old `openai_key == ""` check never triggered the --api_key
        # fallback and the program crashed later with a None key. `not`
        # covers both None and the empty string.
        if not openai_key:
            if args.api_key:
                openai_key = args.api_key
            else:
                print("OpenAI KEY needs to be informed. You can get yours from this link: ")
                print("https://platform.openai.com/account/api-keys")
                # Exit with a nonzero status: a missing key is an error
                # (bare exit() would report success to the shell).
                raise SystemExit(1)
        chatgpt = OpenAIHandler(openai_key, args.model)
        response = chatgpt.do_threat_modeling(sentence)

    # Append any preventive measures recorded on data-flow components so
    # they appear in the final report alongside the model's analysis.
    for comp in diagram.components:
        if comp["type"] == DiagramHandler.flow_type:
            for flow in comp["flow"]:
                if "preventive_measures" in flow:
                    response += f"\nPreventive measures for {flow['name']}: {flow['preventive_measures']}"

    print(response)
    if args.output:
        with open(args.output, "w") as f:
            f.write(response)