add flag explanation
DerrickYLJ committed Jul 21, 2023
1 parent 427f7ab commit b771046
Showing 1 changed file with 4 additions and 3 deletions.
inference/utils/download_llama_weights.py
@@ -6,11 +6,12 @@
 from transformers import AutoModelForCausalLM
 
 # You can pass the --use-full-precision flag to use the full-precision weight. By default, we use half precision.
+# and pass "--use_13B", "--use_30B", and "--use_65B" to use the corresponding "llama-13B/30B/65B" model weights
 parser = argparse.ArgumentParser()
 parser.add_argument("--use-full-precision", action="store_true", help="Use full precision")
-parser.add_argument("--use_13B", action="store_true", help="Use full precision")
-parser.add_argument("--use_30B", action="store_true", help="Use full precision")
-parser.add_argument("--use_65B", action="store_true", help="Use full precision")
+parser.add_argument("--use_13B", action="store_true", help="choose to use llama-13B")
+parser.add_argument("--use_30B", action="store_true", help="choose to use llama-30B")
+parser.add_argument("--use_65B", action="store_true", help="choose to use llama-65B")
 args = parser.parse_args()
 if not args.use_full_precision:
     import torch
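For context, a minimal sketch of how the flags added in this commit might be used to pick a set of weights. The selection logic and the size-to-name mapping below are illustrative assumptions; the actual download code is not shown in the diff.

# Sketch only: argument parsing as committed above, plus an assumed mapping
# from flags to a weight size (the real download logic is outside this hunk).
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use-full-precision", action="store_true", help="Use full precision")
parser.add_argument("--use_13B", action="store_true", help="choose to use llama-13B")
parser.add_argument("--use_30B", action="store_true", help="choose to use llama-30B")
parser.add_argument("--use_65B", action="store_true", help="choose to use llama-65B")
args = parser.parse_args()

# Hypothetical selection: default to the 7B weights unless a larger size is requested.
if args.use_65B:
    model_size = "65B"
elif args.use_30B:
    model_size = "30B"
elif args.use_13B:
    model_size = "13B"
else:
    model_size = "7B"
print(f"Downloading llama-{model_size} weights "
      f"({'full' if args.use_full_precision else 'half'} precision)")

Invoked as, e.g., python download_llama_weights.py --use_30B, this sketch would report llama-30B at half precision.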
