From 773df106bdacbca0cc4edecf98defb8e185350ae Mon Sep 17 00:00:00 2001
From: Ronak
Date: Wed, 7 Feb 2024 14:34:55 +0000
Subject: [PATCH] some README and response_analysis cleanup

---
 README.md                                     | 7 ++-----
 src/rank_llm/scripts/run_response_analysis.py | 4 ++--
 2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/README.md b/README.md
index 3e7c4b3d..26684695 100644
--- a/README.md
+++ b/README.md
@@ -9,9 +9,6 @@ We offer a suite of prompt decoders, albeit with a current focus on RankVicuna.
 
 Some of the code in this repository is borrowed from [RankGPT](https://github.com/sunnweiwei/RankGPT)!
 
-# Releases
-current_version = 0.2.6
-
 ## 📟 Instructions
 
 ### Create Conda Environment
@@ -33,8 +30,8 @@ pip install -r requirements.txt
 ### Run end to end Test
 
 ```bash
-python src/rank_llm/scripts/run_rank_llm.py --model_path=castorini/rank_zephyr_7b_v1_full --top_k_candidates=100 --dataset=dl20 \
---retrieval_method=SPLADE++_EnsembleDistil_ONNX --prompt_mode=rank_GPT --context_size=4096 --variable_passages
+python src/rank_llm/scripts/run_rank_llm.py --model_path=castorini/rank_zephyr_7b_v1_full --top_k_candidates=100 --dataset=dl19 \
+  --retrieval_method=SPLADE++_EnsembleDistil_ONNX --prompt_mode=rank_GPT --context_size=4096 --variable_passages
 ```
 
 ### Contributing
diff --git a/src/rank_llm/scripts/run_response_analysis.py b/src/rank_llm/scripts/run_response_analysis.py
index 6b6320e7..eab8b886 100644
--- a/src/rank_llm/scripts/run_response_analysis.py
+++ b/src/rank_llm/scripts/run_response_analysis.py
@@ -12,11 +12,11 @@
 
 
 def main(args):
-    response_analyzer = ResponseAnalyzer(args.files, 100, PromptMode.RANK_GPT)
+    response_analyzer = ResponseAnalyzer(args.files)
     responses, num_passages = response_analyzer.read_saved_responses()
 
     print("Normalized scores:")
-    print(response_analyzer.count_errors(responses, num_passages, args.verbose))
+    print(response_analyzer.count_errors(verbose=args.verbose))
 
 
 # Print normalized scores
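
Note for reviewers: the second hunk narrows the script to the updated `ResponseAnalyzer` interface. The constructor now takes only the response files, and `count_errors` takes `verbose` as a keyword argument with no explicit `responses`/`num_passages`, which suggests it now works from the loaded responses internally. Below is a minimal sketch of the resulting call pattern, assuming the import path `rank_llm.analysis.response_analysis` and a saved-responses file named `responses.json` (both are illustrative assumptions, not part of this patch):

```python
from rank_llm.analysis.response_analysis import ResponseAnalyzer  # assumed import path

# Build the analyzer from saved response files only; the passage-count and
# prompt-mode arguments removed by this patch are no longer passed.
response_analyzer = ResponseAnalyzer(["responses.json"])  # hypothetical file name

# Mirror the patched main(): load the saved responses, then report error
# counts, with verbose now passed as a keyword argument.
responses, num_passages = response_analyzer.read_saved_responses()
print("Normalized scores:")
print(response_analyzer.count_errors(verbose=True))
```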