Commit

chore: Updated README. Fixed the JSON mode warning for Ollama. Corrected progress bar.
anirbanbasu committed Sep 12, 2024
1 parent 7aaf8a8 commit 5de3404
Showing 3 changed files with 11 additions and 9 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -57,6 +57,7 @@ Following is a table of some updates regarding the project status. Note that the

| Date | Status | Notes or observations |
|----------|:-------------:|----------------------|
| September 13, 2024 | active | Low parameter LLMs perform badly in unnecessary self-discovery, query refinements and ReAct tool selections. |
| September 12, 2024 | active | Self-discover may need to be conditionally bypassed to reduce the number of unnecessary LLM calls. |
| September 10, 2024 | active | Query decomposition may generate unnecessary sub-workflows. |
| September 7, 2024 | active | Cohere `command-r-plus` is _very_ slow. |
3 changes: 2 additions & 1 deletion src/webapp.py
@@ -127,7 +127,8 @@ def set_llm_provider(self, provider: str | None = None):
default_value=EnvironmentVariables.VALUE__LLM_TEMPERATURE,
type_cast=float,
),
json_mode=True,
# JSON mode is not required because the LLM will only sometimes be instructed to output JSON.
# json_mode=True,
additional_kwargs={
"top_p": parse_env(
EnvironmentVariables.KEY__LLM_TOP_P,
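
For context, a minimal sketch of what this change amounts to on the caller's side: construct the Ollama LLM without json_mode and request JSON explicitly only in the prompts that need it. The model name, temperature, and prompt below are illustrative placeholders, not values from this repository.

from llama_index.llms.ollama import Ollama

# Leave json_mode off so free-form completions are not forced into JSON;
# every value here is a placeholder for illustration only.
llm = Ollama(
    model="llama3.1",  # placeholder model name
    temperature=0.0,  # placeholder temperature
    # json_mode=True,  # intentionally omitted, as in the diff above
    additional_kwargs={"top_p": 0.4},  # placeholder top_p
)

# When structured output is actually needed, ask for it in the prompt instead.
response = llm.complete("List two colours as a JSON array of strings. Respond with JSON only.")
print(response.text)
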
16 changes: 8 additions & 8 deletions src/workflows.py
@@ -724,6 +724,7 @@ async def start(
)

response = await self_discover_task
self._finished_steps += 1

return DQAReasoningStructureEvent(reasoning_structure=response)

@@ -814,6 +815,7 @@ async def query(
f"\n\nAnd, here is the corresponding reasoning structure:\n{ctx.data[DQAWorkflow.KEY_REASONING_STRUCTURE]}"
)
response = await self.llm.acomplete(prompt)
self._finished_steps += 1

response_obj = json.loads(str(response))
sub_questions = response_obj["sub_questions"]
@@ -867,7 +869,6 @@ async def review_sub_questions(
ctx.send_event(DQAQueryEvent(question=question))

self._total_steps += 1
self._finished_steps += 1
ctx.write_event_to_stream(
WorkflowStatusEvent(
msg=f"Reviewing sub-questions:\n\t{str(ev.questions)}",
@@ -906,6 +907,7 @@ async def review_sub_questions(
f"\n\nAnd, here is the corresponding reasoning structure:\n{ctx.data[DQAWorkflow.KEY_REASONING_STRUCTURE]}"
)
response = await self.llm.acomplete(prompt)
self._finished_steps += 1
self._refinement_iterations += 1

response_obj = json.loads(str(response))
@@ -952,7 +954,6 @@ async def answer_sub_question(
"""

self._total_steps += 1
self._finished_steps += 1
ctx.write_event_to_stream(
WorkflowStatusEvent(
msg=f"Starting a {ReActWorkflow.__name__} to answer question:\n\t{ev.question}",
@@ -982,6 +983,7 @@
)

response = await react_task
self._finished_steps += 1

return DQAAnswerEvent(
question=ev.question,
@@ -1027,7 +1029,6 @@ async def combine_refine_answers(
)

self._total_steps += 1
self._finished_steps += 1
ctx.write_event_to_stream(
WorkflowStatusEvent(
msg=f"Generating the final response to the original query:\n\t{ctx.data[DQAWorkflow.KEY_ORIGINAL_QUERY]}",
@@ -1055,16 +1056,15 @@
)

response = await self.llm.acomplete(prompt)

self._finished_steps += 1

ctx.write_event_to_stream(
WorkflowStatusEvent(
msg=f"Done, final response generated.\n\nFinal response: {response}",
msg=("Done, final response generated.\n" f"{response}" "\n"),
total_steps=self._total_steps,
finished_steps=self._finished_steps,
)
)

return StopEvent(result=str(response))
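
The workflows.py hunks above all apply the same correction to the progress accounting: self._total_steps is still incremented when a step is dispatched, but self._finished_steps is now incremented only after the awaited call returns, so the progress bar no longer counts steps as finished before they have actually run. A minimal, self-contained sketch of that pattern, using illustrative class and method names that are not from this repository:

import asyncio

class ProgressTrackedSteps:
    """Count a step as finished only after its awaited work completes."""

    def __init__(self) -> None:
        self._total_steps = 0
        self._finished_steps = 0

    async def run_step(self, delay: float) -> None:
        self._total_steps += 1  # the step has been scheduled
        await asyncio.sleep(delay)  # stand-in for an awaited LLM call or sub-workflow
        self._finished_steps += 1  # only now has the step actually finished

    @property
    def progress(self) -> tuple[int, int]:
        return self._finished_steps, self._total_steps

async def main() -> None:
    tracker = ProgressTrackedSteps()
    await asyncio.gather(*(tracker.run_step(0.01) for _ in range(3)))
    print(tracker.progress)  # (3, 3)

asyncio.run(main())
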


@@ -1365,11 +1365,11 @@ async def run(self, query: str):
try:
result = await task
done = self.workflow.is_done()
progress_bar.close()
except Exception as e:
result = f"Exception in running the workflow(s): {str(e)}"
# Set this to done, otherwise another workflow call cannot be made.
done = True
progress_bar.close()
print(result, file=sys.stderr)
finally:
progress_bar.close()
yield done, finished_steps, total_steps, result
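
The final hunk moves progress_bar.close() into a finally block so the bar is closed exactly once, whether the awaited workflow task succeeds or raises. A hedged sketch of that pattern using tqdm; the task argument and step count are placeholders, not the repository's actual workflow API:

import sys
from tqdm import tqdm

async def run_with_progress(task, total_steps: int):
    # Close the progress bar in finally so it is closed on success and on error alike.
    progress_bar = tqdm(total=total_steps)
    try:
        result = await task  # placeholder for the awaited workflow task
        done = True
    except Exception as e:
        result = f"Exception in running the workflow(s): {e}"
        # Mark as done so another workflow call can still be made afterwards.
        done = True
        print(result, file=sys.stderr)
    finally:
        progress_bar.close()
    return done, result
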
