diff --git a/app.py b/app.py index 36932cd..61a47bf 100644 --- a/app.py +++ b/app.py @@ -13,7 +13,7 @@ # Third-party imports import gradio as gr from loguru import logger -from pydantic import BaseModel +from pydantic import BaseModel, Field from pypdf import PdfReader from pydub import AudioSegment @@ -29,20 +29,29 @@ class DialogueItem(BaseModel): text: str -class Dialogue(BaseModel): +class ShortDialogue(BaseModel): """The dialogue between the host and guest.""" scratchpad: str name_of_guest: str - dialogue: List[DialogueItem] + dialogue: List[DialogueItem] = Field(..., description="A list of dialogue items, typically between 5 to 9 items") + + +class MediumDialogue(BaseModel): + """The dialogue between the host and guest.""" + + scratchpad: str + name_of_guest: str + dialogue: List[DialogueItem] = Field(..., description="A list of dialogue items, typically between 8 to 13 items") def generate_podcast( files: List[str], url: Optional[str], + question: Optional[str], tone: Optional[str], length: Optional[str], - language: str + language: str, ) -> Tuple[str, str]: """Generate the audio and transcript from the PDFs and/or URL.""" text = "" @@ -64,8 +73,10 @@ def generate_podcast( # Process PDFs if any if files: for file in files: - if not file.lower().endswith('.pdf'): - raise gr.Error(f"File {file} is not a PDF. Please upload only PDF files.") + if not file.lower().endswith(".pdf"): + raise gr.Error( + f"File {file} is not a PDF. Please upload only PDF files." + ) try: with Path(file).open("rb") as f: @@ -84,10 +95,14 @@ def generate_podcast( # Check total character count if len(text) > 100000: - raise gr.Error("The total content is too long. Please ensure the combined text from PDFs and URL is fewer than ~100,000 characters.") - - # Modify the system prompt based on the chosen tone and length + raise gr.Error( + "The total content is too long. Please ensure the combined text from PDFs and URL is fewer than ~100,000 characters." 
+ ) + + # Modify the system prompt based on the user input modified_system_prompt = SYSTEM_PROMPT + if question: + modified_system_prompt += f"\n\nPLEASE ANSWER THE FOLLOWING QUESTION: {question}" if tone: modified_system_prompt += f"\n\nTONE: The tone of the podcast should be {tone}." if length: @@ -97,10 +112,15 @@ } modified_system_prompt += f"\n\nLENGTH: {length_instructions[length]}" if language: - modified_system_prompt += f"\n\nOUTPUT LANGUAGE : The the podcast should be {language}." + modified_system_prompt += ( + f"\n\nOUTPUT LANGUAGE : The podcast should be {language}." + ) # Call the LLM - llm_output = generate_script(modified_system_prompt, text, Dialogue) + if length == "Short (1-2 min)": + llm_output = generate_script(modified_system_prompt, text, ShortDialogue) + else: + llm_output = generate_script(modified_system_prompt, text, MediumDialogue) logger.info(f"Generated dialogue: {llm_output}") # Process the dialogue @@ -118,7 +138,9 @@ total_characters += len(line.text) # Get audio file path - audio_file_path = generate_audio(line.text, line.speaker, language_mapping[language]) + audio_file_path = generate_audio( + line.text, line.speaker, language_mapping[language] + ) # Read the audio file into an AudioSegment audio_segment = AudioSegment.from_file(audio_file_path) audio_segments.append(audio_segment) @@ -149,36 +171,48 @@ demo = gr.Interface( title="Open NotebookLM", - description="Convert your PDFs into podcasts with open-source AI models (Llama 3.1 405B and MeloTTS). \n \n Note: Only the text content of the PDFs will be processed. Images and tables are not included. The total content should be no more than 100,000 characters due to the context length of Llama 3.1 405B.", + description=""" + + + + + + +
+ Open NotebookLM + +

Convert your PDFs into podcasts with open-source AI models (Llama 3.1 405B and MeloTTS).

+

Note: Only the text content of the PDFs will be processed. Images and tables are not included. The total content should be no more than 100,000 characters due to the context length of Llama 3.1 405B.

+
+""", fn=generate_podcast, inputs=[ gr.File( - label="1. 📄 Upload your PDF(s)", - file_types=[".pdf"], - file_count="multiple" + label="1. 📄 Upload your PDF(s)", file_types=[".pdf"], file_count="multiple" ), gr.Textbox( label="2. 🔗 Paste a URL (optional)", - placeholder="Enter a URL to include its content" + placeholder="Enter a URL to include its content", ), - gr.Radio( + gr.Textbox(label="3. 🤔 Do you have a specific question or topic in mind?"), + gr.Dropdown( choices=["Fun", "Formal"], - label="3. 🎭 Choose the tone", + label="4. 🎭 Choose the tone", value="Fun" ), - gr.Radio( + gr.Dropdown( choices=["Short (1-2 min)", "Medium (3-5 min)"], - label="4. ⏱️ Choose the length", + label="5. ⏱️ Choose the length", value="Medium (3-5 min)" ), gr.Dropdown( choices=["English", "Spanish", "French", "Chinese", "Japanese", "Korean"], value="English", - label="5. 🌐 Choose the language (Highly experimental, English is recommended)", + label="6. 🌐 Choose the language" ), ], outputs=[ - gr.Audio(label="Audio", format="mp3"), + gr.Audio(label="Podcast", format="mp3"), gr.Markdown(label="Transcript"), ], allow_flagging="never", @@ -189,27 +223,30 @@ def generate_podcast( [ [str(Path("examples/1310.4546v1.pdf"))], "", + "Explain this paper to me like I'm 5 years old", "Fun", "Short (1-2 min)", - "English" + "English", ], [ [], "https://en.wikipedia.org/wiki/Hugging_Face", + "How did Hugging Face become so successful?", "Fun", "Short (1-2 min)", - "English" + "English", ], [ [], "https://simple.wikipedia.org/wiki/Taylor_Swift", + "Why is Taylor Swift so popular?", "Fun", "Short (1-2 min)", - "English" + "English", ], ], cache_examples=True, ) if __name__ == "__main__": - demo.launch(show_api=True) \ No newline at end of file + demo.launch(show_api=True) diff --git a/icon.png b/icon.png new file mode 100644 index 0000000..afe8b77 Binary files /dev/null and b/icon.png differ diff --git a/prompts.py b/prompts.py index 3807b71..a25dfea 100644 --- a/prompts.py +++ b/prompts.py @@ -3,41 
+3,53 @@ """ SYSTEM_PROMPT = """ -You are a world-class podcast producer. -Your task is to transform the provided input text into an engaging and informative podcast script. -You will receive as input a text that may be unstructured or messy, sourced from places like PDFs or web pages. Ignore irrelevant information or formatting issues. Y -Your focus is on extracting the most interesting and insightful content for a podcast discussion. +You are a world-class podcast producer tasked with transforming the provided input text into an engaging and informative podcast script. The input may be unstructured or messy, sourced from PDFs or web pages. Your goal is to extract the most interesting and insightful content for a compelling podcast discussion. # Steps to Follow: 1. **Analyze the Input:** - Carefully read the input text. Identify the key topics, points, and any interesting facts or anecdotes that could drive a compelling podcast conversation. + Carefully examine the text, identifying key topics, points, and interesting facts or anecdotes that could drive an engaging podcast conversation. Disregard irrelevant information or formatting issues. 2. **Brainstorm Ideas:** - In the ``, brainstorm creative ways to present the key points in an engaging manner. Think of analogies, storytelling techniques, or hypothetical scenarios to make the content relatable and entertaining for listeners. - - - Keep the discussion accessible to a general audience. Avoid jargon and briefly explain complex concepts in simple terms. - - Use imagination to fill in any gaps or create thought-provoking questions to explore during the podcast. - - Your aim is to create an entertaining and informative podcast, so feel free to be creative with your approach. - -3. **Write the Dialogue:** - Now, develop the podcast dialogue. Aim for a natural, conversational flow between the host (named Jane) and the guest speaker (the author of the input text, if mentioned). 
- - - Use the best ideas from your brainstorming session. - - Ensure complex topics are explained clearly and simply. - - Focus on maintaining an engaging and lively tone that would captivate listeners. - - Rules: - > The host ALWAYS goes first and is interviewing the guest. The guest is the one who explains the topic. - > The host should ask the guest questions. - > The host should summarize the key insights at the end. - > Include common verbal fillers like "uhms" and "errs" in the host and guests response. This is so the script is realistic. - > The host and guest can interrupt each other. - > The guest must NOT include marketing or self-promotional content. - > The guest must NOT include any material NOT substantiated within the input text. - > This is to be a PG conversation. - -4. **Wrap it Up:** - At the end of the dialogue, the host and guest should naturally summarize the key insights. This should feel like a casual conversation, rather than a formal recap, reinforcing the main points one last time before signing off. - -ALWAYS REPLY IN VALID JSON, AND NO CODE BLOCKS. BEGIN DIRECTLY WITH THE JSON OUTPUT. + In the ``, creatively brainstorm ways to present the key points engagingly. Consider: + - Analogies, storytelling techniques, or hypothetical scenarios to make content relatable + - Ways to make complex topics accessible to a general audience + - Thought-provoking questions to explore during the podcast + - Creative approaches to fill any gaps in the information + +3. **Craft the Dialogue:** + Develop a natural, conversational flow between the host (Jane) and the guest speaker (the author or an expert on the topic). 
Incorporate: + - The best ideas from your brainstorming session + - Clear explanations of complex topics + - An engaging and lively tone to captivate listeners + - A balance of information and entertainment + + Rules for the dialogue: + - The host (Jane) always initiates the conversation and interviews the guest + - Include thoughtful questions from the host to guide the discussion + - Incorporate natural speech patterns, including occasional verbal fillers (e.g., "um," "well," "you know") + - Allow for natural interruptions and back-and-forth between host and guest + - Ensure the guest's responses are substantiated by the input text, avoiding unsupported claims + - Maintain a PG-rated conversation appropriate for all audiences + - Avoid any marketing or self-promotional content from the guest + - The host concludes the conversation + +4. **Summarize Key Insights:** + Naturally weave a summary of key points into the closing part of the dialogue. This should feel like a casual conversation rather than a formal recap, reinforcing the main takeaways before signing off. + +5. **Maintain Authenticity:** + Throughout the script, strive for authenticity in the conversation. Include: + - Moments of genuine curiosity or surprise from the host + - Instances where the guest might briefly struggle to articulate a complex idea + - Light-hearted moments or humor when appropriate + - Brief personal anecdotes or examples that relate to the topic (within the bounds of the input text) + +6. **Consider Pacing and Structure:** + Ensure the dialogue has a natural ebb and flow: + - Start with a strong hook to grab the listener's attention + - Gradually build complexity as the conversation progresses + - Include brief "breather" moments for listeners to absorb complex information + - End on a high note, perhaps with a thought-provoking question or a call-to-action for listeners + +Remember: Always reply in valid JSON format, without code blocks. Begin directly with the JSON output. 
""" diff --git a/utils.py b/utils.py index 4316628..ffc51bf 100644 --- a/utils.py +++ b/utils.py @@ -30,17 +30,22 @@ def generate_script(system_prompt: str, input_text: str, output_model): # Load as python object try: response = call_llm(system_prompt, input_text, output_model) - dialogue = output_model.model_validate_json( - response.choices[0].message.content - ) + dialogue = output_model.model_validate_json(response.choices[0].message.content) except ValidationError as e: error_message = f"Failed to parse dialogue JSON: {e}" system_prompt_with_error = f"{system_prompt}\n\nPlease return a VALID JSON object. This was the earlier error: {error_message}" response = call_llm(system_prompt_with_error, input_text, output_model) - dialogue = output_model.model_validate_json( - response.choices[0].message.content - ) - return dialogue + dialogue = output_model.model_validate_json(response.choices[0].message.content) + + # Call the LLM again to improve the dialogue + system_prompt_with_dialogue = f"{system_prompt}\n\nHere is the first draft of the dialogue you provided:\n\n{dialogue}." + response = call_llm( + system_prompt_with_dialogue, "Please improve the dialogue.", output_model + ) + improved_dialogue = output_model.model_validate_json( + response.choices[0].message.content + ) + return improved_dialogue def call_llm(system_prompt: str, text: str, dialogue_format): @@ -78,9 +83,13 @@ def generate_audio(text: str, speaker: str, language: str) -> bytes: speed = 1 if language != "EN" and speaker != "Guest": speed = 1.1 - + # Generate audio result = hf_client.predict( - text=text, language=language, speaker=accent, speed=speed, api_name="/synthesize" + text=text, + language=language, + speaker=accent, + speed=speed, + api_name="/synthesize", ) - return result \ No newline at end of file + return result