如何将问题和GPT API响应变量添加到Gradio chatbot()中?

huangapple go评论95阅读模式
英文:

How to add question and GPT Api response variables to Gradio chatbot()?

问题

我有一个程序,它从先前创建和工作的JSON文件中提取信息,然后使用ChatGPT API来查询此文档。我需要在机器人正常滚动时输入初始用户输入,然后是对他的输入的响应,并且我需要使用gr.Chatbot选项(gr.Interface不是一个选项)。不幸的是,gr.Chatbot() 文档不够详细,以至于我无法解决它。我已经创建了question_answer_NEW函数,还尝试使用Gradio文档中提供的示例中的bot函数,但没有结果。谢谢。以下是代码部分:

  1. # 你的代码

如果你需要进一步的帮助,请告诉我。

英文:

I have a program that extracts the information from a previously-created and working json file, and then uses chatGPT API to query this document. I need to input the initial user input and then the response to his input all within the bot scrolling up normally, and I need to use the gr.Chatbot option (gr.Interface is not an option). Unfortunately the gr.Chatbot() docs are not detailed enough so I can't solve it. I have created the function question_answer_NEW and also tried with the "bot" function provided as example in Gradio docs, but no results. Thank you. Here's the code:

  1. import gradio as gr
  2. import random
  3. import time
  4. from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
  5. from langchain.chat_models import ChatOpenAI
  6. from langchain import OpenAI
  7. import sys
  8. import os
  9. from IPython.display import Markdown, display
  10. import gradio, openai
  11. os.environ["OPENAI_API_KEY"] = 'sk-...' #present in code
  12. messages = [{"role": "system", "content": "You are a helpful customer service assistant."}]
  13. global query
  14. global question
  15. global response
  16. def question_answer_NEW(question):
  17. index = GPTSimpleVectorIndex.load_from_disk('index.json')
  18. for i in range(3): #while True:
  19. query = question
  20. response = index.query(query, response_mode="compact")
  21. messages.append({"User asks": question, "System response": response.response.strip()})
  22. #print(messages[-1])''
  23. print(query, response.response.strip())
  24. return query, response.response.strip()
  25. with gr.Blocks() as demo:
  26. chatbot = gr.Chatbot()
  27. msg = gr.Textbox() #placeholder = 'test'
  28. clear = gr.Button("Clear")
  29. global user_message
  30. def user(user_message, history):
  31. print('I am user message', user_message)
  32. print(history + [[user_message, None]])
  33. return "", history + [[user_message, None]]
  34. def bot(history):
  35. index = GPTSimpleVectorIndex.load_from_disk('index.json')
  36. bot_message = index.query(user_message, response_mode="compact")
  37. print('I am bot message', bot_message)
  38. history[-1][1] = bot_message
  39. time.sleep(1)
  40. return history
  41. return bot_message
  42. msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
  43. clear.click(lambda: None, None, chatbot, queue=False)
  44. if __name__ == "__main__":
  45. demo.launch()

答案1

得分: 1

I made it work. This is the solution:

  1. from IPython.display import Markdown, display
  2. from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
  3. from langchain.chat_models import ChatOpenAI
  4. from langchain import OpenAI
  5. import gradio as gr
  6. import random, time
  7. import sys
  8. import os
  9. os.environ["OPENAI_API_KEY"] = '' #insert your key there
  10. def construct_index(directory_path):
  11. global index
  12. # set maximum input size
  13. max_input_size = 4096
  14. # set number of output tokens
  15. num_outputs = 500
  16. # set maximum chunk overlap
  17. max_chunk_overlap = 20
  18. # set chunk size limit
  19. chunk_size_limit = 600
  20. llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_outputs)) #original temp was .5
  21. prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
  22. documents = SimpleDirectoryReader(directory_path).load_data()
  23. global index
  24. index = GPTSimpleVectorIndex(
  25. documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
  26. index.save_to_disk('index.json')
  27. return index
  28. construct_index("context_data/data/done")
  29. # Define chat function
  30. def chat(chat_history, user_input):
  31. index = GPTSimpleVectorIndex.load_from_disk('index.json')
  32. bot_response = index.query(user_input)
  33. response = ""
  34. for letter in ''.join(bot_response.response):
  35. response += letter + ""
  36. yield chat_history + [(user_input, response.strip())]
  37. # Build interface
  38. with gr.Blocks() as demo:
  39. with gr.Tab('Chat with this helpful AI assistant'):
  40. chatbot = gr.Chatbot()
  41. message = gr.Textbox(label = 'Write your message here and press "enter"')
  42. message.submit(chat, [chatbot, message], chatbot).then(lambda: None, None, message, queue=False)
  43. demo.queue().launch(debug = True, share = True)

Please note that the code part has not been translated.

英文:

I made it work. This is the solution:

  1. from IPython.display import Markdown, display
  2. from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
  3. from langchain.chat_models import ChatOpenAI
  4. from langchain import OpenAI
  5. import gradio as gr
  6. import random, time
  7. import sys
  8. import os
  9. os.environ["OPENAI_API_KEY"] = '' #insert your key there
  10. def construct_index(directory_path):
  11. global index
  12. # set maximum input size
  13. max_input_size = 4096
  14. # set number of output tokens
  15. num_outputs = 500
  16. # set maximum chunk overlap
  17. max_chunk_overlap = 20
  18. # set chunk size limit
  19. chunk_size_limit = 600
  20. llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_outputs)) #original temp was .5
  21. prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
  22. documents = SimpleDirectoryReader(directory_path).load_data()
  23. global index
  24. index = GPTSimpleVectorIndex(
  25. documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
  26. index.save_to_disk('index.json')
  27. return index
  28. construct_index("context_data/data/done")
  29. #Define chat function
  30. def chat(chat_history, user_input):
  31. index = GPTSimpleVectorIndex.load_from_disk('index.json')
  32. bot_response = index.query(user_input)
  33. response = ""
  34. for letter in ''.join(bot_response.response): #[bot_response[i:i+1] for i in range(0, len(bot_response), 1)]:
  35. response += letter + ""
  36. yield chat_history + [(user_input, response.strip())]
  37. #Build interface
  38. with gr.Blocks() as demo:
  39. with gr.Tab('Chat with this helpful AI assistant'):
  40. chatbot = gr.Chatbot()
  41. message = gr.Textbox (label = 'Write your message here and press "enter"')
  42. message.submit(chat, [chatbot, message], chatbot).then(lambda: None, None, message, queue=False)
  43. demo.queue().launch(debug = True, share = True)

huangapple
  • 本文由 发表于 2023年3月31日 18:55:21
  • 转载请务必保留本文链接:https://go.coder-hub.com/75897729.html
匿名

发表评论

匿名网友

:?: :razz: :sad: :evil: :!: :smile: :oops: :grin: :eek: :shock: :???: :cool: :lol: :mad: :twisted: :roll: :wink: :idea: :arrow: :neutral: :cry: :mrgreen:

确定