
How to add question and GPT API response variables to Gradio chatbot()?

Question


I have a program that extracts information from a previously created and working JSON file and then uses the ChatGPT API to query this document. I need the initial user input, and then the response to that input, to appear in the bot as it scrolls up normally, and I need to use the gr.Chatbot option (gr.Interface is not an option). Unfortunately, the gr.Chatbot() docs are not detailed enough, so I can't solve it. I have created the function question_answer_NEW and also tried the "bot" function provided as an example in the Gradio docs, but with no results. Thank you. Here's the code:

import gradio as gr
import random
import time
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
from langchain import OpenAI
import sys
import os
from IPython.display import Markdown, display
os.environ["OPENAI_API_KEY"] = 'sk-...' #present in code
messages = [{"role": "system", "content": "You are a helpful customer service assistant."}]

# Note: 'global' statements at module level are no-ops; they do not make
# these names visible inside the functions below.
global query
global question
global response

def question_answer_NEW(question):
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    for i in range(3): #while True:
        query = question
        response = index.query(query, response_mode="compact")
        messages.append({"User asks": question, "System response": response.response.strip()})
        #print(messages[-1])
        print(query, response.response.strip())
        # Note: returning here exits on the first iteration, so the loop only runs once.
        return query, response.response.strip()


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox() #placeholder = 'test'
    clear = gr.Button("Clear")
    
    # Note: this declares user_message global, but nothing ever assigns it,
    # so bot() below fails with NameError when it tries to read it.
    global user_message

    def user(user_message, history):
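        # Chatbot history is a list of [user_text, bot_text] pairs; None marks a pending bot reply.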
        print('I am user message', user_message)
        print(history + [[user_message, None]])
        return "", history + [[user_message, None]]
    


    def bot(history):
        index = GPTSimpleVectorIndex.load_from_disk('index.json')
        # user_message is never assigned anywhere, so this raises NameError;
        # the latest user text is available as history[-1][0].
        bot_message = index.query(user_message, response_mode="compact")
        print('I am bot message', bot_message)
        # index.query() returns a Response object; the text is bot_message.response.
        history[-1][1] = bot_message
        time.sleep(1)
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
    demo.launch()


Answer 1

Score: 1

I made it work. This is the solution:

from IPython.display import Markdown, display
from llama_index import SimpleDirectoryReader, GPTListIndex, readers, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain.chat_models import ChatOpenAI
from langchain import OpenAI
import gradio as gr
import random, time
import sys
import os

os.environ["OPENAI_API_KEY"] = '' #insert your key there

def construct_index(directory_path):
    global index
    # set maximum input size
    max_input_size = 4096
    # set number of output tokens
    num_outputs = 500
    # set maximum chunk overlap
    max_chunk_overlap = 20
    # set chunk size limit
    chunk_size_limit = 600

    llm_predictor = LLMPredictor(llm = ChatOpenAI(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_outputs)) #original temp was .5
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTSimpleVectorIndex(
        documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    index.save_to_disk('index.json')
    return index

construct_index("context_data/data/done")

# Define chat function
def chat(chat_history, user_input):
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    bot_response = index.query(user_input)
    response = ""
    for letter in bot_response.response:
        response += letter
        # Yield the growing reply so gr.Chatbot re-renders it letter by
        # letter, producing a typing effect (requires demo.queue()).
        yield chat_history + [(user_input, response.strip())]

# Build interface
with gr.Blocks() as demo:
    with gr.Tab('Chat with this helpful AI assistant'):
        chatbot = gr.Chatbot()
        message = gr.Textbox(label = 'Write your message here and press "enter"')
        message.submit(chat, [chatbot, message], chatbot).then(lambda: None, None, message, queue=False)

demo.queue().launch(debug = True, share = True)
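
For reference, here is a minimal sketch of the same gr.Chatbot pattern with the llama_index lookup replaced by a hypothetical echoed reply, useful for verifying the Gradio wiring independently of the index (the echoed text and the 0.02 s delay are assumptions for illustration, not part of the original solution):

import time
import gradio as gr

def chat(chat_history, user_input):
    # Hypothetical stand-in for index.query(); swap in a real lookup here.
    bot_text = "You said: " + user_input
    response = ""
    for letter in bot_text:
        response += letter
        time.sleep(0.02)  # slow the loop so the streaming is visible
        # Each yield re-renders the chatbot with the partially-typed reply.
        yield chat_history + [(user_input, response)]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    message = gr.Textbox(label='Write your message here and press "enter"')
    message.submit(chat, [chatbot, message], chatbot).then(
        lambda: None, None, message, queue=False)

# Generator callbacks only stream their intermediate yields when the queue is enabled.
demo.queue().launch()

The key differences from the question's bot function are that chat receives the user's text as an input component rather than reading a module-level global, and that it yields an updated history instead of returning once.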

