OpenAI GPT-3 API error 429: "Request failed with status code 429"


Question

I'm trying to connect the OpenAI API to my Vue.js project. Everything is set up, but every time I make a POST request I get a 429 status code (too many requests), even though I never got the chance to make one. Any help?

Response:

{
    "message": "Request failed with status code 429",
    "name": "Error",
    "stack": "Error: Request failed with status code 429\n    at createError (C:\\Users\\sim\\Documents\\SC\\server\\node_modules\\axios\\lib\\core\\createError.js:16:15)\n    at settle (C:\\Users\\sim\\Documents\\SC\\server\\node_modules\\axios\\lib\\core\\settle.js:17:12)\n    at IncomingMessage.handleStreamEnd (C:\\Users\\sim\\Documents\\SC\\server\\node_modules\\axios\\lib\\adapters\\http.js:322:11)\n    at IncomingMessage.emit (events.js:412:35)\n    at endReadableNT (internal/streams/readable.js:1333:12)\n    at processTicksAndRejections (internal/process/task_queues.js:82:21)",
    "config": {
        "transitional": {
            "silentJSONParsing": true,
            "forcedJSONParsing": true,
            "clarifyTimeoutError": false
        },
        "transformRequest": [
            null
        ],
        "transformResponse": [
            null
        ],
        "timeout": 0,
        "xsrfCookieName": "XSRF-TOKEN",
        "xsrfHeaderName": "X-XSRF-TOKEN",
        "maxContentLength": -1,
        "maxBodyLength": -1,
        "headers": {
            "Accept": "application/json, text/plain, */*",
            "Content-Type": "application/json",
            "User-Agent": "OpenAI/NodeJS/3.1.0",
            "Authorization": "Bearer secret",
            "Content-Length": 137
        },
        "method": "post",
        "data": "{\"model\":\"text-davinci-003\",\"prompt\":\"option-2\",\"temperature\":0,\"max_tokens\":3000,\"top_p\":1,\"frequency_penalty\":0.5,\"presence_penalty\":0}",
        "url": "https://api.openai.com/v1/completions"
    },
    "status": 429
}

My method in Vue.js:

async handleSelect() {
      try {
        const res = await fetch("http://localhost:8000/", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            question: this.selectedOption,
          })
        })

        const data = await res.json();
        console.log(data);
      } catch (error) {
        console.log(error);
      }
    }

On the server side:

app.post("/", async (req, res) => {
  try {
    const question = req.body.question;

    const response = await openai.createCompletion({
      model: "text-davinci-003",
      prompt: `${question}`,
      temperature: 0, // Higher values mean the model will take more risks.
      max_tokens: 3000, // The maximum number of tokens to generate in the completion. Most models have a context length of 2048 tokens (the newest models support 4096).
      top_p: 1, // An alternative to sampling with temperature, called nucleus sampling.
      frequency_penalty: 0.5, // Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
      presence_penalty: 0, // Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    });

    res.status(200).send({
      bot: response.data.choices[0].text,
    });
  } catch (error) {
    res.status(500).send(error || "Something went wrong");
  }
});

Answer 1

Score: 1

As stated in the official OpenAI article:

> This (i.e., 429) error message indicates that you have hit your assigned rate limit for the API. This means that you have submitted too many tokens or requests in a short period of time and have exceeded the number of requests allowed. This could happen for several reasons, such as:
>
> - You are using a loop or a script that makes frequent or concurrent requests.
> - You are sharing your API key with other users or applications.
> - You are using a free plan that has a low rate limit.
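
If the 429 is caused by a short burst of requests rather than an exhausted quota, it can usually be absorbed by retrying with exponential backoff before giving up. Below is a minimal sketch (my own addition, not from the original answer), assuming the same openai client (openai npm package v3) used in the server code and a hypothetical createCompletionWithRetry helper; if the plan's quota itself is used up, retrying will not help.

// Sketch: retry a completion with exponential backoff when the API answers 429.
// Assumes `openai` is an OpenAIApi instance (openai npm package v3) already in scope.
async function createCompletionWithRetry(request, maxRetries = 3) {
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      return await openai.createCompletion(request);
    } catch (err) {
      const status = err.response && err.response.status;
      // Only retry on 429 (rate limit), and only while attempts remain.
      if (status !== 429 || attempt === maxRetries) throw err;
      const delayMs = 1000 * 2 ** attempt; // 1s, 2s, 4s, ...
      await new Promise((resolve) => setTimeout(resolve, delayMs));
    }
  }
}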

Working example

Frontend

HelloWorld.vue

<template>
  <div class="hello"></div>

  <select v-model="selected" @change="handleSelect()">
    <option disabled value="">请选择一个</option>
    <option>Say this is a test</option>
    <option>Say nothing</option>
  </select>

  <div class="container-selected">Selected: {{ selected }}</div>

  <div class="container-data" v-if="showData">{{ showData.bot }}</div>
</template>

<script>
export default {
  data: function () {
    return {
      selected: "",
      showData: "",
    };
  },
  methods: {
    async handleSelect() {
      try {
        const res = await fetch("http://localhost:3000/", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify({
            question: this.selected,
          }),
        });

        const data = await res.json();
        this.showData = data;
        console.log(data);
      } catch (error) {
        console.log(error);
      }
    },
  },
};
</script>

<style lang="scss">
.container-selected {
  margin-top: 12px;
  font-size: 20px;
}

.container-data {
  margin-top: 24px;
  font-size: 20px;
}
</style>

package.json

{
  "name": "openai",
  "version": "0.1.0",
  "private": true,
  "scripts": {
    "serve": "vue-cli-service serve",
    "build": "vue-cli-service build",
    "lint": "vue-cli-service lint"
  },
  "dependencies": {
    "register-service-worker": "^1.7.2",
    "vue": "^3.2.13",
    "vue-class-component": "^8.0.0-0",
    "vue-router": "^4.0.3",
    "vuex": "^4.0.0"
  },
  "devDependencies": {
    "@typescript-eslint/eslint-plugin": "^5.4.0",
    "@typescript-eslint/parser": "^5.4.0",
    "@vue/cli-plugin-eslint": "~5.0.0",
    "@vue/cli-plugin-pwa": "~5.0.0",
    "@vue/cli-plugin-router": "~5.0.0",
    "@vue/cli-plugin-typescript": "~5.0.0",
    "@vue/cli-plugin-vuex": "~5.0.0",
    "@vue/cli-service": "~5.0.0",
    "@vue/eslint-config-typescript": "^9.1.0",
    "eslint": "^7.32.0",
    "eslint-config-prettier": "^8.3.0",
    "eslint-plugin-prettier": "^4.0.0",
    "eslint-plugin-vue": "^8.0.3",
    "prettier": "^2.4.1",
    "sass": "^1.32.7",
    "sass-loader": "^12.0.0",
    "typescript": "~4.5.5"
  }
}

Backend

index.js

const express = require('express');
const app = express();
app.use(express.json()); // parse JSON request bodies

const cors = require('cors');
app.use(cors()); // allow the Vue dev server (a different origin) to call this API

app.post('/', async (req, res) => {
  try {
    const { Configuration, OpenAIApi } = require('openai'); // openai npm package v3
    const configuration = new Configuration({
      apiKey: 'sk-xxxxxxxxxxxxxxxxxxxx', // replace with your own secret API key
    });
    const openai = new OpenAIApi(configuration);

    const question = req.body.question;

    await openai.createCompletion({
      model: 'text-davinci-003',
      prompt: question,
      temperature: 0,
      max_tokens: 7,
    })
      .then((response) => {
        console.log(response.data.choices[0].text);
        res.status(200).send({ bot: response.data.choices[0].text });
      })
      .catch((err) => {
        res.status(400).send({ message: err.message });
      });
  } catch (error) {
    res.status(500).send(error || 'Something went wrong');
  }
});

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}.`);
});
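
With the server running (for example via node index.js or nodemon index.js), the endpoint can be exercised with a small script like the one below. This is just a sketch of my own, assuming Node 18+ for the built-in fetch; the file name test.js is hypothetical.

// test.js - quick manual check of the local endpoint (requires Node 18+ for built-in fetch)
fetch('http://localhost:3000/', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ question: 'Say this is a test' }),
})
  .then((res) => res.json())
  .then((data) => console.log(data)) // expected shape: { bot: '...' }
  .catch((err) => console.error(err));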

package.json

{
  "name": "openai-server",
  "version": "1.0.0",
  "description": "Express server",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "cors": "^2.8.5",
    "express": "^4.18.2",
    "nodemon": "^2.0.20",
    "openai": "^3.1.0"
  }
}

Output

(screenshot of the output)

