Next.js OpenAI rate limit 429 error

sr4lhrrt · posted 2023-04-11 in Other

I'm trying to use this repository to build semantic search over YouTube videos with OpenAI + Pinecone, but I'm hitting a 429 error at this step: "Run the command npx tsx src/bin/process-yt-playlist.ts to pre-process the transcripts and fetch embeddings from OpenAI, then insert them into the Pinecone search index."
Any help is appreciated!!
Attached is my openai.ts file:

import pMap from 'p-map'
import pMemoize from 'p-memoize'
import pRetry from 'p-retry'
import pThrottle from 'p-throttle'
import unescape from 'unescape'

import * as config from '@/lib/config'

import * as types from './types'

// TODO: enforce max OPENAI_EMBEDDING_CTX_LENGTH of 8191

// https://platform.openai.com/docs/guides/rate-limits/what-are-the-rate-limits-for-our-api
// TODO: enforce TPM
const throttleRPM = pThrottle({
  // 3k per minute instead of 3.5k per minute to add padding
  limit: 3000,
  interval: 60 * 1000,
  strict: true
})

type PineconeCaptionVectorPending = {
  id: string
  input: string
  metadata: types.PineconeCaptionMetadata
}

export async function getEmbeddingsForVideoTranscript({
  transcript,
  title,
  openai,
  model = config.openaiEmbeddingModel,
  maxInputTokens = 100, // TODO???
  concurrency = 1
}: {
  transcript: types.Transcript
  title: string
  openai: types.OpenAIApi
  model?: string
  maxInputTokens?: number
  concurrency?: number
}) {
  const { videoId } = transcript

  let pendingVectors: PineconeCaptionVectorPending[] = []
  let currentStart = ''
  let currentNumTokensEstimate = 0
  let currentInput = ''
  let currentPartIndex = 0
  let currentVectorIndex = 0
  let isDone = false

  // const createEmbedding = pMemoize(throttleRPM(createEmbeddingImpl))

  // Pre-compute the embedding inputs, making sure none of them are too long
  do {
    isDone = currentPartIndex >= transcript.parts.length

    const part = transcript.parts[currentPartIndex]
    const text = unescape(part?.text)
      .replaceAll('[Music]', '')
      .replaceAll(/[\t\n]/g, ' ')
      .replaceAll('  ', ' ')
      .trim()
    const numTokens = getNumTokensEstimate(text)

    if (!isDone && currentNumTokensEstimate + numTokens < maxInputTokens) {
      if (!currentStart) {
        currentStart = part.start
      }

      currentNumTokensEstimate += numTokens
      currentInput = `${currentInput} ${text}`

      ++currentPartIndex
    } else {
      currentInput = currentInput.trim()
      if (isDone && !currentInput) {
        break
      }

      const currentVector: PineconeCaptionVectorPending = {
        id: `${videoId}:${currentVectorIndex++}`,
        input: currentInput,
        metadata: {
          title,
          videoId,
          text: currentInput,
          start: currentStart
        }
      }

      pendingVectors.push(currentVector)

      // reset current batch
      currentNumTokensEstimate = 0
      currentStart = ''
      currentInput = ''
    }
  } while (!isDone)
  let index = 0;

  console.log("Entering embeddings calculation")
  // Evaluate all embeddings with a max concurrency
  // const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
  const vectors: types.PineconeCaptionVector[] = await pMap(
    pendingVectors,
    async (pendingVector) => {
      // await delay(6000); // add a 6-second delay before each iteration
      console.log(pendingVector.input + " " + model)

      // const { data: embed } = await openai.createEmbedding({
      //   input: pendingVector.input,
      //   model
      // })

      // Call the OpenAI embeddings endpoint, retrying with exponential backoff on failure
      async function createEmbeddingImpl({
        input = pendingVector.input,
        model = 'text-embedding-ada-002'
      }: {
        input: string
        model?: string
      }): Promise<number[]> {
        const res = await pRetry(
          () =>
            openai.createEmbedding({
              input,
              model
            }),
          {
            retries: 4,
            minTimeout: 1000,
            factor: 2.5
          }
        )
      
        return res.data.data[0].embedding
      }

      // pMemoize returns a function, so it should not be awaited. Note that because
      // this wrapper is rebuilt on every iteration, the memoize cache is never shared
      // between batches; only the throttleRPM state carries over across calls.
      const embedding = pMemoize(throttleRPM(createEmbeddingImpl))

      const vector: types.PineconeCaptionVector = {
        id: pendingVector.id,
        metadata: pendingVector.metadata,
        values: await embedding({ input: pendingVector.input, model })
      }
      console.log(`OpenAI embedding call ${index} finished for ${pendingVector.id}`)
      index++;
      return vector
    },
    {
      concurrency
    }
  )

  return vectors
}

function getNumTokensEstimate(input: string): number {
  const numTokens = (input || '')
    .split(/\s/)
    .map((token) => token.trim())
    .filter(Boolean).length

  return numTokens
}

I've already tried increasing the interval between API calls so that I stay well below the limit, but somehow I still get the same error.
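
For reference, the way I understand the throttling is supposed to work is that the throttle and the memoize cache get created once and shared across every embedding call, rather than being rebuilt inside the pMap callback. Here is a rough sketch of that setup, using the same p-throttle / p-retry / p-memoize packages and the openai v3 client as above; openaiClient is a hypothetical standalone instance standing in for the openai parameter the real function receives:

import pMemoize from 'p-memoize'
import pRetry from 'p-retry'
import pThrottle from 'p-throttle'
import { Configuration, OpenAIApi } from 'openai'

// Hypothetical standalone client, standing in for the `openai` instance that
// getEmbeddingsForVideoTranscript receives as a parameter.
const openaiClient = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
)

// A single throttle instance: every function wrapped with it shares the same
// 3000-requests-per-minute budget.
const throttle = pThrottle({
  limit: 3000,
  interval: 60 * 1000,
  strict: true
})

async function createEmbeddingImpl(input: string): Promise<number[]> {
  const res = await pRetry(
    () =>
      openaiClient.createEmbedding({
        input,
        model: 'text-embedding-ada-002'
      }),
    {
      retries: 4,
      minTimeout: 1000,
      factor: 2.5
    }
  )

  return res.data.data[0].embedding
}

// Wrapped once at module scope, so the throttle queue and the memoize cache
// are shared across every caption batch and every video in the playlist.
export const createEmbedding = pMemoize(throttle(createEmbeddingImpl))

With the wrapper at module scope, every video processed by process-yt-playlist.ts draws from the same 3000-requests-per-minute budget.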


hs1rzwqc1#

OpenAI sends a 429 Rate Limit error if you don't have any credits. I had been using the free credits, which expire after 3 months. You can check your available credits on the usage page:
https://platform.openai.com/account/usage
Side note: once I put a credit card on file, it took about 5 minutes for the rate limiting to go away.
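
If you want to confirm that the 429 is really a quota problem rather than a true rate limit, log the error body that comes back with the response. Here is a rough sketch, assuming the axios-based openai v3 client the repo uses; the exact field names ('insufficient_quota' in particular) are from memory, so treat them as an assumption:

import { AxiosError } from 'axios'

// Hypothetical helper: inspect a failed createEmbedding call and report whether
// the 429 looks like a quota problem or a genuine rate limit.
function describeOpenAIError(err: unknown): string {
  const axiosErr = err as AxiosError<{ error?: { type?: string; message?: string } }>
  const status = axiosErr.response?.status
  const apiError = axiosErr.response?.data?.error

  if (status === 429 && apiError?.type === 'insufficient_quota') {
    // Assumption: quota-related 429s carry the type 'insufficient_quota'
    return `Out of credits: ${apiError.message}`
  }

  if (status === 429) {
    return `Rate limited: ${apiError?.message ?? 'slow down and retry'}`
  }

  return `OpenAI request failed (status ${status}): ${apiError?.message ?? String(err)}`
}

// Example usage around the embedding call:
// try {
//   await openai.createEmbedding({ input, model })
// } catch (err) {
//   console.error(describeOpenAIError(err))
//   throw err
// }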
