Advertisement
Not a member of Pastebin yet?
Sign Up —
it unlocks many cool features!
- async function handleResponseAsStream(clientResponse, apiResponse) {
- const reader = apiResponse.body.getReader();
- const nextDecoder = new TextDecoder();
- clientResponse.write("data: " + JSON.stringify(createBeginChunk()) + "\n\n");
- new ReadableStream({
- start(controller) {
- return pump();
- function pump() {
- return reader.read().then(({ done, value }) => {
- const textData = nextDecoder.decode(value);
- clientResponse.write(
- "data: " + JSON.stringify(createMessageChunk(textData)) + "\n\n"
- );
- // When no more data needs to be consumed, close the stream
- if (done) {
- clientResponse.write(
- "data: " + JSON.stringify(createEndChunk()) + "\n\n"
- );
- clientResponse.end();
- controller.close();
- return;
- }
- // Enqueue the next data chunk into our target stream
- controller.enqueue(value);
- return pump();
- });
- }
- },
- });
- }
- const createBeginChunk = () => ({
- id: "chatcmpl-123",
- object: "chat.completion.chunk",
- created: getCurrentDate(),
- model: "gpt-4",
- system_fingerprint: "",
- choices: [
- {
- index: 0,
- delta: { role: "assistant", content: "" },
- logprobs: null,
- finish_reason: null,
- },
- ],
- });
- const createMessageChunk = (text) => ({
- id: "chatcmpl-123",
- object: "chat.completion.chunk",
- created: getCurrentDate(),
- model: "gpt-4",
- system_fingerprint: "",
- choices: [
- {
- index: 0,
- delta: { content: text },
- logprobs: null,
- finish_reason: null,
- },
- ],
- });
- const createEndChunk = () => ({
- id: "chatcmpl-123",
- object: "chat.completion.chunk",
- created: getCurrentDate(),
- model: "gpt-4",
- system_fingerprint: "",
- choices: [{ index: 0, delta: {}, logprobs: null, finish_reason: "stop" }],
- });
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement