Next.js / React with ChatGPT

Filip Jerga
Published in Eincode
5 min read · May 5, 2023


Learn how to integrate the ChatGPT model into your JavaScript application.


1. Setup

Clone or download the initial project:

https://github.com/Jerga99/nextjs-chatgpt-integration

The whole setup is explained in the README file.

If you prefer a video tutorial, watch my YouTube video covering the same material.

Ok, let’s get to work! After you initialize the project, you should have this basic layout.

The base layout of the application

2. Get prompt data

All changes will be performed in src/pages/index.js

import { useState } from "react";

export default function Home() {
  const [prompt, setPrompt] = useState("");

  return (
    <div className="container">
      <div className="inputContainer">
        <textarea
          onChange={(e) => {
            setPrompt(e.target.value);
          }}
          value={prompt}
          placeholder="Ask a question"
          rows={3}
        />
      </div>
      ... rest of the template
    </div>
  )
}

The critical part is the onChange handler on the textarea, which stores the value in the prompt state. Now, whenever the user types into the textarea, the value is kept in state.

3. Simulate messaging

When we submit the prompt, we want to display the message in the UI and simulate a response from ChatGPT. For now, the response will just be a hardcoded string.

import { useState } from "react";

export default function Home() {
  const [prompt, setPrompt] = useState("");
  const [messages, setMessages] = useState([]);

  const handleSubmit = async () => {
    if (prompt.trim().length === 0) {
      return;
    }

    // Store the user's prompt as a "human" message.
    setMessages((messages) => [...messages, {
      text: prompt.trim(),
      id: new Date().toISOString(),
      author: "human"
    }]);

    setPrompt("");

    // Simulate the delay of a server response.
    await new Promise((res) => setTimeout(res, 1000));

    // Store a hardcoded "ai" response for now.
    setMessages((messages) => [...messages, {
      text: "Just some hardcoded response bla bla bla...",
      id: new Date().toISOString(),
      author: "ai"
    }]);
  }

  return (
    <div className="container">
      <div className="inputContainer">
        <textarea
          onChange={(e) => {
            setPrompt(e.target.value);
          }}
          value={prompt}
          placeholder="Ask a question"
          rows={3}
        />
        <button
          onClick={handleSubmit}
          className="submit">Submit
        </button>
      </div>
      ... rest of the template
    </div>
  )
}

Let’s add the state to store the messages first.

const [messages, setMessages] = useState([]);

Next, we add the function handleSubmit, which gets called when a user clicks the submit button.

Let's talk about this function. First, we make sure the prompt actually contains data; if it is empty, we return early from the function.

Next, we add a new message to the messages state. The first message stored is the user's prompt.

The message has this format:

{
  text: string; // message value
  id: string; // identifier, for example the current date
  author: "human" | "ai"; // the author can be human or ai
}

After the first message is added, we wait for 1000 ms (1 second) to simulate the delay of getting a response from a server.

After that, we store the hardcoded AI message. That's it for this section.

4. Display messages

We stored the messages in state, but we haven't displayed them yet.

Create a new component to display a message.

// If you define this component in the same file, add useEffect to the React import:
// import { useEffect, useState } from "react";
function MessageItem({message}) {
  // Human messages are shown immediately; AI messages start empty and get "typed out".
  const [text, setText] = useState(message.author === "human" ? message.text : "");

  useEffect(() => {
    // Reveal one more character every 10 ms until the whole message is displayed.
    const timeout = setTimeout(() => {
      setText(message.text.slice(0, text.length + 1));
    }, 10);

    // Clean up the pending timeout if the component unmounts.
    return () => clearTimeout(timeout);
  }, [text, message.text]);

  return (
    <div className="answer">
      <div className={`author author-${message.author}`}>
        {message.author}:
      </div>
      <div className="message">
        {text}
      </div>
    </div>
  )
}

Now map over the messages and display them in the return of the Home component.

<div className="container">
  <div className="inputContainer">
    <textarea
      onChange={(e) => {
        setPrompt(e.target.value);
      }}
      value={prompt}
      placeholder="Ask a question"
      rows={3}
    />
    <button
      onClick={handleSubmit}
      className="submit">Submit
    </button>
  </div>
  <div className="answers">
    {messages.map(message =>
      <MessageItem
        key={message.id}
        message={message}
      />
    )}
  </div>
</div>

5. API Endpoint

We need to create an API endpoint to send the prompt to and get a response from ChatGPT.

In the pages/api/ folder, add a new file called completion.js.


export default async function handler(req, res) {
  if (req.method === "POST") {
    const prompt = req.body.prompt || "";

    if (!process.env.OPENAI_SECRET_KEY) {
      return res.status(500).json({error: {message: "Api key is not provided!"}});
    }

    if (prompt.trim().length === 0) {
      return res.status(400).json({error: {message: "Provide prompt value!"}});
    }

    try {
      // Hardcoded response for now; the real ChatGPT call comes later.
      const RESULT = "Just some hardcoded response bla bla bla...";

      return res.status(200).json({result: RESULT});
    } catch (e) {
      return res.status(400).json({error: {message: e.message}});
    }

  } else {
    return res.status(500).json({error: {message: "Invalid Api Route!"}});
  }
}

This code is straightforward. We first get the prompt data from the request body.

If there is none, we return an error.

Similarly, we return an error if OPENAI_SECRET_KEY is not set up (which it isn't yet).

Finally, if all goes well, we send back the hardcoded response.
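
If you want to sanity-check the route before wiring up the client, you can call it manually, for example from the browser console while the dev server is running (this quick test is my own addition, not part of the project code):

// Quick manual test of the /api/completion endpoint:
fetch("/api/completion", {
  method: "POST",
  headers: {"Content-Type": "application/json"},
  body: JSON.stringify({prompt: "Hello!"})
})
  .then((res) => res.json())
  .then(console.log);
// Until the API key is set up (next sections), this logs the "Api key is not provided!" error;
// after that, it returns the result object.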

The application is not calling this endpoint yet, so let's fix that!

6. Call the endpoint

Back in pages/index.js

Call the endpoint in handleSubmit

const handleSubmit = async () => {
  if (prompt.trim().length === 0) {
    return;
  }

  setMessages((messages) => [...messages, {
    text: prompt.trim(),
    id: new Date().toISOString(),
    author: "human"
  }]);

  setPrompt("");

  // Send the prompt to our API route.
  const response = await fetch("/api/completion", {
    method: "POST",
    headers: {
      "Content-Type": "application/json"
    },
    body: JSON.stringify({prompt: prompt.trim()})
  });

  const json = await response.json();

  if (response.ok) {
    console.log(json.result);

    setMessages((messages) => [...messages, {
      text: json.result,
      id: new Date().toISOString(),
      author: "ai"
    }]);
  } else {
    console.warn(json?.error?.message);
  }
}

In the fetch function, we specify the URL of the endpoint and provide the prompt data.

Now let's pass this data to the ChatGPT model.

7. Set up the OpenAI API

To communicate with ChatGPT, we first need to get an API key.

Get this key on the following link: https://platform.openai.com/account/api-keys

In the project’s root folder, create a .env file and provide the API key you got from OpenAI.

OPENAI_SECRET_KEY=your_key
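
A quick note on how Next.js treats this variable: because the name has no NEXT_PUBLIC_ prefix, it is only available in server-side code, which is exactly what we want for a secret key (remember to restart the dev server after creating the file so the value gets picked up). In practice:

// Inside an API route (server-side) the key is available:
console.log(Boolean(process.env.OPENAI_SECRET_KEY)); // true once .env is in place

// In the browser bundle it stays undefined, because only NEXT_PUBLIC_* variables
// are exposed to the client.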

Then, at the top of completion.js, prepare the configuration.

import { Configuration, OpenAIApi } from "openai";

const configuration = new Configuration({
  apiKey: process.env.OPENAI_SECRET_KEY
});

const openAi = new OpenAIApi(configuration);

8. Call the model

Here is the final version of completion.js:

import { Configuration, OpenAIApi } from "openai";

const configuration = new Configuration({
  apiKey: process.env.OPENAI_SECRET_KEY
});

const openAi = new OpenAIApi(configuration);

export default async function handler(req, res) {
  if (req.method === "POST") {
    const prompt = req.body.prompt || "";

    if (!process.env.OPENAI_SECRET_KEY) {
      return res.status(500).json({error: {message: "Api key is not provided!"}});
    }

    if (prompt.trim().length === 0) {
      return res.status(400).json({error: {message: "Provide prompt value!"}});
    }

    try {
      const completion = await openAi.createCompletion({
        model: "text-davinci-003",
        prompt: prompt,
        temperature: 0.7,
        max_tokens: 1024
      });

      return res.status(200).json({result: completion.data.choices[0].text});
    } catch (e) {
      return res.status(400).json({error: {message: e.message}});
    }

  } else {
    return res.status(500).json({error: {message: "Invalid Api Route!"}});
  }
}

In the try block, we call createCompletion, providing the prompt data along with a few other parameters such as temperature and max_tokens.
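
As a rough guide to what those parameters do (the values are the ones used above; the annotations are my own):

// The options passed to createCompletion, annotated:
const options = {
  model: "text-davinci-003", // the completion model used in this tutorial
  prompt: prompt,            // the user's question from the request body
  temperature: 0.7,          // 0 = mostly deterministic, higher values = more varied answers
  max_tokens: 1024           // upper limit on the length of the generated answer
};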

The model responds with an answer, which we return in the server response; this way, the answer gets back to the client application, where we display it.

Now you should be able to get an actual answer from the model.
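
As a side note, the code above uses the text-davinci-003 completion model. If you would rather call the chat model behind ChatGPT itself (gpt-3.5-turbo), the same openai v3 SDK exposes createChatCompletion; a rough sketch of how the try block would change (treat it as a starting point, not as tested code from this article):

// Alternative: chat completions with gpt-3.5-turbo instead of createCompletion.
const completion = await openAi.createChatCompletion({
  model: "gpt-3.5-turbo",
  messages: [{role: "user", content: prompt}],
  temperature: 0.7,
  max_tokens: 1024
});

// The answer lives under message.content instead of text.
return res.status(200).json({result: completion.data.choices[0].message.content});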

And that's it! To learn more, you can watch the complete course.

Thank you.
