Hey, on day #19 of #javascriptmas I was trying to use the OpenAI API to generate the image first (I like its generated images better), but when I fed the resulting image URL to Hugging Face to get the caption, it failed to fetch the data, so I switched to using Hugging Face for both steps!
Any idea why it didn't work?
here’s the code:
```js
/** OpenAI setup **/
// import OpenAI from "openai"
// const openai = new OpenAI({
//     apiKey: process.env.OPENAPIKEY,
//     dangerouslyAllowBrowser: true
// })
/** HuggingFace setup **/
import { HfInference } from '@huggingface/inference'
import { blobToBase64 } from '/utils'

const HF_TOKEN = process.env.HF_TOKEN
const inference = new HfInference(HF_TOKEN)

// Open the prompt dialog as soon as the page loads
const dialogModal = document.getElementById('dialog-modal')
dialogModal.show()
document.addEventListener('submit', function (e) {
    e.preventDefault()
    const imageDescription = document.getElementById('user-input').value
    dialogModal.close()
    generateImage(imageDescription)
})
async function generateImage(imageToGenerate) {
    /** OpenAI **/
    // const response = await openai.images.generate({
    //     model: "dall-e-3",
    //     prompt: imageToGenerate,
    // })
    // console.log(response.data[0].url)
    // generateAltText(response.data[0].url)

    /** HuggingFace **/
    const response = await inference.textToImage({
        inputs: imageToGenerate,
        model: "stabilityai/stable-diffusion-2",
    })
    // textToImage resolves to a Blob, so convert it to a base64 data URL first
    const imageUrl = await blobToBase64(response)
    generateAltText(imageUrl)
}
async function generateAltText(imageUrl) {
    // Fetch the image back as a Blob and ask BLIP for a caption
    const data = await inference.imageToText({
        data: await (await fetch(imageUrl)).blob(),
        model: "Salesforce/blip-image-captioning-base",
    })
    renderImage(imageUrl, data.generated_text)
}
function renderImage(imageUrl, altText) {
    console.log(altText)
    const imageContainer = document.getElementById('image-container')
    imageContainer.innerHTML = ''
    const image = document.createElement('img')
    image.src = imageUrl
    image.alt = altText
    imageContainer.appendChild(image)
}
```