英文:
Unhandled Rejection (TypeError): Cannot read properties of undefined (reading 'protocol')
问题
I'm learning and following a Deep Learning tutorial that uses FastAPI to serve prediction models to a React.js App.
It returns a console log error: Unhandled Rejection (TypeError): Cannot read properties of undefined (reading 'protocol')
API runs: "http://localhost:8000" MyApp runs: "http://localhost:3000"
Here is the React code to make the API request:
import Card from "@material-ui/core/Card";
import CardContent from "@material-ui/core/CardContent";
import { Paper, CardActionArea, CardMedia, Grid, TableContainer, Table, TableBody, TableHead, TableRow, TableCell, Button, CircularProgress } from "@material-ui/core";
import cblogo from "./cblogo.PNG";
import image from "./bg.png";
import { DropzoneArea } from 'material-ui-dropzone';
import { common } from '@material-ui/core/colors';
import Clear from '@material-ui/icons/Clear';
// React component that lets the user pick an image and POST it to the
// FastAPI backend for disease classification.
export const ImageUpload = () => {
  const classes = useStyles();
  const [selectedFile, setSelectedFile] = useState();
  const [preview, setPreview] = useState();
  const [data, setData] = useState();
  const [image, setImage] = useState(false);
  const [isLoading, setIsloading] = useState(false);
  let confidence = 0;

  // Upload the selected file to the prediction endpoint as multipart form data.
  const sendFile = async () => {
    if (image) {
      let formData = new FormData();
      formData.append("file", selectedFile);
      // BUG FIX: the original had a Python-style "#" comment on this line,
      // which is a syntax error in JavaScript — use "//" comments instead.
      // NOTE(review): if REACT_APP_API_URL is unset, `url` is undefined and
      // axios throws "Cannot read properties of undefined (reading 'protocol')";
      // set it to e.g. http://localhost:8000/predict and restart `npm start`.
      let res = await axios({
        method: "post",
        url: process.env.REACT_APP_API_URL,
        data: formData,
      });
      if (res.status === 200) {
        setData(res.data);
      }
      setIsloading(false);
    }
  };

  // NOTE(review): returning the boolean `image` state instead of JSX looks
  // like a truncated snippet — confirm against the full component source.
  return image;
};
Here is the FastAPI backend code that serves the prediction API:
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
import tensorflow as tf
# FastAPI application serving the plant-disease prediction model.
app = FastAPI()

# Front-end origins allowed to make cross-origin (CORS) requests.
origins = [
    "http://localhost",
    "http://localhost:3000",
]
# Allow the React dev server (port 3000) to call this API from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the trained Keras model once at startup.
# NOTE(review): the relative path assumes the server is started from the
# directory containing this file — confirm the working directory.
MODEL = tf.keras.models.load_model("../saved-models/3")
# Class labels, ordered to match the model's output units.
CLASS_NAMES = ["Early Blight", "Late Blight", "Healthy"]
@app.get("/ping")
async def ping():
    """Health-check endpoint; responds with a static greeting."""
    greeting = "hello"
    return greeting
def read_file_as_image(data: bytes) -> np.ndarray:
    """Decode raw image-file bytes into a NumPy array.

    Args:
        data: Raw bytes of an uploaded image file (e.g. PNG/JPEG contents).

    Returns:
        The decoded image as a NumPy array.
    """
    image = np.array(Image.open(BytesIO(data)))
    # BUG FIX: the original omitted the return statement, so callers received
    # None despite the annotated np.ndarray return type.
    return image
Here is the prediction method:
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded image and return the label with its confidence.

    BUG FIX: the parameter was named ``fil``, but FastAPI matches an
    ``UploadFile`` to the multipart form field by parameter name, and the
    React client appends the field as ``"file"`` — the mismatch caused
    422 Unprocessable Entity responses.
    """
    image = read_file_as_image(await file.read())
    # Add a leading batch dimension: the model predicts on batches.
    img_batch = np.expand_dims(image, 0)
    predictions = MODEL.predict(img_batch)
    predicted_class = CLASS_NAMES[np.argmax(predictions[0])]
    confidence = np.max(predictions[0])
    # Cast np.float to a plain float so the dict is JSON-serializable.
    return {
        'class': predicted_class,
        'confidence': float(confidence)
    }
# Start the API with uvicorn when this file is executed directly (dev mode).
if __name__ == "__main__":
    uvicorn.run(app, host='localhost', port=8000)
英文:
I'm learning and following a Deep Learning tutorial that uses FastAPI to serve prediction models to a React.js App.
It returns a console log error: Unhandled Rejection (TypeError): Cannot read properties of undefined (reading 'protocol')
The API runs at "http://localhost:8000"; my app runs at "http://localhost:3000".
**Here is the FastAPI backend code:**
from fastapi import FastAPI, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
import tensorflow as tf
# FastAPI application serving the plant-disease prediction model.
app = FastAPI()

# Front-end origins allowed to make cross-origin (CORS) requests.
origins = [
    "http://localhost",
    "http://localhost:3000",
]
# Allow the React dev server (port 3000) to call this API from the browser.
# BUG FIX: the closing parenthesis of this call was missing in the original
# (replaced by a stray "-" fused onto the MODEL line), which is a SyntaxError.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the trained Keras model once at startup.
MODEL = tf.keras.models.load_model("../saved-models/3")
# Class labels, ordered to match the model's output units.
CLASS_NAMES = ["Early Blight", "Late Blight", "Healthy"]
@app.get("/ping")
async def ping():
    """Liveness probe: always answers with a fixed greeting string."""
    response_text = "hello"
    return response_text
def read_file_as_image(data: bytes) -> np.ndarray:
    """Decode raw image-file bytes into a NumPy array.

    Args:
        data: Raw bytes of an uploaded image file (e.g. PNG/JPEG contents).

    Returns:
        The decoded image as a NumPy array.
    """
    image = np.array(Image.open(BytesIO(data)))
    # BUG FIX: the original omitted the return statement, so callers received
    # None despite the annotated np.ndarray return type.
    return image
**Here is the React code that makes the API request:**
import Card from "@material-ui/core/Card";
import CardContent from "@material-ui/core/CardContent";
import { Paper, CardActionArea, CardMedia, Grid, TableContainer, Table, TableBody, TableHead, TableRow, TableCell, Button, CircularProgress } from "@material-ui/core";
import cblogo from "./cblogo.PNG";
import image from "./bg.png";
import { DropzoneArea } from 'material-ui-dropzone';
import { common } from '@material-ui/core/colors';
import Clear from '@material-ui/icons/Clear';
// React component that lets the user pick an image and POST it to the
// FastAPI backend for disease classification.
export const ImageUpload = () => {
  const classes = useStyles();
  const [selectedFile, setSelectedFile] = useState();
  const [preview, setPreview] = useState();
  const [data, setData] = useState();
  const [image, setImage] = useState(false);
  const [isLoading, setIsloading] = useState(false);
  let confidence = 0;

  // Upload the selected file to the prediction endpoint as multipart form data.
  const sendFile = async () => {
    if (image) {
      let formData = new FormData();
      formData.append("file", selectedFile);
      // BUG FIX: the original had a Python-style "#" comment on this line,
      // which is a syntax error in JavaScript — use "//" comments instead.
      // NOTE(review): if REACT_APP_API_URL is unset, `url` is undefined and
      // axios throws "Cannot read properties of undefined (reading 'protocol')";
      // set it to e.g. http://localhost:8000/predict and restart `npm start`.
      let res = await axios({
        method: "post",
        url: process.env.REACT_APP_API_URL,
        data: formData,
      });
      if (res.status === 200) {
        setData(res.data);
      }
      setIsloading(false);
    }
  };

  // NOTE(review): returning the boolean `image` state instead of JSX looks
  // like a truncated snippet — confirm against the full component source.
  return image;
// BUG FIX: the original snippet was missing the component's closing brace.
};
# Prediction endpoint.
@app.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Classify an uploaded image and return the label with its confidence.

    BUG FIX: the parameter was named ``fil``, but FastAPI matches an
    ``UploadFile`` to the multipart form field by parameter name, and the
    React client appends the field as ``"file"`` — the mismatch caused
    422 Unprocessable Entity responses.
    """
    image = read_file_as_image(await file.read())
    # Add a leading batch dimension: the model predicts on batches.
    img_batch = np.expand_dims(image, 0)
    predictions = MODEL.predict(img_batch)
    predicted_class = CLASS_NAMES[np.argmax(predictions[0])]
    confidence = np.max(predictions[0])
    # Cast np.float to a plain float so the dict is JSON-serializable.
    return {
        'class': predicted_class,
        'confidence': float(confidence)
    }
# Start the API with uvicorn when this file is executed directly (dev mode).
if __name__ == "__main__":
    uvicorn.run(app, host='localhost', port=8000)
答案1
得分: 1
我认为我正在处理相同的项目...端到端的番茄或土豆或...疾病,我遇到了同样的问题,感谢Aymendp,我也解决了第一个问题,但之后它仍然在处理中,我已经将REACT_APP_API_URL更改为REACT_APP_API_URL= http://localhost:8000/predict,然后重新启动了npm,没有重新启动它,它将无法正常工作,请确保您的main.py文件已加载(或者您正在工作的位置),并且端口已打开并正在运行,然后之后重新启动npm(通过关闭它,然后再次运行"start npm",现在图片将加载,但类别将不会输出任何内容,置信度得分输出为"NaN%,我现在正在处理这个问题,稍后会通知您,如果您想知道我是如何解决GPT4的问题的(我用它解决了很多问题):)
英文:
I think I'm working on the same project... End-to-end Tomato or Potato (or similar) Disease classification. I encountered the same problem, and thanks to Aymendp I also fixed the first issue, but afterwards it kept on "processing". I changed my REACT_APP_API_URL to REACT_APP_API_URL=http://localhost:8000/predict and then restarted npm — it won't work without restarting it. Make sure your main.py file (or wherever you're working) is loaded and the port is open and running, then restart npm (close it and run "npm start" again). Now the picture will load, but the class prints nothing and the confidence score prints "NaN%". I'm working on this now and will let you know a bit later. If you want to know how: I fixed the problem with GPT-4 (I use it for a lot of my problems).
通过集体智慧和协作来改善编程学习和解决问题的方式。致力于成为全球开发者共同参与的知识库,让每个人都能够通过互相帮助和分享经验来进步。
评论