How to run a CodePen project locally
Question
I want to run a CodePen project locally, but when I copy-paste the files into a local project on my computer, it doesn't work. The project has 3 files (HTML, CSS, JS (TS)).
How can I fix it and make it work?
The CodePen project I want to run locally is below:
<kbd>https://codepen.io/mediapipe-preview/pen/OJBVQJm</kbd>
I copy-pasted the files into the local project, but it seems the JS file is not running. I even linked the JS and CSS files in the HTML file, but it still doesn't work.
I even exported the whole project from CodePen, but it still doesn't work for me.
Answer 1
Score: 0
Heya, hope you are doing well! Here's a working version of the CodePen you provided that runs locally; I have made the necessary changes for you and hope it helps. The root cause is that the pen's script panel is written in TypeScript, which CodePen compiles to JavaScript before it runs. Pasted verbatim into a local .js file or <script> tag, the leftover type annotations are invalid (or, worse, valid but wrong) plain JavaScript, so the script never runs correctly; the Sass "@use" line in the CSS panel has the same problem.
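To see why, compare one line from the pen's script with its plain-JavaScript equivalent. This is a minimal illustration based on the runningMode declaration; the same fix is applied in the full snippet below:

// The TypeScript panel declares a string union type:
//   let runningMode: "IMAGE" | "VIDEO" = "IMAGE";
// Copied into plain JavaScript with the annotation mangled, it becomes
//   let runningMode = "IMAGE" | "VIDEO";
// which is a bitwise OR of two strings and evaluates to 0, not "IMAGE".
// The working plain-JavaScript declaration is simply:
let runningMode = "IMAGE";

console.log(runningMode);       // "IMAGE"
console.log("IMAGE" | "VIDEO"); // 0 - the silent bug from a raw copy-paste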
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-html -->
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="Cache-control" content="no-cache, no-store, must-revalidate">
<meta http-equiv="Pragma" content="no-cache">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no">
<title>Face Landmarker</title>
<style>
@use "@material";
body {
font-family: helvetica, arial, sans-serif;
margin: 2em;
color: #3d3d3d;
--mdc-theme-primary: #007f8b;
--mdc-theme-on-primary: #f1f3f4;
}
h1 {
font-style: italic;
color: #ff6f00;
color: #007f8b;
}
h2 {
clear: both;
}
em {
font-weight: bold;
}
video {
clear: both;
display: block;
transform: rotateY(180deg);
-webkit-transform: rotateY(180deg);
-moz-transform: rotateY(180deg);
}
section {
opacity: 1;
transition: opacity 500ms ease-in-out;
}
header,
footer {
clear: both;
}
.removed {
display: none;
}
.invisible {
opacity: 0.2;
}
.note {
font-style: italic;
font-size: 130%;
}
.videoView,
.detectOnClick,
.blend-shapes {
position: relative;
float: left;
width: 48%;
margin: 2% 1%;
cursor: pointer;
}
.videoView p,
.detectOnClick p {
position: absolute;
padding: 5px;
background-color: #007f8b;
color: #fff;
border: 1px dashed rgba(255, 255, 255, 0.7);
z-index: 2;
font-size: 12px;
margin: 0;
}
.highlighter {
background: rgba(0, 255, 0, 0.25);
border: 1px dashed #fff;
z-index: 1;
position: absolute;
}
.canvas {
z-index: 1;
position: absolute;
pointer-events: none;
}
.output_canvas {
transform: rotateY(180deg);
-webkit-transform: rotateY(180deg);
-moz-transform: rotateY(180deg);
}
.detectOnClick {
z-index: 0;
}
.detectOnClick img {
width: 100%;
}
.blend-shapes-item {
display: flex;
align-items: center;
height: 20px;
}
.blend-shapes-label {
display: flex;
width: 120px;
justify-content: flex-end;
align-items: center;
margin-right: 4px;
}
.blend-shapes-value {
display: flex;
height: 16px;
align-items: center;
background-color: #007f8b;
}
</style>
<link href="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css" rel="stylesheet">
<script src="https://unpkg.com/material-components-web@latest/dist/material-components-web.min.js"></script>
</head>
<body>
<h1>Face landmark detection using the MediaPipe FaceLandmarker task</h1>
<section id="demos" class="invisible">
<h2>Demo: Detecting Images</h2>
<p><b>Click on an image below</b> to see the key landmarks of the face.</p>
<div class="detectOnClick">
<img src="https://storage.googleapis.com/mediapipe-assets/portrait.jpg" width="100%" crossorigin="anonymous" title="Click to get detection!" />
</div>
<div class="blend-shapes">
<ul class="blend-shapes-list" id="image-blend-shapes"></ul>
</div>
<h2>Demo: Webcam continuous face landmarks detection</h2>
<p>Hold your face in front of your webcam to get real-time face landmarker detection.<br>Click <b>enable webcam</b> below and grant access to the webcam if prompted.</p>
<div id="liveView" class="videoView">
<button id="webcamButton" class="mdc-button mdc-button--raised">
<span class="mdc-button__ripple"></span>
<span class="mdc-button__label">ENABLE WEBCAM</span>
</button>
<div style="position: relative;">
<video id="webcam" style="position: abso" autoplay playsinline></video>
<canvas class="output_canvas" id="output_canvas" style="position: absolute; left: 0px; top: 0px;"></canvas>
</div>
</div>
<div class="blend-shapes">
<ul class="blend-shapes-list" id="video-blend-shapes"></ul>
</div>
</section>
<script type="module">
import vision from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0";
const { FaceLandmarker, FilesetResolver, DrawingUtils } = vision;
const demosSection = document.getElementById("demos");
const imageBlendShapes = document.getElementById("image-blend-shapes");
const videoBlendShapes = document.getElementById("video-blend-shapes");
let faceLandmarker;
let runningMode= "IMAGE" | "VIDEO";
let enableWebcamButton = HTMLButtonElement;
let webcamRunning= Boolean = false;
const videoWidth = 480;
// Before we can use the FaceLandmarker class we must wait for it to finish
// loading. Machine Learning models can be large and take a moment to
// get everything needed to run.
async function runDemo() {
// If you bundle this app yourself, you can copy the wasm assets (e.g. with
// CopyWebpackPlugin) to a local /wasm folder instead of the CDN path below.
const filesetResolver = await FilesetResolver.forVisionTasks(
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"
);
faceLandmarker = await FaceLandmarker.createFromOptions(filesetResolver, {
baseOptions: {
modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task`,
delegate: "GPU"
},
outputFaceBlendshapes: true,
runningMode,
numFaces: 1
});
demosSection.classList.remove("invisible");
}
runDemo();
/********************************************************************
// Demo 1: Grab a bunch of images from the page and run detection
// on them upon click.
********************************************************************/
// In this demo, we have put all our clickable images in divs with the
// CSS class 'detectOnClick'. Let's get all the elements that have
// this class.
const imageContainers = document.getElementsByClassName("detectOnClick");
// Now let's go through all of these and add a click event listener.
for (let i = 0; i < imageContainers.length; i++) {
// Add an event listener to the child element, which is the img element.
imageContainers[i].children[0].addEventListener("click", handleClick);
}
// When an image is clicked, let's detect it and display results!
async function handleClick(event) {
if (!faceLandmarker) {
console.log("Wait for faceLandmarker to load before clicking!");
return;
}
if (runningMode === "VIDEO") {
runningMode = "IMAGE";
await faceLandmarker.setOptions({ runningMode });
}
// Remove any landmarks drawn previously.
const allCanvas = event.target.parentNode.getElementsByClassName("canvas");
for (var i = allCanvas.length - 1; i >= 0; i--) {
const n = allCanvas[i];
n.parentNode.removeChild(n);
}
// We can call faceLandmarker.detect as many times as we like with
// different image data each time. It returns the detection result,
// which we then use to draw the landmarks on an overlay canvas.
const faceLandmarkerResult = faceLandmarker.detect(event.target);
const canvas = document.createElement("canvas");
canvas.setAttribute("class", "canvas");
canvas.setAttribute("width", event.target.naturalWidth + "px");
canvas.setAttribute("height", event.target.naturalHeight + "px");
canvas.style.left = "0px";
canvas.style.top = "0px";
canvas.style.width = `${event.target.width}px`;
canvas.style.height = `${event.target.height}px`;
event.target.parentNode.appendChild(canvas);
const ctx = canvas.getContext("2d");
const drawingUtils = new DrawingUtils(ctx);
for (const landmarks of faceLandmarkerResult.faceLandmarks) {
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_TESSELATION,
{ color: "#C0C0C070", lineWidth: 1 }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
{ color: "#30FF30" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
{ color: "#30FF30" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
{ color: "#E0E0E0" }
);
drawingUtils.drawConnectors(landmarks, FaceLandmarker.FACE_LANDMARKS_LIPS, {
color: "#E0E0E0"
});
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
{ color: "#30FF30" }
);
}
drawBlendShapes(imageBlendShapes, faceLandmarkerResult.faceBlendshapes);
}
/********************************************************************
// Demo 2: Continuously grab image from webcam stream and detect it.
********************************************************************/
const video = document.getElementById("webcam");
const canvasElement = document.getElementById(
"output_canvas"
);
const canvasCtx = canvasElement.getContext("2d");
// Check if webcam access is supported.
function hasGetUserMedia() {
return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
}
// If webcam supported, add event listener to button for when user
// wants to activate it.
if (hasGetUserMedia()) {
enableWebcamButton = document.getElementById(
"webcamButton"
);
enableWebcamButton.addEventListener("click", enableCam);
} else {
console.warn("getUserMedia() is not supported by your browser");
}
// Enable the live webcam view and start detection.
function enableCam(event) {
if (!faceLandmarker) {
console.log("Wait! faceLandmarker not loaded yet.");
return;
}
if (webcamRunning === true) {
webcamRunning = false;
enableWebcamButton.innerText = "ENABLE PREDICTIONS";
} else {
webcamRunning = true;
enableWebcamButton.innerText = "DISABLE PREDICTIONS";
}
// getUsermedia parameters.
const constraints = {
video: true
};
// Activate the webcam stream.
navigator.mediaDevices.getUserMedia(constraints).then(function (stream) {
video.srcObject = stream;
video.addEventListener("loadeddata", predictWebcam);
});
}
let lastVideoTime = -1;
let results = undefined;
const drawingUtils = new DrawingUtils(canvasCtx);
async function predictWebcam() {
const ratio = video.videoHeight / video.videoWidth;
video.style.width = videoWidth + "px";
video.style.height = videoWidth * ratio + "px";
canvasElement.style.width = videoWidth + "px";
canvasElement.style.height = videoWidth * ratio + "px";
canvasElement.width = video.videoWidth;
canvasElement.height = video.videoHeight;
// Now let's start detecting the stream.
if (runningMode === "IMAGE") {
runningMode = "VIDEO";
await faceLandmarker.setOptions({ runningMode: runningMode });
}
let nowInMs = Date.now();
if (lastVideoTime !== video.currentTime) {
lastVideoTime = video.currentTime;
results = faceLandmarker.detectForVideo(video, nowInMs);
}
if (results.faceLandmarks) {
for (const landmarks of results.faceLandmarks) {
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_TESSELATION,
{ color: "#C0C0C070", lineWidth: 1 }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
{ color: "#30FF30" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
{ color: "#30FF30" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
{ color: "#E0E0E0" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LIPS,
{ color: "#E0E0E0" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
{ color: "#FF3030" }
);
drawingUtils.drawConnectors(
landmarks,
FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
{ color: "#30FF30" }
);
}
}
drawBlendShapes(videoBlendShapes, results.faceBlendshapes);
// Call this function again to keep predicting when the browser is ready.
if (webcamRunning === true) {
window.requestAnimationFrame(predictWebcam);
}
}
function drawBlendShapes(el, blendShapes) { // TS parameter types removed for plain JS
if (!blendShapes.length) {
return;
}
let htmlMaker = "";
blendShapes[0].categories.map((shape) => {
htmlMaker += `
<li class="blend-shapes-item">
<span class="blend-shapes-label">${
shape.displayName || shape.categoryName
}</span>
<span class="blend-shapes-value" style="width: calc(${
+shape.score * 100
}% - 120px)">${(+shape.score).toFixed(4)}</span>
</li>
`;
});
el.innerHTML = htmlMaker;
}
</script>
</body>
</html>
<!-- end snippet -->
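One more local-setup note: the script is an ES module that imports @mediapipe/tasks-vision from a CDN, and the webcam demo needs getUserMedia; both can be blocked on pages opened directly via file:// in some browsers. If the page stays blank locally, serve the folder over HTTP instead, for example with Python's built-in server (python -m http.server) or npx serve, and open the http://localhost address it prints.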