Rendering Bezier expressions on Desmos and saving them as screenshots
Question
I have the following code that takes a video, converts it to frames using ffmpeg, runs OpenCV Canny edge detection on the frames, and then converts the images to JSON files of Bezier curve expressions.
const { Canvas, createCanvas, Image, ImageData, loadImage } = require('canvas');
const { JSDOM } = require('jsdom');
const { exec } = require('child_process');
const fs = require('fs');
const potrace = require('potrace');
const ffmpeg = require('ffmpeg');
const chalk = require('chalk');
const darkRed = chalk.hex('#b51b1b');
async function getCurveArray(file) {
    let curveArray = [];
    let paths;
    let x0, x1, x2, x3;
    let y0, y1, y2, y3;
    let trace = new potrace.Potrace();
    trace.loadImage(`./frames/${file}`, function (err) {
        if (err) throw err;
        trace.getPathTag();
        paths = trace._pathlist;
        for (let h = 0; h < paths.length; h++) {
            x0 = paths[h].x0;
            y0 = paths[h].y0;
            for (let k = 0; k < paths[h].curve.n; k++) {
                let curveDataIndex = k * 3;
                if (paths[h].curve.tag[k] === 'CORNER') {
                    // A corner segment is two straight lines, emitted as two linear Bezier expressions.
                    x1 = paths[h].curve.c[curveDataIndex + 1].x;
                    x2 = paths[h].curve.c[curveDataIndex + 2].x;
                    y1 = paths[h].curve.c[curveDataIndex + 1].y;
                    y2 = paths[h].curve.c[curveDataIndex + 2].y;
                    x0 = Math.round((x0 + Number.EPSILON) * 100) / 100;
                    x1 = Math.round((x1 + Number.EPSILON) * 100) / 100;
                    x2 = Math.round((x2 + Number.EPSILON) * 100) / 100;
                    y0 = Math.round((y0 + Number.EPSILON) * 100) / 100;
                    y1 = Math.round((y1 + Number.EPSILON) * 100) / 100;
                    y2 = Math.round((y2 + Number.EPSILON) * 100) / 100;
                    curveArray.push(`((1-t)${x0}+t${x1}, (1-t)${y0}+t${y1})`);
                    curveArray.push(`((1-t)${x1}+t${x2}, (1-t)${y1}+t${y2})`);
                } else {
                    // Otherwise the segment is a cubic Bezier, written out in nested (de Casteljau) form.
                    x1 = paths[h].curve.c[curveDataIndex].x;
                    x2 = paths[h].curve.c[curveDataIndex + 1].x;
                    x3 = paths[h].curve.c[curveDataIndex + 2].x;
                    y1 = paths[h].curve.c[curveDataIndex].y;
                    y2 = paths[h].curve.c[curveDataIndex + 1].y;
                    y3 = paths[h].curve.c[curveDataIndex + 2].y;
                    x0 = Math.round((x0 + Number.EPSILON) * 100) / 100;
                    x1 = Math.round((x1 + Number.EPSILON) * 100) / 100;
                    x2 = Math.round((x2 + Number.EPSILON) * 100) / 100;
                    x3 = Math.round((x3 + Number.EPSILON) * 100) / 100;
                    y0 = Math.round((y0 + Number.EPSILON) * 100) / 100;
                    y1 = Math.round((y1 + Number.EPSILON) * 100) / 100;
                    y2 = Math.round((y2 + Number.EPSILON) * 100) / 100;
                    y3 = Math.round((y3 + Number.EPSILON) * 100) / 100;
                    curveArray.push(
                        `((1-t)((1-t)((1-t)${x0}+t${x1})+t((1-t)${x1}+t${x2}))+t((1-t)((1-t)${x1}+t${x2})+t((1-t)${x2}+t${x3})), (1-t)((1-t)((1-t)${y0}+t${y1})+t((1-t)${y1}+t${y2}))+t((1-t)((1-t)${y1}+t${y2})+t((1-t)${y2}+t${y3})))`
                    );
                }
                // The end point of this segment becomes the start point of the next one.
                x0 = paths[h].curve.c[curveDataIndex + 2].x;
                y0 = paths[h].curve.c[curveDataIndex + 2].y;
            }
        }
        fs.writeFileSync(`./curves/curves_${file.split('_')[1].split('.')[0]}.json`, JSON.stringify(curveArray));
    });
}
function loadOpenCV() {
    return new Promise((resolve) => {
        global.Module = {
            onRuntimeInitialized: resolve,
        };
        global.cv = require('./lib/opencv.js');
    });
}
function installDOM() {
    const dom = new JSDOM();
    global.document = dom.window.document;
    global.Image = Image;
    global.HTMLCanvasElement = Canvas;
    global.ImageData = ImageData;
    global.HTMLImageElement = Image;
}
function videoToFrames(fileName, extension) {
    let fileExtension = extension?.length > 1 ? extension : 'mp4';
    try {
        var process = new ffmpeg(`./video/${fileName}.${fileExtension}`);
        process.then(
            function (video) {
                video.fnExtractFrameToJPG('./frames', {
                    file_name: 'frame',
                    size: `?x480`,
                    frame_rate: 30,
                });
            },
            function (err) {
                console.log('Error: ' + err);
            }
        );
    } catch (e) {
        console.log(e.code);
        console.log(e.msg);
    }
    console.log(darkRed('Video processed'));
    return 0;
}
async function cleanDirectories() {
    exec('rm -Force -r frames', { shell: 'powershell.exe' });
    exec('mkdir frames', { shell: 'powershell.exe' });
    exec('rm -Force -r curves', { shell: 'powershell.exe' });
    exec('mkdir curves', { shell: 'powershell.exe' });
}
videoToFrames('ganyu');
fs.readdir('frames', async function (err, files) {
    installDOM();
    await loadOpenCV();
    if (!files || files?.length == 0) {
        return console.log(darkRed('Frame directory was found empty or does not exist, attempt to rerun the program with the same parameters.'));
    }
    // Preprocess each frame: grayscale, bilateral filter, rotate 180, Canny edge
    // detection, then overwrite the frame with the resulting edge image.
    for (let i = 0; i < files.length; i++) {
        const image = await loadImage(`./frames/${files[i]}`);
        let src = cv.imread(image);
        cv.cvtColor(src, src, cv.COLOR_BGR2GRAY);
        let src2 = new cv.Mat();
        cv.bilateralFilter(src, src2, 5, 50, 50, cv.BORDER_DEFAULT);
        let src3 = new cv.Mat();
        cv.rotate(src2, src3, cv.ROTATE_180);
        let dst = new cv.Mat();
        cv.Canny(src3, dst, 30, 200, 3, true);
        const canvas = createCanvas(300, 300);
        cv.imshow(canvas, dst);
        fs.writeFileSync(`./frames/${files[i]}`, canvas.toBuffer('image/jpeg'));
        src.delete();
        src2.delete();
        src3.delete();
        dst.delete();
        console.clear();
        console.log(darkRed(`${i + 1}/${files.length} frames prepared.`));
    }
    // Trace each edge image into Bezier curve expressions.
    for (let j = 0; j < files.length; j++) {
        await getCurveArray(files[j]);
        console.clear();
        console.log(darkRed(`Frame ${j + 1}/${files.length} has been traced.`));
    }
});
My goal is to be able to render these frames individually in Desmos, capture the result and stitch the rendered frames back together into an .mp4.
My question is how should I even approach this? How could I render each frame, detect that it's "finished" rendering, and somehow save the image shown in the Desmos API?
I am also curious whether there are any obvious ways to reduce the number of curves without severely damaging the quality of the frames, other than lowering the resolution in the ffmpeg function.
Answer 1
Score: 0
Desmos API GraphingCalculator.asyncScreenshot()
Waits for the current graph state to be fully evaluated, and allows bounds to be passed to the function, making it easy to crop out anything unneeded.
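For reference, here is a minimal sketch of how that call could be driven from Node. It assumes the Desmos API is loaded in a headless page via Puppeteer; Puppeteer itself, the API version and `YOUR_API_KEY` placeholder in the script URL, the 800x480 size, the math bounds, and the `./rendered` output directory are all assumptions, not part of the question's code.

const puppeteer = require('puppeteer'); // assumed dependency
const fs = require('fs');

async function renderFrame(curveFile, index) {
    const curves = JSON.parse(fs.readFileSync(`./curves/${curveFile}`, 'utf8'));
    const browser = await puppeteer.launch();
    const page = await browser.newPage();
    // Minimal page that loads the Desmos calculator (substitute your own API key).
    await page.setContent(`
        <div id="calculator" style="width: 800px; height: 480px;"></div>
        <script src="https://www.desmos.com/api/v1.9/calculator.js?apiKey=YOUR_API_KEY"></script>
    `, { waitUntil: 'networkidle0' });
    const dataUri = await page.evaluate((expressions) => {
        const calc = Desmos.GraphingCalculator(document.getElementById('calculator'), {
            expressions: false, // hide the expression list, only the graph paper is wanted
        });
        expressions.forEach((latex, i) => calc.setExpression({ id: `curve${i}`, latex }));
        // asyncScreenshot waits until the graph state is fully evaluated, then hands
        // back a PNG data URI; mathBounds crops the view to the frame's pixel space.
        return new Promise((resolve) => {
            calc.asyncScreenshot(
                { width: 800, height: 480, mathBounds: { left: 0, right: 800, bottom: 0, top: 480 } },
                resolve
            );
        });
    }, curves);
    fs.writeFileSync(`./rendered/frame_${index}.png`, Buffer.from(dataUri.split(',')[1], 'base64'));
    await browser.close();
}

Because the callback only fires once the state is fully evaluated, there is no need to poll for a "finished" signal; the saved PNGs can then be stitched back into an .mp4 with ffmpeg. Reusing a single browser and page across frames, rather than relaunching per frame as in this sketch, would be considerably faster.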