Skip to content

Instantly share code, notes, and snippets.

@wibus-wee
Created April 13, 2022 10:11
Show Gist options
  • Save wibus-wee/41baf91440d11626dc987a20c0acda09 to your computer and use it in GitHub Desktop.
Save wibus-wee/41baf91440d11626dc987a20c0acda09 to your computer and use it in GitHub Desktop.
Fork from @iveseenthedatk
/**
* ASCII - CAM
* Fork from @iveseenthedatk
* https://hellogithub.com/onefile/code/126093303b6b414dbab9d623c957fdd4
**/
(() => {
'use strict';
let interval; // id of the currently-armed animation timer (setInterval)
let r_layer, g_layer, b_layer; // output elements for the red/green/blue channel layers
const palette = [ ' ', '.', '-', ':', '+', '*', '=', '%', '@', '#' ]; // characters ordered from blank to dense; index scales with channel intensity
const vanity = 'B Y I V E S E E N T H E D A R K'; // signature text spliced into the end of the first rendered row (see play_video)
/**
 * Wire a camera MediaStream into the hidden <video> element and start the
 * ASCII rendering loop once the stream's metadata is available.
 * @param {MediaStream} stream - camera stream delivered by getUserMedia
 */
const handle_video = stream => {
  const canvas = document.getElementById('canvas');
  const ctx = canvas.getContext('2d');
  const video = document.getElementById('video');
  // Older browsers may not have srcObject
  if ('srcObject' in video) {
    video.srcObject = stream;
  } else {
    // Avoid using this in new browsers, as it is going away.
    video.src = window.URL.createObjectURL(stream);
  }
  video.addEventListener('loadedmetadata', () => {
    // Size the canvas now and again on every window resize.
    window.addEventListener('resize', () => set_canvas_dimensions(video, canvas, ctx));
    set_canvas_dimensions(video, canvas, ctx);
    play_video(video, canvas, ctx);
    // Cross-fade from the logo to the ASCII output after one second.
    setTimeout(() => {
      document.getElementById('logo').style.opacity = 0;
      document.getElementById('ascii-cam').style.opacity = 1;
    }, 1000);
  });
};
/**
 * Fallback when no camera stream is available: after one second, fade the
 * logo out, reveal the ASCII output and the "enable camera" hint, and start
 * the random-noise animation instead.
 */
const handle_no_video = () => {
  setTimeout(() => {
    document.getElementById('logo').style.opacity = 0;
    for (const id of ['ascii-cam', 'enable-cam-msg']) {
      document.getElementById(id).style.opacity = 1;
    }
    play_random();
  }, 1000);
};
/**
 * Log a getUserMedia failure and fall back to the no-camera mode.
 * @param {*} err - error raised while acquiring the stream
 */
const handle_error = err => {
  console.error('Error', err);
  handle_no_video();
};
/**
 * Convert RGB(A) pixels to greyscale using the Rec. 601 luma weights:
 * GREY = 0.299 * RED + 0.587 * GREEN + 0.114 * BLUE
 * For more information see http://en.wikipedia.org/wiki/Grayscale
 * @param {Array} pixels - array of [r, g, b, (a)] tuples
 * @returns {Array} one greyscale value per input pixel
 */
const fade_to_grey = pixels => pixels.map(([r, g, b]) => r * 0.299 + g * 0.587 + b * 0.114);
/**
 * Render one ASCII frame from the live video, then re-arm its own timer so
 * the next frame is scheduled 33 ms later (~30 fps). Draws the video into
 * the canvas, samples the cropped pixel region, and emits one palette
 * character per pixel into each of the three colour layers.
 * @param {HTMLVideoElement} video
 * @param {HTMLCanvasElement} canvas
 * @param {CanvasRenderingContext2D} ctx
 */
const play_video = (video, canvas, ctx) => { // render a single frame
clearInterval(interval); // cancel the previous frame's timer before re-arming at the bottom
// Buffer reverse
ctx.drawImage(video, 0, 0, canvas.width * -1, canvas.height); // negative width; presumably pairs with the ctx.scale(-1, 1) applied in set_canvas_dimensions to mirror the feed — confirm
// Grab view
const dx = canvas.dataset.hoffset || 0; // horizontal crop offset written by set_canvas_dimensions
const dy = canvas.dataset.voffset || 0; // vertical crop offset written by set_canvas_dimensions
const image_width = canvas.width - dx * 2; // visible width after cropping both sides
const image_data = ctx.getImageData(dx, dy, image_width, canvas.height - dy * 2).data; // flat RGBA byte array of the cropped view
// Chunk
const channels = 4; // RGBA: four bytes per pixel
// Split the flat byte array into one [r, g, b, a] slice per pixel
const pixels = Array.from(Array(Math.ceil(image_data.length / channels)), (_, i) => image_data.slice(i * channels, i * channels + channels));
// Draw
let r_output = ''; // ASCII text accumulated for the red layer
let g_output = ''; // ASCII text accumulated for the green layer
let b_output = ''; // ASCII text accumulated for the blue layer
pixels.forEach((pixel, i) => { // one character per pixel
// Break for new line
if (i && i % image_width === 0) { // one text row per pixel row
r_output += '\n'; // newline on every layer so the three stay aligned
g_output += '\n';
b_output += '\n';
}
const b_idx = Math.floor((pixel[2] / 255.0) * (palette.length - 1)); // map blue intensity to a palette index
const r_idx = Math.floor((pixel[0] / 255.0) * (palette.length - 1));
const g_idx = Math.floor((pixel[1] / 255.0) * (palette.length - 1));
if (i && i >= image_width - vanity.length && i < image_width) { // tail end of the first row only
// Input vanity message
r_output += vanity[vanity.length + i - image_width]; // signature character instead of camera data
g_output += vanity[vanity.length + i - image_width];
b_output += palette[g_idx]; // NOTE(review): uses g_idx rather than b_idx here — possibly a deliberate tint behind the signature; confirm
} else {
// Output camera data
r_output += palette[r_idx];
g_output += palette[g_idx];
b_output += palette[b_idx];
}
}, this); // NOTE(review): thisArg has no effect on an arrow callback; harmless leftover
r_layer.textContent = r_output; // swap the whole layer text in one assignment
g_layer.textContent = g_output;
b_layer.textContent = b_output;
interval = setInterval(() => play_video(video, canvas, ctx), 33); // schedule the next frame (~30 fps)
};
/**
 * Draw animated random ASCII noise, densest near the screen centre and
 * fading to blanks toward the corners. Used when no camera is available.
 * Re-arms its own 100 ms timer on every call.
 */
const play_random = () => {
  clearInterval(interval); // cancel the previous tick before re-arming at the bottom
  const char_dims = get_character_dimensions();
  const screen_dims = get_screen_dimensions(char_dims);
  const total_chars = Math.floor(screen_dims.char_width * screen_dims.char_height);
  const mid_x = Math.floor(screen_dims.char_width / 2);
  const mid_y = Math.floor(screen_dims.char_height / 2);
  // Squared distance from the centre to a corner of the character grid.
  // BUG FIX: was `mid_x * mid_x + mid_y + mid_y` — the y term must be squared,
  // matching the squared distance computed per character below.
  let radius_sq = mid_x * mid_x + mid_y * mid_y;
  radius_sq = Math.max(radius_sq, 2500); // stop the circle getting too small
  let r_output = '';
  let g_output = '';
  let b_output = '';
  for (let i = 0; i < total_chars; i++) {
    // Break for new line
    if (i && i % screen_dims.char_width === 0) {
      r_output += '\n';
      g_output += '\n';
      b_output += '\n';
    }
    const char_pos_x = i % screen_dims.char_width; // column index
    // BUG FIX: the row index must be an integer, like the column above.
    const char_pos_y = Math.floor(i / screen_dims.char_width);
    // Squared distance of this cell from the centre, clamped so `range` below
    // never goes negative.
    let dist = (char_pos_x - mid_x) * (char_pos_x - mid_x) + (char_pos_y - mid_y) * (char_pos_y - mid_y);
    dist = Math.min(dist, radius_sq);
    // Fewer (sparser) palette choices the further from the centre.
    const range = Math.floor(palette.length * (1 - dist / radius_sq));
    const r_idx = Math.floor(Math.random() * range);
    const g_idx = Math.floor(Math.random() * range);
    const b_idx = Math.floor(Math.random() * range);
    r_output += palette[r_idx];
    g_output += palette[g_idx];
    b_output += palette[b_idx];
  }
  r_layer.textContent = r_output;
  g_layer.textContent = g_output;
  b_layer.textContent = b_output;
  interval = setInterval(play_random, 100); // schedule the next noise frame
};
/**
 * Measure the rendered size of one monospace glyph by inserting a throwaway
 * <span> into the output <pre>, measuring it, and removing it again.
 * @returns {DOMRect} bounding box of a single 'X' character
 */
const get_character_dimensions = () => {
  const probe = document.createElement('span');
  probe.textContent = 'X';
  // Park the probe off-screen so the measurement never flashes visibly.
  probe.style.position = 'absolute';
  probe.style.left = '-100px';
  document.querySelector('#ascii-cam pre').appendChild(probe);
  const rect = probe.getBoundingClientRect();
  probe.remove();
  return rect;
};
/**
 * Compute the viewport size in pixels and in character cells.
 * @param {DOMRect} char_dims - size of a single glyph
 * @returns {{width: number, height: number, char_width: number, char_height: number}}
 */
const get_screen_dimensions = char_dims => {
  const width = window.innerWidth;
  const height = window.innerHeight;
  return {
    width,
    height,
    char_width: Math.ceil(width / char_dims.width),
    char_height: Math.ceil(height / char_dims.height)
  };
};
/**
 * Size the canvas and video to the character grid and store crop offsets.
 * Height is forced to at least 75% of the width; whichever dimension then
 * overshoots the screen is centred via dataset.hoffset/voffset, which
 * play_video reads when calling getImageData.
 * @param {HTMLVideoElement} video
 * @param {HTMLCanvasElement} canvas
 * @param {CanvasRenderingContext2D} ctx
 */
const set_canvas_dimensions = (video, canvas, ctx) => { // resize canvas to the character grid
console.log('setting canvas dimensions');
const char_dims = get_character_dimensions(); // size of one glyph in px
const screen_dims = get_screen_dimensions(char_dims); // viewport in px and in character cells
let width = Math.floor(screen_dims.width / char_dims.width); // canvas width in character cells
let height = Math.max(width * 0.75, screen_dims.char_height); // enforce a minimum 4:3 height
if (height > screen_dims.char_height) { // taller than the screen: crop top and bottom
canvas.dataset.voffset = Math.floor((height - screen_dims.char_height) / 2); // centre the vertical crop
canvas.dataset.hoffset = 0;
} else { // otherwise widen back to 4:3 and crop left/right instead
width = (height / 3) * 4;
canvas.dataset.hoffset = Math.floor((width - screen_dims.char_width) / 2); // centre the horizontal crop
canvas.dataset.voffset = 0;
}
video.width = canvas.width = width;
video.height = canvas.height = height;
// Reset scale if context exists
// NOTE(review): presumably relies on the canvas.width assignment above having
// reset the context transform, so this flip does not compound across resizes —
// confirm against the canvas spec. Pairs with play_video's negative-width draw.
ctx && ctx.scale(-1, 1);
};
// Onload
window.addEventListener('load', () => {
if (navigator && navigator.mediaDevices) { // 判断浏览器是否支持MediaDevices
// Grab layers
r_layer = document.getElementById('r-output'); // 获取r通道的输出
g_layer = document.getElementById('g-output');
b_layer = document.getElementById('b-output');
const constraints = {audio : false, video : {}}; // 设置约束条件
navigator.mediaDevices // 获取浏览器的MediaDevices
.getUserMedia(constraints) // 获取媒体设备
.then(handle_video) // 处理视频
.catch(handle_error); // 处理错误
} else {
handle_no_video(); // 处理无视频
}
});
})();
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment