// Request a redraw on the next animation frame. The state.timers.raf flag
// coalesces repeated calls so that at most one frame is queued at a time.
function schedule_draw(state, context, animate = false) {
    if (!state.timers.raf) {
        window.requestAnimationFrame(async (ts) => {
            await draw(state, context, animate, ts);
        });
        state.timers.raf = true;
    }
}

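// Illustrative usage sketch (not from the original file): callers typically mutate
// state and then ask for a redraw; repeated calls within one event burst collapse
// into a single frame thanks to the raf flag. The handler name here is hypothetical.
function example_on_pan(state, context, dx, dy) {
    state.canvas.offset.x += dx;
    state.canvas.offset.y += dy;
    schedule_draw(state, context);
}
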
function upload_if_needed(gl, buffer_kind, serializer) {
    // (Re)allocate GPU storage when the CPU-side buffer was (re)grown
    if (serializer.need_gpu_allocate) {
        if (config.debug_print) console.debug('gpu allocate');
        gl.bufferData(buffer_kind, serializer.size, gl.DYNAMIC_DRAW);
        serializer.need_gpu_allocate = false;
        serializer.gpu_upload_from = 0;
    }

    // Upload only the bytes written since the previous upload
    if (serializer.gpu_upload_from < serializer.offset) {
        if (config.debug_print) console.debug('gpu upload');
        const upload_offset = serializer.gpu_upload_from;
        const upload_size = serializer.offset - upload_offset;
        gl.bufferSubData(buffer_kind, upload_offset, new Uint8Array(serializer.buffer, upload_offset, upload_size));
        serializer.gpu_upload_from = serializer.offset;
    }
}

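// Illustrative sketch (not from the original file): the shape of the "serializer"
// objects these upload helpers assume -- an ArrayBuffer plus byte cursors tracking
// how much has been written on the CPU and how much of that is already on the GPU.
// The field names match their usage above; the constructor itself is hypothetical.
function example_serializer_create(capacity) {
    return {
        buffer: new ArrayBuffer(capacity), // CPU-side staging storage
        size: capacity,                    // capacity in bytes (passed to gl.bufferData)
        offset: 0,                         // bytes written so far
        gpu_upload_from: 0,                // bytes already mirrored to the GPU
        need_gpu_allocate: true,           // set when the GPU buffer must be (re)allocated
    };
}
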
function upload_square_rgba16ui_texture(gl, serializer, texture_size) {
    const bpp = 2 * 4; // RGBA16UI: 4 channels, 2 bytes each
    const data_size = serializer.offset - serializer.gpu_upload_from;

    let data_pixels = data_size / bpp; // data_size % bpp is expected to always be zero here

    const pixels_already_uploaded = serializer.gpu_upload_from / bpp;
    let rows_uploaded = Math.floor(pixels_already_uploaded / texture_size);
    const rows_remainder = pixels_already_uploaded % texture_size;

    // Upload first non-whole row (if last upload was not a whole number of rows)
    if (rows_remainder > 0) {
        const row_upload_to_full = texture_size - rows_remainder;
        const first_upload = Math.min(row_upload_to_full, data_pixels);

        if (first_upload > 0) {
            gl.texSubImage2D(gl.TEXTURE_2D, 0, rows_remainder, rows_uploaded, first_upload, 1, gl.RGBA_INTEGER, gl.UNSIGNED_SHORT, new Uint16Array(serializer.buffer, serializer.gpu_upload_from, first_upload * 4));

            data_pixels -= first_upload;
            serializer.gpu_upload_from += first_upload * bpp; // advance in bytes, not pixels
            rows_uploaded += 1;
        }
    }

    const rows = Math.ceil(data_pixels / texture_size);
    const last_row = data_pixels % texture_size;
    const whole_rows = last_row > 0 ? rows - 1 : rows;
    const whole_upload = whole_rows * texture_size;

    // Upload whole rows
    if (whole_rows > 0) {
        gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, rows_uploaded, texture_size, whole_rows, gl.RGBA_INTEGER, gl.UNSIGNED_SHORT, new Uint16Array(serializer.buffer, serializer.gpu_upload_from, whole_upload * 4));
        rows_uploaded += whole_rows;
    }

    // Upload last (partial) row
    if (last_row > 0) {
        gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, rows_uploaded, last_row, 1, gl.RGBA_INTEGER, gl.UNSIGNED_SHORT, new Uint16Array(serializer.buffer, serializer.gpu_upload_from + whole_upload * bpp, last_row * 4));
    }

    serializer.gpu_upload_from = serializer.offset;
}

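// Illustrative sketch (not from the original file): allocating the kind of square
// RGBA16UI texture the uploader above targets. Integer textures must use NEAREST
// filtering; texture_size would be something like config.stroke_texture_size.
function example_create_rgba16ui_texture(gl, texture_size) {
    const texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texStorage2D(gl.TEXTURE_2D, 1, gl.RGBA16UI, texture_size, texture_size);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    return texture;
}
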
function draw_html(state) {
    // HUD-like things. Player cursors, screens
    for (const player_id in state.players) {
        if (player_id === state.me) continue;

        const player = state.players[player_id];
        let player_cursor_element = document.querySelector(`.player-cursor[data-player-id="${player_id}"]`);

        if (player_cursor_element === null && player.online) {
            player_cursor_element = insert_player_cursor(state, player_id);
        }

        if (!player.online && player_cursor_element !== null) {
            player_cursor_element.remove();
            const player_list_item = document.querySelector(`.player-list .player[data-player-id="${player_id}"]`);
            if (player_list_item) player_list_item.remove();
            if (document.querySelector('.player-list').childElementCount === 0) {
                document.querySelector('.player-list').classList.add('vhide');
            }
        }

        if (player_cursor_element && player.online) {
            const screenp = canvas_to_screen(state, player.cursor);
            player_cursor_element.style.transform = `translate(${Math.round(screenp.x)}px, ${Math.round(screenp.y)}px) rotate(-30deg)`;
        }
    }
}

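// Illustrative sketch (not from the original file): the canvas -> screen mapping that
// draw_html relies on and that the u_scale/u_translation uniforms below implement in
// the shaders: screen = point * zoom + offset. The real canvas_to_screen may differ.
function example_canvas_to_screen(state, p) {
    return {
        x: p.x * state.canvas.zoom + state.canvas.offset.x,
        y: p.y * state.canvas.zoom + state.canvas.offset.y,
    };
}
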
async function draw(state, context, animate, ts) {
    const dt = ts - context.last_frame_ts;
    const cpu_before = performance.now();

    context.last_frame_ts = ts;

    const gl = context.gl;
    const width = window.innerWidth;
    const height = window.innerHeight;

    bvh_clip(state, context);

    const segment_count = await geometry_write_instances(state, context);
    const dynamic_segment_count = context.dynamic_segment_count;
    const dynamic_stroke_count = context.dynamic_stroke_count;

    let query = null;

    if (context.gpu_timer_ext !== null) {
        query = gl.createQuery();
        gl.beginQuery(context.gpu_timer_ext.TIME_ELAPSED_EXT, query);
    }

    // Only clear once we have the data, this might not always be on the same frame?
    gl.viewport(0, 0, context.canvas.width, context.canvas.height);
    gl.clearColor(context.bgcolor.r, context.bgcolor.g, context.bgcolor.b, 1);
    gl.clearDepth(0.0);
    gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

    const locations = context.locations;
    const buffers = context.buffers;
    const programs = context.programs;
    const textures = context.textures;

    // Draw the background pattern
    if (state.background_pattern === 'dots') {
        const pr = programs['dots'];
        gl.useProgram(pr.program);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_dot']);
        gl.enableVertexAttribArray(pr.locations['a_center']);
        gl.vertexAttribPointer(pr.locations['a_center'], 2, gl.FLOAT, false, 2 * 4, 0);
        gl.vertexAttribDivisor(pr.locations['a_center'], 1);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);

        const zoom = state.canvas.zoom;
        const zoom_log2 = Math.log2(zoom);
        const zoom_previous = Math.pow(2, Math.floor(zoom_log2));
        const zoom_next = Math.pow(2, Math.ceil(zoom_log2));

        // Previous level
        {
            const one_dot = new Float32Array(geometry_gen_quad(0, 0, 1 / zoom_previous));
            const dot_instances = new Float32Array(geometry_gen_fullscreen_grid(state, context, 32 / zoom_previous, 32 / zoom_previous));
            const t = Math.min(1.0, 1.0 - (zoom / zoom_previous) / 2.0);

            gl.uniform1f(pr.locations['u_fadeout'], t);

            gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_dot']);
            gl.bufferData(gl.ARRAY_BUFFER, dot_instances, gl.STREAM_DRAW);

            gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, dot_instances.length / 2);
        }

        // Next level
        if (zoom_previous != zoom_next) {
            const dot_instances = new Float32Array(geometry_gen_fullscreen_grid(state, context, 32 / zoom_next, 32 / zoom_next));
            const t = Math.min(1.0, 1.0 - (zoom_next / zoom) / 2.0);

            gl.uniform1f(pr.locations['u_fadeout'], t);

            gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_dot']);
            gl.bufferData(gl.ARRAY_BUFFER, dot_instances, gl.STREAM_DRAW);

            gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, dot_instances.length / 2);
        }
    } else if (state.background_pattern === 'grid') {
        const pr = programs['grid'];
        const zoom = state.canvas.zoom;

        let zoom_log8 = Math.log(zoom) / Math.log(8);
        //if (zoom_log2 === Math.floor(zoom_log2)) {
        //    zoom_log2 -= 0.001;
        //}

        const zoom_previous = Math.pow(8, Math.floor(zoom_log8));
        let zoom_next = Math.pow(8, Math.ceil(zoom_log8));

        if (zoom_next === zoom_previous) {
            zoom_next = zoom_previous * 8;
        }

        gl.useProgram(pr.program);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_grid']);
        gl.enableVertexAttribArray(pr.locations['a_data']);
        gl.vertexAttribPointer(pr.locations['a_data'], 2, gl.FLOAT, false, 2 * 4, 0);
        gl.vertexAttribDivisor(pr.locations['a_data'], 1);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);
        gl.uniform1f(pr.locations['u_fadeout'], 1.0);

        // Previous level (major lines)
        {
            const grid_instances = new Float32Array(geometry_gen_fullscreen_grid_1d(state, context, 32 / zoom_previous, 32 / zoom_previous));
            let t = (zoom / zoom_previous - 1) / -7 + 1;
            t = 0.25;

            gl.uniform1f(pr.locations['u_fadeout'], t);

            gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_grid']);
            gl.bufferData(gl.ARRAY_BUFFER, grid_instances, gl.STREAM_DRAW);

            gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, grid_instances.length / 2);
        }

        // Next level (minor lines)
        {
            const grid_instances = new Float32Array(geometry_gen_fullscreen_grid_1d(state, context, 32 / zoom_next, 32 / zoom_next));
            let t = (zoom_next / zoom - 1) / 7;
            t = Math.min(0.1, -t + 1); // slight fade-in

            gl.uniform1f(pr.locations['u_fadeout'], t);

            gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_instance_grid']);
            gl.bufferData(gl.ARRAY_BUFFER, grid_instances, gl.STREAM_DRAW);

            gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, grid_instances.length / 2);
        }
    }

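    // Note on the background levels above: both patterns draw two nearby "levels"
    // of the pattern and cross-fade between them via u_fadeout, so the spacing stays
    // roughly constant on screen while zooming. Dots blend between the adjacent
    // powers of two around the current zoom (e.g. zoom 1.5 blends the spacing for
    // zoom 1 and zoom 2); the grid does the same with powers of eight.
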
    // Images
    {
        const pr = programs['image'];

        gl.clear(gl.DEPTH_BUFFER_BIT); // draw images above the background pattern
        gl.useProgram(pr.program);

        let offset = 0;

        const quads = geometry_image_quads(state, context);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_images']);
        gl.bufferData(gl.ARRAY_BUFFER, quads, gl.STATIC_DRAW);
        gl.vertexAttribDivisor(pr.locations['a_pos'], 0);

        gl.enableVertexAttribArray(pr.locations['a_pos']);
        gl.vertexAttribPointer(pr.locations['a_pos'], 2, gl.FLOAT, false, 2 * 4, 0);

        for (const entry of context.images) {
            if (!entry.deleted) {
                gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
                gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
                gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);
                gl.uniform1i(pr.locations['u_texture'], 0); // Only 1 active texture for each drawcall
                gl.uniform1i(pr.locations['u_solid'], 0);

                gl.bindTexture(gl.TEXTURE_2D, entry.texture);
                gl.drawArrays(gl.TRIANGLES, offset, 6);

                // Highlight active image
                if (entry.key === state.active_image) {
                    gl.uniform1i(pr.locations['u_solid'], 1);
                    gl.uniform4f(pr.locations['u_color'], 0.133 * 0.5, 0.545 * 0.5, 0.902 * 0.5, 0.5);
                    gl.drawArrays(gl.TRIANGLES, offset, 6);
                }
            }

            offset += 6;
        }
    }

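    // Layering note: the frame is composited in groups (background pattern, images,
    // static strokes, dynamic strokes, HUD). Each group clears the depth buffer before
    // it draws, so it always lands on top of everything drawn earlier regardless of the
    // depth values written before it; depth ordering still applies within a group,
    // assuming the depth test is enabled elsewhere in the setup code.
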
    const circle_segments = 32;
    const circle_points = circle_segments * 3;
    const circle_data = geometry_line_segments_with_two_circles(circle_segments);

    // "Static" data upload
    if (segment_count > 0) {
        const pr = programs['main'];

        gl.clear(gl.DEPTH_BUFFER_BIT); // draw strokes above the images
        gl.useProgram(pr.program);

        const total_static_size = context.instance_data_points.size * 4 +
            context.instance_data_ids.size * 4 +
            round_to_pow2(context.instance_data_pressures.size, 4) +
            circle_data.length * 4;

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_strokes_static']);
        gl.bufferData(gl.ARRAY_BUFFER, total_static_size, gl.DYNAMIC_DRAW);
        gl.bufferSubData(gl.ARRAY_BUFFER, 0, tv_data(context.instance_data_points));
        gl.bufferSubData(gl.ARRAY_BUFFER, context.instance_data_points.size * 4, tv_data(context.instance_data_ids));
        gl.bufferSubData(gl.ARRAY_BUFFER, context.instance_data_points.size * 4 + context.instance_data_ids.size * 4,
            tv_data(context.instance_data_pressures));
        gl.bufferSubData(gl.ARRAY_BUFFER, context.instance_data_points.size * 4 + context.instance_data_ids.size * 4 + round_to_pow2(context.instance_data_pressures.size, 4),
            circle_data);
        gl.bindTexture(gl.TEXTURE_2D, textures['stroke_data']);
        upload_square_rgba16ui_texture(gl, context.stroke_data, config.stroke_texture_size);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);
        gl.uniform1i(pr.locations['u_stroke_count'], state.events.length);
        gl.uniform1i(pr.locations['u_debug_mode'], state.debug.red);
        gl.uniform1i(pr.locations['u_stroke_data'], 0);
        gl.uniform1i(pr.locations['u_stroke_texture_size'], config.stroke_texture_size);
        gl.uniform1f(pr.locations['u_fixed_pixel_width'], 0);
        gl.uniform1i(pr.locations['u_circle_points'], circle_points);

        gl.enableVertexAttribArray(pr.locations['a_pos']);
        gl.enableVertexAttribArray(pr.locations['a_a']);
        gl.enableVertexAttribArray(pr.locations['a_b']);
        gl.enableVertexAttribArray(pr.locations['a_stroke_id']);
        gl.enableVertexAttribArray(pr.locations['a_pressure']);

        // Circle meshes (shared for all instances)
        gl.vertexAttribPointer(pr.locations['a_pos'], 2, gl.FLOAT, false, 2 * 4, context.instance_data_points.size * 4 + context.instance_data_ids.size * 4 + round_to_pow2(context.instance_data_pressures.size, 4));

        // Points (a, b) and stroke ids are stored in separate cpu buffers so that points can be reused (look at stride and offset values)
        gl.vertexAttribPointer(pr.locations['a_a'], 2, gl.FLOAT, false, 2 * 4, 0);
        gl.vertexAttribPointer(pr.locations['a_b'], 2, gl.FLOAT, false, 2 * 4, 2 * 4);
        gl.vertexAttribIPointer(pr.locations['a_stroke_id'], 1, gl.INT, 4, context.instance_data_points.size * 4);
        gl.vertexAttribPointer(pr.locations['a_pressure'], 2, gl.UNSIGNED_BYTE, true, 1, context.instance_data_points.size * 4 + context.instance_data_ids.size * 4);

        gl.vertexAttribDivisor(pr.locations['a_pos'], 0);
        gl.vertexAttribDivisor(pr.locations['a_a'], 1);
        gl.vertexAttribDivisor(pr.locations['a_b'], 1);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 1);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 1);

        // Static draw (everything already bound)
        gl.drawArraysInstanced(gl.TRIANGLES, 0, circle_points + 6, segment_count);

        // I don't really know why I need to do this, but it
        // makes the background pattern drawcall work properly
        gl.vertexAttribDivisor(pr.locations['a_pos'], 0);
        gl.vertexAttribDivisor(pr.locations['a_a'], 0);
        gl.vertexAttribDivisor(pr.locations['a_b'], 0);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 0);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 0);
    }

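    // Note on the divisor resets above (the same reset appears after the dynamic and
    // HUD draws below): vertexAttribDivisor state belongs to the attribute index, not
    // to the program, so leaving an index at divisor 1 would make the next
    // non-instanced draw that reuses that index read a single value for every vertex.
    // Resetting to 0 restores normal per-vertex fetching for the next frame's
    // background and image draw calls.
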
    // Dynamic draw (strokes currently being drawn)
    if (false && dynamic_segment_count > 0) {
        const pr = programs['main']; // same as static

        // Dynamic strokes should be drawn above static strokes
        gl.clear(gl.DEPTH_BUFFER_BIT);
        gl.useProgram(pr.program);

        gl.uniform1i(pr.locations['u_stroke_count'], dynamic_stroke_count);
        gl.uniform1i(pr.locations['u_stroke_data'], 0);
        gl.uniform1i(pr.locations['u_stroke_texture_size'], config.dynamic_stroke_texture_size);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_strokes_dynamic']);

        // Dynamic data upload
        const total_dynamic_size =
            context.dynamic_instance_points.size * 4 + context.dynamic_instance_ids.size * 4 +
            context.dynamic_instance_pressure.size;

        gl.bufferData(gl.ARRAY_BUFFER, total_dynamic_size, gl.STREAM_DRAW);
        gl.bufferSubData(gl.ARRAY_BUFFER, 0, tv_data(context.dynamic_instance_points));
        gl.bufferSubData(gl.ARRAY_BUFFER, context.dynamic_instance_points.size * 4, tv_data(context.dynamic_instance_ids));
        gl.bufferSubData(gl.ARRAY_BUFFER, context.dynamic_instance_points.size * 4 + context.dynamic_instance_ids.size * 4,
            tv_data(context.dynamic_instance_pressure));
        gl.bindTexture(gl.TEXTURE_2D, textures['dynamic_stroke_data']);
        upload_square_rgba16ui_texture(gl, context.dynamic_stroke_data, config.dynamic_stroke_texture_size);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);

        gl.uniform1i(pr.locations['u_stroke_count'], context.dynamic_stroke_count);
        gl.uniform1i(pr.locations['u_debug_mode'], state.debug.red);
        gl.uniform1i(pr.locations['u_stroke_data'], 0);
        gl.uniform1i(pr.locations['u_stroke_texture_size'], config.dynamic_stroke_texture_size);
        gl.uniform1f(pr.locations['u_fixed_pixel_width'], 0);

        gl.enableVertexAttribArray(pr.locations['a_a']);
        gl.enableVertexAttribArray(pr.locations['a_b']);
        gl.enableVertexAttribArray(pr.locations['a_stroke_id']);
        gl.enableVertexAttribArray(pr.locations['a_pressure']);

        // Points (a, b) and stroke ids are stored in separate cpu buffers so that points can be reused (look at stride and offset values)
        if (context.dynamic_instance_ids.size > 1) {
            gl.vertexAttribPointer(pr.locations['a_a'], 2, gl.FLOAT, false, 2 * 4, 0);
            gl.vertexAttribPointer(pr.locations['a_b'], 2, gl.FLOAT, false, 2 * 4, 2 * 4);
        } else {
            // A special case where there is no second point. Reuse the first point and handle the zero length segment in the shader
            gl.vertexAttribPointer(pr.locations['a_a'], 2, gl.FLOAT, false, 2 * 4, 0);
            gl.vertexAttribPointer(pr.locations['a_b'], 2, gl.FLOAT, false, 2 * 4, 0);
        }

        gl.vertexAttribIPointer(pr.locations['a_stroke_id'], 1, gl.INT, 4, context.dynamic_instance_points.size * 4);
        gl.vertexAttribPointer(pr.locations['a_pressure'], 2, gl.UNSIGNED_BYTE, true, 1, context.dynamic_instance_points.size * 4 + context.dynamic_instance_ids.size * 4);

        gl.vertexAttribDivisor(pr.locations['a_a'], 1);
        gl.vertexAttribDivisor(pr.locations['a_b'], 1);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 1);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 1);

        gl.drawArraysInstanced(gl.TRIANGLES, 0, 32 * 3 + 6 + 32 * 3, dynamic_segment_count);

        gl.vertexAttribDivisor(pr.locations['a_a'], 0);
        gl.vertexAttribDivisor(pr.locations['a_b'], 0);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 0);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 0);
    }

    // HUD: resize handles, etc
    if (state.active_image !== null) {
        const pr = programs['main']; // same as static
        const handles = geometry_generate_handles(state, context, state.active_image);
        const ui_segments = 7 * 4 - 1; // each square = 4, each line = 1, square->line = 1, line->square = 1

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_hud']);
        gl.bufferData(gl.ARRAY_BUFFER, handles.points.byteLength + handles.ids.byteLength + handles.pressures.byteLength, gl.STREAM_DRAW);
        gl.bufferSubData(gl.ARRAY_BUFFER, 0, handles.points);
        gl.bufferSubData(gl.ARRAY_BUFFER, handles.points.byteLength, handles.ids);
        gl.bufferSubData(gl.ARRAY_BUFFER, handles.points.byteLength + handles.ids.byteLength, handles.pressures);

        gl.bindTexture(gl.TEXTURE_2D, textures['ui']);
        upload_square_rgba16ui_texture(gl, handles.stroke_data, config.ui_texture_size);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);
        gl.uniform1i(pr.locations['u_stroke_count'], 8);
        gl.uniform1i(pr.locations['u_debug_mode'], 0);
        gl.uniform1i(pr.locations['u_stroke_data'], 0);
        gl.uniform1i(pr.locations['u_stroke_texture_size'], config.ui_texture_size);
        gl.uniform1f(pr.locations['u_fixed_pixel_width'], 2);

        gl.enableVertexAttribArray(pr.locations['a_a']);
        gl.enableVertexAttribArray(pr.locations['a_b']);
        gl.enableVertexAttribArray(pr.locations['a_stroke_id']);
        gl.enableVertexAttribArray(pr.locations['a_pressure']);

        gl.vertexAttribPointer(pr.locations['a_a'], 2, gl.FLOAT, false, 2 * 4, 0);
        gl.vertexAttribPointer(pr.locations['a_b'], 2, gl.FLOAT, false, 2 * 4, 2 * 4);
        gl.vertexAttribIPointer(pr.locations['a_stroke_id'], 1, gl.INT, 4, handles.points.byteLength);
        gl.vertexAttribPointer(pr.locations['a_pressure'], 2, gl.UNSIGNED_BYTE, true, 1, handles.points.byteLength + handles.ids.byteLength);

        gl.vertexAttribDivisor(pr.locations['a_a'], 1);
        gl.vertexAttribDivisor(pr.locations['a_b'], 1);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 1);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 1);

        gl.drawArraysInstanced(gl.TRIANGLES, 0, 32 * 3 + 6 + 32 * 3, ui_segments);

        // I don't really know why I need to do this, but it
        // makes the background pattern drawcall work properly
        gl.vertexAttribDivisor(pr.locations['a_a'], 0);
        gl.vertexAttribDivisor(pr.locations['a_b'], 0);
        gl.vertexAttribDivisor(pr.locations['a_stroke_id'], 0);
        gl.vertexAttribDivisor(pr.locations['a_pressure'], 0);
    }

    if (config.draw_bvh) {
        const pr = programs['iquad'];
        const bboxes = tv_create(Float32Array, context.clipped_indices.size * 4);
        // Debug BVH viz
        for (let i = 0; i < context.clipped_indices.size; ++i) {
            const stroke_id = context.clipped_indices.data[i];
            const stroke = state.events[stroke_id];
            tv_add(bboxes, stroke.bbox.x1);
            tv_add(bboxes, stroke.bbox.y1);
            tv_add(bboxes, stroke.bbox.x2);
            tv_add(bboxes, stroke.bbox.y2);
        }

        const quad_count = bboxes.size / 4;

        gl.useProgram(pr.program);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_iquads']);
        gl.bufferData(gl.ARRAY_BUFFER, tv_data(bboxes), gl.STREAM_DRAW);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);

        gl.enableVertexAttribArray(pr.locations['a_topleft']);
        gl.enableVertexAttribArray(pr.locations['a_bottomright']);

        gl.vertexAttribPointer(pr.locations['a_topleft'], 2, gl.FLOAT, false, 4 * 4, 0);
        gl.vertexAttribPointer(pr.locations['a_bottomright'], 2, gl.FLOAT, false, 4 * 4, 2 * 4);

        gl.vertexAttribDivisor(pr.locations['a_topleft'], 1);
        gl.vertexAttribDivisor(pr.locations['a_bottomright'], 1);

        gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, quad_count);

        gl.vertexAttribDivisor(pr.locations['a_topleft'], 0);
        gl.vertexAttribDivisor(pr.locations['a_bottomright'], 0);
    }

    if (config.draw_fullnodes) {
        const quads = bvh_get_fullnodes_debug(state, context);
        const pr = programs['iquad'];
        const bboxes = tv_create(Float32Array, quads.length * 4);

        for (let i = 0; i < quads.length; ++i) {
            const bbox = quads[i];
            tv_add(bboxes, bbox.x1);
            tv_add(bboxes, bbox.y1);
            tv_add(bboxes, bbox.x2);
            tv_add(bboxes, bbox.y2);
        }

        const quad_count = bboxes.size / 4;

        gl.useProgram(pr.program);

        gl.bindBuffer(gl.ARRAY_BUFFER, buffers['b_iquads']);
        gl.bufferData(gl.ARRAY_BUFFER, tv_data(bboxes), gl.STREAM_DRAW);

        gl.uniform2f(pr.locations['u_res'], context.canvas.width, context.canvas.height);
        gl.uniform2f(pr.locations['u_scale'], state.canvas.zoom, state.canvas.zoom);
        gl.uniform2f(pr.locations['u_translation'], state.canvas.offset.x, state.canvas.offset.y);

        gl.enableVertexAttribArray(pr.locations['a_topleft']);
        gl.enableVertexAttribArray(pr.locations['a_bottomright']);

        gl.vertexAttribPointer(pr.locations['a_topleft'], 2, gl.FLOAT, false, 4 * 4, 0);
        gl.vertexAttribPointer(pr.locations['a_bottomright'], 2, gl.FLOAT, false, 4 * 4, 2 * 4);

        gl.vertexAttribDivisor(pr.locations['a_topleft'], 1);
        gl.vertexAttribDivisor(pr.locations['a_bottomright'], 1);

        gl.drawArraysInstanced(gl.TRIANGLES, 0, 6, quad_count);

        gl.vertexAttribDivisor(pr.locations['a_topleft'], 0);
        gl.vertexAttribDivisor(pr.locations['a_bottomright'], 0);
    }

    document.getElementById('debug-stats').innerHTML = `
        <span>Strokes onscreen: ${context.clipped_indices.size}</span>
        <span>Segments onscreen: ${segment_count}</span>
        <span>Canvas offset: (${Math.round(state.canvas.offset.x * 100) / 100}, ${Math.round(state.canvas.offset.y * 100) / 100})</span>
        <span>Canvas zoom level: ${state.canvas.zoom_level}</span>
        <span>Canvas zoom: ${Math.round(state.canvas.zoom * 100) / 100}</span>`;

    if (context.gpu_timer_ext) {
        gl.endQuery(context.gpu_timer_ext.TIME_ELAPSED_EXT);

        const next_tick = () => {
            if (query) {
                // At some point in the future, after returning control to the browser
                const available = gl.getQueryParameter(query, gl.QUERY_RESULT_AVAILABLE);
                const disjoint = gl.getParameter(context.gpu_timer_ext.GPU_DISJOINT_EXT);

                if (available && !disjoint) {
                    // See how much time the rendering of the object took in nanoseconds.
                    const timeElapsed = gl.getQueryParameter(query, gl.QUERY_RESULT);
                    //console.debug(timeElapsed / 1000000);
                    document.querySelector('.debug-timings .gpu').innerHTML = 'Last GPU Frametime: ' + Math.round(timeElapsed / 10000) / 100 + 'ms';
                }

                if (available || disjoint) {
                    // Clean up the query object.
                    gl.deleteQuery(query);
                    // Don't re-enter this polling loop.
                    query = null;
                } else if (!available) {
                    setTimeout(next_tick, 0);
                }
            }
        };

        setTimeout(next_tick, 0);
    }

    const cpu_after = performance.now();

    state.timers.raf = false;

    document.querySelector('.debug-timings .cpu').innerHTML = 'Last CPU Frametime: ' + Math.round((cpu_after - cpu_before) * 100) / 100 + 'ms';

    if (state.debug.benchmark_mode) {
        const redraw = state.debug.on_benchmark();
        if (redraw) {
            schedule_draw(state, context);
        }
    }

    if (state.canvas.target_zoom != state.canvas.zoom) {
        update_canvas_zoom(state, state.canvas.zoom, state.canvas.target_zoom, animate ? dt : context.last_frame_dt);
        schedule_draw(state, context, true);
    }

    context.last_frame_dt = dt;
}

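// Illustrative sketch (not from the original file): a plausible shape for the
// round_to_pow2 helper used in the static-stroke upload above -- rounding a byte
// count up to the next multiple of a power-of-two alignment, so the circle mesh
// data that follows the pressure bytes starts on a 4-byte boundary. The real
// helper may differ.
function example_round_to_pow2(value, pow2) {
    return (value + pow2 - 1) & ~(pow2 - 1);
}
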
function update_canvas_zoom(state, current, target, dt) {
    // Exponentially approach the target zoom; dt is normalized against a 60 Hz frame (16.66 ms)
    const rate = Math.min(1.0, dt / 16.66 * 0.3);

    if (Math.abs(1.0 - current / target) > 0.01) {
        state.canvas.zoom = current + (target - current) * rate;
    } else {
        // Close enough: snap to the target so the animation terminates
        state.canvas.zoom = target;
    }

    // Keep the screen point the zoom was anchored at fixed while the zoom changes
    // https://gist.github.com/aolo2/a373363419bd5a9283977ab9f8841f78
    const zc = state.canvas.zoom_screenp;
    state.canvas.offset.x = zc.x - (zc.x - state.canvas.offset.x) * state.canvas.zoom / current;
    state.canvas.offset.y = zc.y - (zc.y - state.canvas.offset.y) * state.canvas.zoom / current;

    update_cursor(state);
}
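
// Illustrative sketch (not from the original file): how context.gpu_timer_ext is
// presumably obtained. EXT_disjoint_timer_query_webgl2 exposes the TIME_ELAPSED_EXT
// and GPU_DISJOINT_EXT constants the query polling in draw() relies on; getExtension
// returns null where the extension is unavailable, which disables the GPU timing path.
function example_init_gpu_timer(gl) {
    return gl.getExtension('EXT_disjoint_timer_query_webgl2');
}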