How to make simple triangle from vertex and color that sore to storage buffer

51 Views Asked by At

Hi i am learning WebGPU by following from https://webgpufundamentals.org/webgpu/lessons/webgpu-storage-buffers.html and other lessons in same website.

May be any of you guys have time to see my code below what what i miss or wrong, thanks in advance.

(async()=>{

 // here are triangle color & triangle vertex position
 let triangle = [[1.0,0.0,0.0,1.0], [[0.0,0.0],[-0.5,-0.5],[0.5,-0.5]]];

 let wgpuinBrowser = navigator.gpu;
 let presentationFormat = wgpuinBrowser.getPreferredCanvasFormat();
 let adapter = await wgpuinBrowser.requestAdapter();
 let device = await adapter.requestDevice();
 let canvas = document.querySelector('canvas');
 let wgpuContext = canvas.getContext('webgpu');

 wgpuContext.configure({
  device: device,
  format: presentationFormat,
  usage: GPUTextureUsage.RENDER_ATTACHMENT,
  alphaMode: 'premultiplied'
 });

 let devicePixelRatio = window.devicePixelRatio || 1;
 let canvasWidht = wgpuContext.canvas.clientWidth * devicePixelRatio;
 let canvasHeight = wgpuContext.canvas.clientHeight * devicePixelRatio;
 let displayPPI = 102;
 let Vtranslation = [0,0];
 let Vzoom = 1;
 let Vzstep = 0.1;

 // for static uniform;
 let staticUniformDataSize = (2+2+4); // unitDisplay, scale, padding
 let staticUniformDataValues = new Float32Array(staticUniformDataSize);
 staticUniformDataValues.set([displayPPI/25.4,displayPPI/25.4]); // the unitDisplay
 staticUniformDataValues.set([2/canvasWidht,2/canvasHeight]); // the canvas scale

 let staticUniformBuffer = device.createBuffer({
  label: `uniforms static`,
  size: staticUniformDataSize * 4,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(staticUniformBuffer, 0, staticUniformDataValues);

 // for dynamic uniform;
 let dynamicUniformDataSize = (2+2); // translation, zoom
 let dinamicUniformDataValues = new Float32Array(dynamicUniformDataSize);

 let dinamicUniformBuffer = device.createBuffer({
  label: `uniforms dinamic`,
  size: dynamicUniformDataSize * 4,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 // for vertex data;
 let vertexData = triangle[1];
 let vertexDataValues = new Float32Array(vertexData);

 let vertexStorageBuffer = device.createBuffer({
  label: 'storage buffer vertices',
  size: vertexDataValues.byteLength,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(vertexStorageBuffer, 0, vertexDataValues);

 // for color data;
 let colorData = triangle[0];
 let colorDataValues = new Float32Array(colorData);
 
 let colorStorageBuffer = device.createBuffer({
  label: 'storage buffer color',
  size: colorDataValues.byteLength,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(colorStorageBuffer, 0, colorDataValues);

 let shaderModule = device.createShaderModule({
  label: 'shader module',
  code:
   `
   struct staticDataType {
    unitDisplay: vec2f,
    scale: vec2f,
   };

   struct dinamicDataType {
    translation: vec2f,
    zoom: f32,
   };

   struct vertexDataType {
    position: vec2f,
   };

   struct fragmentDataType {
    color: vec4f,
   };

   @group(0) @binding(0) var<storage, read> staticData: staticDataType;
   @group(0) @binding(1) var<storage, read> dynamicData: dinamicDataType;
   @group(0) @binding(2) var<storage, read> vertexData: array<vertexDataType>;
   @group(0) @binding(3) var<storage, read> fragmentData: fragmentDataType;

   @vertex fn vs(@builtin(vertex_index) vertexIndex : u32) ->  @builtin(position) vec4f {
    return vec4f((vertexData[vertexIndex].position + dynamicData.translation) *  staticData.unitDisplay * staticData.scale * dynamicData.zoom, 0.0, 1.0);
   }

   @fragment fn fs() -> @location(0) vec4f {
    return vec4<f32>(fragmentData.color);
   }
            `,      
  })

 let renderPipeLine = device.createRenderPipeline({
  label: 'RenderPipeline',
  layout: 'auto',
  vertex: {
   module: shaderModule,
   entryPoint: 'vs',
  },
  fragment: {
   module: shaderModule,
   entryPoint: 'fs',
   targets: [{format: presentationFormat}],
  },
 })

 // create bind group;
 let bindGroup = device.createBindGroup({
  label: 'bind group for objects',
  layout: renderPipeLine.getBindGroupLayout(0),
  entries: [
   { binding: 0, resource: { buffer: staticUniformBuffer }},
   { binding: 1, resource: { buffer: dinamicUniformBuffer }},
   { binding: 2, resource: { buffer: vertexStorageBuffer }},
   { binding: 3, resource: { buffer: colorStorageBuffer }},
  ],
 })

 // render pass descriptor
 let renderPassDescriptor = {
  colorAttachments: [{
   clearValue: VbackgrounColor,
   loadOp: 'clear',
   storeOp: 'store'
  }]
 }

 // render
 function Frender(){
  renderPassDescriptor.colorAttachments[0].view = wgpuContext.getCurrentTexture().createView();

  dinamicUniformDataValues.set(Vtranslation); // set the translation
  dinamicUniformDataValues.set(Vzoom); // set the scale
  device.queue.writeBuffer(dinamicUniformBuffer, 0, dinamicUniformDataValues);
    
  const encoder = device.createCommandEncoder({label:'encoder'});
  const pass = encoder.beginRenderPass(renderPassDescriptor);
   pass.setPipeline(renderPipeLine);
   pass.setBindGroup(0, bindGroup);
   pass.draw(triangle[1].length);
   pass.end();
  const commandBuffer = encoder.finish();
  device.queue.submit([commandBuffer]); 
 }

 // observe canvas size;
 const observer = new ResizeObserver(entries => {

  for (const entry of entries) {
   canvasWidht = entry.devicePixelContentBoxSize?.[0].inlineSize || entry.contentBoxSize[0].inlineSize * devicePixelRatio;
   canvasHeight = entry.devicePixelContentBoxSize?.[0].blockSize || entry.contentBoxSize[0].blockSize * devicePixelRatio;

   let canvas = entry.target;
   canvas.width = Math.max(1, Math.min(canvasWidht, device.limits.maxTextureDimension2D));
   canvas.height = Math.max(1, Math.min(canvasHeight, device.limits.maxTextureDimension2D));

   Frender();

  }

 }); observer.observe(canvas);

 canvas.addEventListener('wheel',(e)=>{
  if(+e.deltaY > 0){
   Vzoom = Math.ceil(Vzoom + Vzoom*Vzstep);
   Frender();
  }else if(Vzoom > 1){ // scroll up; && Vzoom > 1;
   Vzoom = Math.floor(Vzoom - Vzoom*Vzstep);
   Frender();
  } 
 });

})();


i tried to simplify for only creating triangle with storage buffer for storing vertex data, color, translation, and scale for that tutorial. It work but i can not see the red triangle.

i already try few days to mixing with other method but still not work.

1

There are 1 best solutions below

1
gman On BEST ANSWER

Issues:

  1. The code wasn't flattening the vertex data which was 3 arrays of 2 values each instead of just 1 array of 6 values.

    let vertexDataValues = new Float32Array(vertexData);  // error
    
    let vertexDataValues = new Float32Array(vertexData.flat());   // ok
    
  2. The code wasn't setting the zoom correctly in dinamicUniformDataValues

    set takes an array of values, not a number. And, you need the pass in the offset for the zoom

    dinamicUniformDataValues.set(Vzoom); // set the scale
    
    dinamicUniformDataValues.set([Vzoom], 2); // set the scale
    
  3. No offset for setting the scale on staticUniformDataValues

      staticUniformDataValues.set([2/canvasWidht,2/canvasHeight]);  // wrong
    
      staticUniformDataValues.set([2/canvasWidht,2/canvasHeight], 2); // correct
    
  4. The values used for staticUniformDataValues didn't make sense

    Same as above, set them all to 1 just to test.

(async()=>{

 // here are triangle color & triangle vertex position
 let triangle = [[1.0,0.0,0.0,1.0], [[0.0,0.0],[-0.5,-0.5],[0.5,-0.5]]];

 let wgpuinBrowser = navigator.gpu;
 let presentationFormat = wgpuinBrowser.getPreferredCanvasFormat();
 let adapter = await wgpuinBrowser.requestAdapter();
 let device = await adapter.requestDevice();
 let canvas = document.querySelector('canvas');
 let wgpuContext = canvas.getContext('webgpu');

 wgpuContext.configure({
  device: device,
  format: presentationFormat,
  usage: GPUTextureUsage.RENDER_ATTACHMENT,
  alphaMode: 'premultiplied'
 });

 let devicePixelRatio = window.devicePixelRatio || 1;
 let canvasWidht = wgpuContext.canvas.clientWidth * devicePixelRatio;
 let canvasHeight = wgpuContext.canvas.clientHeight * devicePixelRatio;
 let displayPPI = 102;
 let Vtranslation = [0,0];
 let Vzoom = 1;
 let Vzstep = 0.1;

 // for static uniform;
 let staticUniformDataSize = (2+2+4); // unitDisplay, scale, padding
 let staticUniformDataValues = new Float32Array(staticUniformDataSize);
 staticUniformDataValues.set([1, 1]); // the unitDisplay
 staticUniformDataValues.set([1, 1], 2); // the canvas scale

 let staticUniformBuffer = device.createBuffer({
  label: `uniforms static`,
  size: staticUniformDataSize * 4,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(staticUniformBuffer, 0, staticUniformDataValues);

 // for dynamic uniform;
 let dynamicUniformDataSize = (2+2); // translation, zoom
 let dinamicUniformDataValues = new Float32Array(dynamicUniformDataSize);

 let dinamicUniformBuffer = device.createBuffer({
  label: `uniforms dinamic`,
  size: dynamicUniformDataSize * 4,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 // for vertex data;
 let vertexData = triangle[1];
 let vertexDataValues = new Float32Array(vertexData.flat());

 let vertexStorageBuffer = device.createBuffer({
  label: 'storage buffer vertices',
  size: vertexDataValues.byteLength,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(vertexStorageBuffer, 0, vertexDataValues);

 // for color data;
 let colorData = triangle[0];
 let colorDataValues = new Float32Array(colorData);
 
 let colorStorageBuffer = device.createBuffer({
  label: 'storage buffer color',
  size: colorDataValues.byteLength,
  usage: GPUBufferUsage.STORAGE|GPUBufferUsage.COPY_DST,
 });

 device.queue.writeBuffer(colorStorageBuffer, 0, colorDataValues);

 let shaderModule = device.createShaderModule({
  label: 'shader module',
  code:
   `
   struct staticDataType {
    unitDisplay: vec2f,
    scale: vec2f,
   };

   struct dinamicDataType {
    translation: vec2f,
    zoom: f32,
   };

   struct vertexDataType {
    position: vec2f,
   };

   struct fragmentDataType {
    color: vec4f,
   };

   @group(0) @binding(0) var<storage, read> staticData: staticDataType;
   @group(0) @binding(1) var<storage, read> dynamicData: dinamicDataType;
   @group(0) @binding(2) var<storage, read> vertexData: array<vertexDataType>;
   @group(0) @binding(3) var<storage, read> fragmentData: fragmentDataType;

   @vertex fn vs(@builtin(vertex_index) vertexIndex : u32) ->  @builtin(position) vec4f {
    return vec4f((vertexData[vertexIndex].position + dynamicData.translation) *  staticData.unitDisplay * staticData.scale * dynamicData.zoom, 0.0, 1.0);
   }

   @fragment fn fs() -> @location(0) vec4f {
    return vec4<f32>(fragmentData.color);
   }
            `,      
  })

 let renderPipeLine = device.createRenderPipeline({
  label: 'RenderPipeline',
  layout: 'auto',
  vertex: {
   module: shaderModule,
   entryPoint: 'vs',
  },
  fragment: {
   module: shaderModule,
   entryPoint: 'fs',
   targets: [{format: presentationFormat}],
  },
 })

 // create bind group;
 let bindGroup = device.createBindGroup({
  label: 'bind group for objects',
  layout: renderPipeLine.getBindGroupLayout(0),
  entries: [
   { binding: 0, resource: { buffer: staticUniformBuffer }},
   { binding: 1, resource: { buffer: dinamicUniformBuffer }},
   { binding: 2, resource: { buffer: vertexStorageBuffer }},
   { binding: 3, resource: { buffer: colorStorageBuffer }},
  ],
 })

 const VbackgrounColor = [0.3, 0.3, 0.3, 1];
 // render pass descriptor
 let renderPassDescriptor = {
  colorAttachments: [{
   clearValue: VbackgrounColor,
   loadOp: 'clear',
   storeOp: 'store'
  }]
 }

 // render
 function Frender(){
  renderPassDescriptor.colorAttachments[0].view = wgpuContext.getCurrentTexture().createView();

  dinamicUniformDataValues.set(Vtranslation); // set the translation
  dinamicUniformDataValues.set([Vzoom], 2); // set the scale
  device.queue.writeBuffer(dinamicUniformBuffer, 0, dinamicUniformDataValues);
    
  const encoder = device.createCommandEncoder({label:'encoder'});
  const pass = encoder.beginRenderPass(renderPassDescriptor);
   pass.setPipeline(renderPipeLine);
   pass.setBindGroup(0, bindGroup);
   pass.draw(triangle[1].length);
   pass.end();
  const commandBuffer = encoder.finish();
  device.queue.submit([commandBuffer]); 
 }

 // observe canvas size;
 const observer = new ResizeObserver(entries => {

  for (const entry of entries) {
   canvasWidht = entry.devicePixelContentBoxSize?.[0].inlineSize || entry.contentBoxSize[0].inlineSize * devicePixelRatio;
   canvasHeight = entry.devicePixelContentBoxSize?.[0].blockSize || entry.contentBoxSize[0].blockSize * devicePixelRatio;

   let canvas = entry.target;
   canvas.width = Math.max(1, Math.min(canvasWidht, device.limits.maxTextureDimension2D));
   canvas.height = Math.max(1, Math.min(canvasHeight, device.limits.maxTextureDimension2D));

   Frender();

  }

 }); observer.observe(canvas);

 canvas.addEventListener('wheel',(e)=>{
  if(+e.deltaY > 0){
   Vzoom = Math.ceil(Vzoom + Vzoom*Vzstep);
   Frender();
  }else if(Vzoom > 1){ // scroll up; && Vzoom > 1;
   Vzoom = Math.floor(Vzoom - Vzoom*Vzstep);
   Frender();
  } 
 });

})();
html, body {
  margin: 0;
  height: 100%;
}
canvas {
  width: 100%;
  height: 100%;
  display: block;
}
<canvas></canvas>