Note that only a Mesh can take a material — which is why the OBJLoader callback below assigns the material inside a traverse over the loaded object's children.

<html>
	<head>
		<title>My first three.js app</title>
		<style>
			body { margin: 0; }
			canvas { width: 400px; height: 400px; }
		</style>
	</head>
	<body>
		<script src="js/three.js"></script>
		<script src="./js/loaders/OBJLoader.js"></script>
		<script>
			var scene = new THREE.Scene();
			var camera = new THREE.PerspectiveCamera( 15, 1, 0.1, 1000 );
			camera.position.y = 10;
			camera.lookAt(0, 0, 0);
			
			var loader = new THREE.TextureLoader();
			loader.load(
				'texture/uv.jpg',

				function ( texture ) {
					var material = new THREE.MeshBasicMaterial( {
						map: texture
					 } );
					 
					var loader = new THREE.OBJLoader();
					loader.load(
						'models/triangular.obj',
						function ( object ) {
							//only a Mesh can take a material, so assign it to each child mesh
							object.traverse( function ( child ) {
								if ( child instanceof THREE.Mesh ) {
									child.material = material;
								}
							});
							object.position.x = 0;
							object.position.y = 0;
							object.position.z = 0;
							object.scale.set(1, 1, 1);
							scene.add( object );
						},

						function ( xhr ) {
							console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
						},

						function ( error ) {
							console.log( 'An error happened' );
						}
					);
				},

				undefined,

				function ( err ) {
					console.error( 'An error happened.' );
				}
			);

			//

			var renderer = new THREE.WebGLRenderer();
			renderer.setSize(400, 400);
			document.body.appendChild( renderer.domElement );

			var animate = function () {
				requestAnimationFrame( animate );
				renderer.render( scene, camera );
			};

			animate();
		</script>
	</body>
</html>

The three.js documentation looks fairly extensive, but its organization and content are not that good, and finding things in it is not easy. Someone asked me about this today, so I am writing down what is involved.

After downloading the source from GitHub, run npm install and then npm run build; a three.js file appears under dist. This file contains only the core modules of three.js. Many commonly used modules, such as OBJLoader, are not in the core at all — they live under examples\js, and that word "examples" is exactly where even experienced developers can fall into a pit.

So to use three.js, the code you include is the three.js file itself plus whatever classes you need from examples\js.

How do you know what lives where? From experience: when you look a class up in the manual, check the "Source" line at the very bottom of the page. If the path starts with "src", the class is already in the three.js file. If it starts with "examples/js", you have to go fetch it yourself from examples\js in the source tree.
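
For example, the two cases look like this as plain script includes (the paths here match the demo above — adjust them to wherever you copied the files):

<!-- Source starts with "src": the class is already inside three.js -->
<script src="js/three.js"></script>

<!-- Source starts with "examples/js": copy the file out of the source tree yourself -->
<script src="js/loaders/OBJLoader.js"></script>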

Also, the following article is really one of the most important ones for getting started with three.js: it covers how to set up a local (http) server yourself, how to include three.js the plain-javascript way, and how to use it as modules:

https://threejs.org/docs/#manual/en/introduction/Loading-3D-models

The important parts are not highlighted at all — everything is piled together, and it is easy to miss them.
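
For reference, the module style looks roughly like this. This is only a sketch — the paths are assumptions and depend on where you copy the module builds (the source tree ships them under examples/jsm):

// module-style includes: a sketch, assuming three.module.js and the
// jsm build of OBJLoader were copied next to the page
import * as THREE from './js/three.module.js';
import { OBJLoader } from './js/loaders/OBJLoader.js';

const scene = new THREE.Scene();
const loader = new OBJLoader();   // note: no THREE. prefix in module style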


The following code uses Babylon.js to load an obj file and apply a texture image from disk; the scene uses an environment light. I only just learned that Babylon.js actually uses a left-handed coordinate system. Ha — didn't see that coming!

<!DOCTYPE html>
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html" charset="utf-8"/>
    <title>Babylon - Getting Started</title>
    <script src="https://cdn.babylonjs.com/babylon.js"></script>
	<script src="https://cdn.babylonjs.com/loaders/babylonjs.loaders.min.js"></script>
    <style>
        html, body {
            overflow: hidden;
            width   : 100%;
            height  : 100%;
            margin  : 0;
            padding : 0;
        }

        #renderCanvas {
            width   : 100%;
            height  : 100%;
            touch-action: none;
        }
    </style>
</head>
<body>
    <canvas id="renderCanvas"></canvas>
    <script>
        window.addEventListener('DOMContentLoaded', function(){
            var canvas = document.getElementById('renderCanvas');
            var engine = new BABYLON.Engine(canvas, true);

            var createScene = function(){
                var scene = new BABYLON.Scene(engine);
                var camera = new BABYLON.FreeCamera('camera1', new BABYLON.Vector3(0, 0, 10), scene);
                camera.setTarget(BABYLON.Vector3.Zero());
                camera.attachControl(canvas, false);
				
				//environment light
				var light = new BABYLON.HemisphericLight("HemiLight", new BABYLON.Vector3(0, 1, 0), scene);
				
				var myMaterial = new BABYLON.StandardMaterial("myMaterial", scene);
				myMaterial.diffuseTexture = new BABYLON.Texture("./texture/uv.jpg", scene);
				
				var mesh = null;
				
				BABYLON.SceneLoader.LoadAssetContainer("./models/", "triangular.obj", scene, function (container) {
					// apply our texture material to the first loaded mesh
					mesh = container.meshes[0];
					mesh.material = myMaterial;
					// Adds all elements to the scene
					container.addAllToScene();
				});

                return scene;
            }

            var scene = createScene();
            engine.runRenderLoop(function(){
                scene.render();
            });

            // the canvas/window resize event handler
            window.addEventListener('resize', function(){
                engine.resize();
            });
        });
    </script>
</body>
</html>
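
Incidentally, if the left-handed default gets in your way, Babylon lets a scene opt into right-handed coordinates with a single flag — a one-line sketch:

// switch this scene to a right-handed coordinate system;
// set it right after creating the scene, before loading assets
scene.useRightHandedSystem = true;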

Building on the previous posts, this one uses the uvn camera model and the camera-transform-invert approach, each in turn, to demonstrate perspective projection in use.

The scene: the camera is placed at (0, 0, 10) in world coordinates, looking at our familiar radius-1 sphere at the origin.

Main code:

<!-- <!doctype html> -->
<html>
   <body>
      <canvas width="400" height="400" id = "my_Canvas"></canvas>
      <script src="script/gl-matrix/gl-matrix.js"></script>
      <script src="script/vertexReorganizer.js"></script>

      <script>
         // choose between the uvn camera model and camera transform invert
         var useUVNCameraModel = false;

         // input detect
         var keyStatus = [];
         document.addEventListener('keydown', event => {
           const key = event.key.toLowerCase();
           console.log(key);
           keyStatus[key] = true;
         });
   
         document.addEventListener('keyup', event => {
           const key = event.key.toLowerCase();
           console.log(key);
           if (keyStatus[key] != null) keyStatus[key] = false;
         });
   
         function getKeyDown(key) {
            if (keyStatus[key]) return true;
            return false;
         }
   
         var vertices;
         var uvs;
         var indices;
         var texture;
         var modelLoaded = false;
         var textureLoaded = false;
         var timeElapse = 0;
         var lastTimestamp = 0;
         var deltaZ = 1;
         var modelTransform, viewTransform, projectionTransform;

         //model transform
         modelTransform = glMatrix.mat4.create();

         //view transform matrix
         viewTransform = glMatrix.mat4.create();

         var cameraPos = glMatrix.vec3.fromValues(0, 0, 10);
         var focalPoint = glMatrix.vec3.fromValues(0, 0, 0);
         var up = glMatrix.vec3.fromValues(0, 1, 0);

         if (useUVNCameraModel) {
            glMatrix.mat4.lookAt(viewTransform, cameraPos, focalPoint, up);
            console.log("use UVN camera model.");
         }
         else {
            var cameraTransform = glMatrix.mat4.create();
            glMatrix.mat4.fromTranslation(cameraTransform, cameraPos);
            glMatrix.mat4.rotateX(cameraTransform, cameraTransform, glMatrix.glMatrix.toRadian(0));
            glMatrix.mat4.invert(viewTransform, cameraTransform);
            console.log("use camera transform invert.");
         }

         //projection transform
         projectionTransform = glMatrix.mat4.create();
         glMatrix.mat4.perspective(projectionTransform, glMatrix.glMatrix.toRadian(15), 1, 0.01, 100);
   
         var canvas = document.getElementById('my_Canvas');
   
         function processInput() {

         }
    
         function clearCanvas() {
            // clear to Unity3d's default camera background color
            gl.clearColor(49/255, 77/255, 121/255, 1);
            gl.clear(gl.COLOR_BUFFER_BIT);
         }
   
         function render() {
            clearCanvas();
    
            gl.viewport(0,0,canvas.width,canvas.height);
            gl.enable(gl.CULL_FACE);
            gl.cullFace(gl.BACK);
             
            //once the model's vertices and the texture are ready, we can render
            if (modelLoaded && textureLoaded) {
               var vertShader = gl.createShader(gl.VERTEX_SHADER);
               gl.shaderSource(vertShader, vertCode);
               gl.compileShader(vertShader);
               var fragShader = gl.createShader(gl.FRAGMENT_SHADER);
               gl.shaderSource(fragShader, fragCode);
               gl.compileShader(fragShader);
               var shaderProgram = gl.createProgram();
               gl.attachShader(shaderProgram, vertShader); 
               gl.attachShader(shaderProgram, fragShader);
               gl.linkProgram(shaderProgram);
               gl.useProgram(shaderProgram);
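               //note: compiling and linking the shaders inside render() runs
               //every frame, which is wasteful; it is kept here only to keep the
               //sample linear. A real app would build the program once at startup.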
       
               //vertex data (CPU to GPU)
               var vertex_buffer = gl.createBuffer();
               gl.bindBuffer(gl.ARRAY_BUFFER, vertex_buffer);
               gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
               var coord = gl.getAttribLocation(shaderProgram, "aVertexPosition");
               gl.vertexAttribPointer(coord, 3, gl.FLOAT, false, 0, 0);
               gl.enableVertexAttribArray(coord);
   
               //texture uv data (another vertex attribute)
               var textureCoordBuffer = gl.createBuffer();
               gl.bindBuffer(gl.ARRAY_BUFFER, textureCoordBuffer);
               gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(uvs), gl.STATIC_DRAW);
               var uv = gl.getAttribLocation(shaderProgram, "aTextureCoord");
               gl.vertexAttribPointer(uv, 2, gl.FLOAT, false, 0, 0);
               gl.enableVertexAttribArray(uv);
    
               //index data (CPU to GPU)
               var index_buffer = gl.createBuffer ();
               gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, index_buffer);
               gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
   
               //the actual texture data
               gl.activeTexture(gl.TEXTURE0);
               gl.bindTexture(gl.TEXTURE_2D, texture);
               var uSampler = gl.getUniformLocation(shaderProgram, 'uSampler');
               gl.uniform1i(uSampler, 0);
   
               //upload the matrices to the GPU
               //world (model) matrix
               var mt = gl.getUniformLocation(shaderProgram, "modelTransform");

               //view matrix
               var vt = gl.getUniformLocation(shaderProgram, "viewTransform");
               gl.uniformMatrix4fv(vt, false, viewTransform);

               //projection matrix
               var pt = gl.getUniformLocation(shaderProgram, "projectionTransform");
               gl.uniformMatrix4fv(pt, false, projectionTransform);
   
               //Call draw
               gl.uniformMatrix4fv(mt, false, modelTransform);
               gl.drawElements(gl.TRIANGLES, indices.length, gl.UNSIGNED_SHORT, 0);

               //disable
               gl.disableVertexAttribArray(uv);
               gl.disableVertexAttribArray(coord);

               //at this point the final render is visible
            }
         }
   
         function sceneUpdate(timestamp) {
            //requestAnimationFrame passes an absolute timestamp, so track the delta
            timeElapse = timeElapse + (timestamp - lastTimestamp);
            lastTimestamp = timestamp;

            if (timeElapse > 1000) {
               cameraPos[2] += deltaZ;

               if (cameraPos[2] > 40 || cameraPos[2] < 10) {
                  deltaZ = -deltaZ;
               }

               if (useUVNCameraModel) {
                  glMatrix.mat4.lookAt(viewTransform, cameraPos, focalPoint, up);
               }
               else {
                  var cameraTransform = glMatrix.mat4.create();
                  glMatrix.mat4.fromTranslation(cameraTransform, cameraPos);
                  glMatrix.mat4.rotateX(cameraTransform, cameraTransform, glMatrix.glMatrix.toRadian(0));
                  glMatrix.mat4.invert(viewTransform, cameraTransform);
               }
   
               timeElapse = 0;
            }
         }
   
         function frameUpdate(timestamp) {
            //process input
            processInput();
   
            //logic update
            sceneUpdate(timestamp);
   
            //render
            render();
   
            //schedule the next frame
            requestAnimationFrame(frameUpdate);
         }
    
         function loadVerticesFromFile(path) {
            let xmlHttpRequest = new XMLHttpRequest();
            xmlHttpRequest.onreadystatechange = function() {
               if (xmlHttpRequest.status == 200 && xmlHttpRequest.readyState == 4) {
                  var txt = xmlHttpRequest.responseText;
    
                  var lines = txt.split('\n');
    
                  //skip lines until the vertex data ('v ') starts
                  var index = 0;
                  while(lines[index].indexOf('v ') == -1) {
                     index++;
                  }
    
                  //1. read the vertex positions
                  vertices = [];
                  while(lines[index].indexOf('v ') == 0) {
                     //each of these lines is one vertex
                     var str = lines[index];
                     var values = str.split(' ');
                      
                     vertices.push(parseFloat(values[1]));
                     vertices.push(parseFloat(values[2]));
                     vertices.push(parseFloat(values[3]));
   
                     index++;
                  }
   
                  //2. read the uv data
                  uvs = [];
                  while(lines[index].indexOf('vt ') == 0) {
                     var str = lines[index];
                     var values = str.split(' ');
   
                     uvs.push(parseFloat(values[1]));
                     uvs.push(parseFloat(values[2]));
                     index++;
                  }
   
                  //3. read the normal data
                  var normals = [];
                  while(lines[index].indexOf('vn ') == 0) {
                     var str = lines[index];
                     var values = str.split(' ');
   
                     normals.push(parseFloat(values[1]));
                     normals.push(parseFloat(values[2]));
                     normals.push(parseFloat(values[3]));
                     index++;
                  }
    
                  while(lines[index].indexOf('f ') == -1) {
                     index++;
                  }
                   
                  //4. process the face indices (position/uv/normal triplets);
                  //   a quad ('f' with four vertices) is split into two triangles
                  while(lines[index].indexOf('f ') == 0) {
                     var line = lines[index];
                     var values = line.split(' ');
    
                     if (values.length == 5) {
                        // first vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[1]);
                        // second vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[2]);
                        // third vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[3]);
   
                        //second triangle of the quad
                        // 1st vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[1]);
                        // 2nd vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[3]);
                        // 3rd vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[4]); 
                     }
                     else if(values.length == 4) {
                        // first vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[1]);
                        // second vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[2]);  
                        // third vertex
                        extractAndProcessVertex(vertices, uvs, normals, values[3]);
                     }
                     else {
                        console.log("Impossible!");
                     }
    
                     index++;
                  }
    
                  vertices = getPositionArray();
                  uvs = getUvArray();
                  indices = getIndexArray();
   
                  modelLoaded = true;
               }
            
            }
            xmlHttpRequest.open("GET", path);
            xmlHttpRequest.send();         
         }
   
         function loadTexture(path) {
            const image = new Image();
            image.onload = function() {
               texture = gl.createTexture();
               gl.bindTexture(gl.TEXTURE_2D, texture);
               gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
               gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
               gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
               gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
               gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, image);
               textureLoaded = true;
            }
            image.src = path;
         }
   
         loadVerticesFromFile("./model/triangular.obj");
   
         //load texture from disk
         loadTexture("./texture/uv.jpg");
   
         var gl = canvas.getContext("webgl") || canvas.getContext("experimental-webgl");
   
         var version = gl.getParameter(gl.VERSION);
    
         var vertCode =
            'attribute vec3 aVertexPosition;' + 
            'attribute vec2 aTextureCoord; ' + 
            'varying highp vec2 vTextureCoord; ' +
            'uniform mat4 modelTransform;' +
            'uniform mat4 viewTransform;' +
            'uniform mat4 projectionTransform;' +
            'void main(void) {' + 
            ' gl_Position = projectionTransform * viewTransform * modelTransform * vec4(aVertexPosition, 1.0);' + 
            ' vTextureCoord = aTextureCoord; ' + 
            '}';
   
         var fragCode = 
         'varying highp vec2 vTextureCoord; ' +
         'uniform sampler2D uSampler; ' + 
         'void main(void) {' + 
         ' gl_FragColor = texture2D(uSampler, vTextureCoord); ' + 
         '}';
    
         requestAnimationFrame(frameUpdate);
    
     </script>
   </body>
</html>

At the top of the code, the useUVNCameraModel variable selects between the uvn model and camera transform invert. The background color has also been changed to the Unity3d camera's default. The sphere is never scaled at all — its changing size on screen comes purely from the perspective camera moving.
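
The two branches really do produce the same view matrix for this pose. A minimal sketch with gl-matrix (the same library the demo uses), assuming the camera sits at (0, 0, 10) with no rotation:

// uvn model: build the view matrix directly with lookAt
var uvn = glMatrix.mat4.create();
glMatrix.mat4.lookAt(uvn, [0, 0, 10], [0, 0, 0], [0, 1, 0]);

// camera transform invert: build the camera's world transform, then invert it
var cameraWorld = glMatrix.mat4.create();
glMatrix.mat4.fromTranslation(cameraWorld, [0, 0, 10]);
var inverted = glMatrix.mat4.create();
glMatrix.mat4.invert(inverted, cameraWorld);

// both log a pure translation by (0, 0, -10)
console.log(uvn, inverted);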


In the previous post we derived the perspective projection matrix, as follows:

$$
\begin{pmatrix}
\frac{2\,near}{right-left} & 0 & \frac{left+right}{right-left} & 0 \\
0 & \frac{2\,near}{top-bottom} & \frac{top+bottom}{top-bottom} & 0 \\
0 & 0 & \frac{near+far}{near-far} & \frac{2\,near\,far}{near-far} \\
0 & 0 & -1 & 0
\end{pmatrix}
$$

With this general matrix, the camera is not required to be centered horizontally between left and right, nor vertically between bottom and top. When the camera is centered in both directions, the matrix simplifies to:

$$
\begin{pmatrix}
\frac{near}{right} & 0 & 0 & 0 \\
0 & \frac{near}{top} & 0 & 0 \\
0 & 0 & \frac{near+far}{near-far} & \frac{2\,near\,far}{near-far} \\
0 & 0 & -1 & 0
\end{pmatrix}
$$

because right = -left and top = -bottom.

This centered case is in fact how most engines work. Moreover, to cut down the parameters the user has to supply, in this mode the original left, right, top and bottom are replaced by two new parameters (near and far are kept): fov and aspectRatio.

So in this mode, a perspective projection takes four camera parameters: fov, aspect, near, far.

fov is the opening angle of the view in the vertical direction — in other words, looking along the positive x axis, the angle that the camera's visible range spans:

(figure: definition of fov)

aspectRatio — aspect for short. In the simplest case we render to the full screen, and aspect is simply the screen's width/height. But often we do not render full-screen; in our examples, for instance, we have always targeted a region of roughly 400×400. So, in general terms, it is the width-to-height ratio of the render target we are drawing into.

The reason this parameter is needed: when width and height are equal, since the canonical range we render into is -1 to 1 on both axes, the width and height scale by the same factor. But when the aspect ratio differs — in widescreen, say — the width should show proportionally more content than the height. So in the horizontal direction, aspect together with fov lets us compute the range that is ultimately visible.

With these concepts in place it is easy to read off the substitutions: from the figure, tan(fov/2) = top/near, so the original near/top is cot(fov/2), i.e. 1/tan(fov/2); and since aspect = right/top, near/right is cot(fov/2)/aspect.
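
In symbols (reading top and right off the frustum at the near plane):

$$
\tan\frac{fov}{2}=\frac{top}{near}
\;\Rightarrow\;
\frac{near}{top}=\cot\frac{fov}{2},
\qquad
aspect=\frac{right}{top}
\;\Rightarrow\;
\frac{near}{right}=\frac{\cot(fov/2)}{aspect}.
$$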

Substituting these into the matrix above, the result is:

$$
\begin{pmatrix}
\frac{\cot(fov/2)}{aspect} & 0 & 0 & 0 \\
0 & \cot(fov/2) & 0 & 0 \\
0 & 0 & \frac{near+far}{near-far} & \frac{2\,near\,far}{near-far} \\
0 & 0 & -1 & 0
\end{pmatrix}
$$

This is the common form of the projection matrix you will find across engines (Unity3d, Unreal Engine 4, Babylon, and so on).
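
As a quick sanity check, here is a minimal sketch that builds this fov/aspect form by hand and compares it with gl-matrix's mat4.perspective (reusing the library from the demo above; the parameter values are just the demo's):

var fov = glMatrix.glMatrix.toRadian(15), aspect = 1, near = 0.01, far = 100;
var f = 1.0 / Math.tan(fov / 2);   // cot(fov/2)

// the matrix derived above, laid out column-major the way gl-matrix stores it
var manual = new Float32Array([
   f / aspect, 0, 0, 0,
   0, f, 0, 0,
   0, 0, (near + far) / (near - far), -1,
   0, 0, 2 * near * far / (near - far), 0
]);

var reference = glMatrix.mat4.create();
glMatrix.mat4.perspective(reference, fov, aspect, near, far);

// the two should match element for element
console.log(manual, reference);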

