//
// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
//    claim that you wrote the original software. If you use this software
//    in a product, an acknowledgment in the product documentation would be
//    appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
//    misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
#ifndef NANOVG_GL_H
#define NANOVG_GL_H

#ifdef __cplusplus
extern "C" {
#endif

// Create flags

enum NVGcreateFlags {
	// Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA).
	NVG_ANTIALIAS = 1<<0,
	// Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little
	// slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
	NVG_STENCIL_STROKES = 1<<1,
	// Flag indicating that additional debug checks are done.
	NVG_DEBUG = 1<<2,
};

#if defined NANOVG_GL2_IMPLEMENTATION
# define NANOVG_GL2 1
# define NANOVG_GL_IMPLEMENTATION 1
#elif defined NANOVG_GL3_IMPLEMENTATION
# define NANOVG_GL3 1
# define NANOVG_GL_IMPLEMENTATION 1
# define NANOVG_GL_USE_UNIFORMBUFFER 1
#elif defined NANOVG_GLES2_IMPLEMENTATION
# define NANOVG_GLES2 1
# define NANOVG_GL_IMPLEMENTATION 1
#elif defined NANOVG_GLES3_IMPLEMENTATION
# define NANOVG_GLES3 1
# define NANOVG_GL_IMPLEMENTATION 1
#endif

#define NANOVG_GL_USE_STATE_FILTER (1)

// Creates NanoVG contexts for different OpenGL (ES) versions.
// Flags should be combination of the create flags above.

#if defined NANOVG_GL2

NVGcontext* nvgCreateGL2(int flags);
void nvgDeleteGL2(NVGcontext* ctx);

int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
GLuint nvglImageHandleGL2(NVGcontext* ctx, int image);

#endif

#if defined NANOVG_GL3

NVGcontext* nvgCreateGL3(int flags);
void nvgDeleteGL3(NVGcontext* ctx);

int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
GLuint nvglImageHandleGL3(NVGcontext* ctx, int image);

#endif

#if defined NANOVG_GLES2

NVGcontext* nvgCreateGLES2(int flags);
void nvgDeleteGLES2(NVGcontext* ctx);

int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image);

#endif

#if defined NANOVG_GLES3

NVGcontext* nvgCreateGLES3(int flags);
void nvgDeleteGLES3(NVGcontext* ctx);

int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image);

#endif

// These are additional flags on top of NVGimageFlags.
enum NVGimageFlagsGL {
	NVG_IMAGE_NODELETE = 1<<16,	// Do not delete GL texture handle.
};

#ifdef __cplusplus
}
#endif

#endif /* NANOVG_GL_H */
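
// Typical usage (sketch; assumes the GL3 flavour, and that the application has
// already created an OpenGL 3.x context and loaded the GL entry points before
// calling nvgCreateGL3(). nvgBeginFrame()/nvgEndFrame() are declared in nanovg.h):
//
//	// In exactly one source file:
//	#define NANOVG_GL3_IMPLEMENTATION
//	#include "nanovg.h"
//	#include "nanovg_gl.h"
//
//	NVGcontext* vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES | NVG_DEBUG);
//
//	// Once per frame, between the application's own GL rendering:
//	nvgBeginFrame(vg, winWidth, winHeight, devicePixelRatio);
//	// ... nvgBeginPath(vg), nvgRect(vg, ...), nvgFill(vg), etc. ...
//	nvgEndFrame(vg);
//
//	// At shutdown:
//	nvgDeleteGL3(vg);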

#ifdef NANOVG_GL_IMPLEMENTATION

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "nanovg.h"

enum GLNVGuniformLoc {
	GLNVG_LOC_VIEWSIZE,
	GLNVG_LOC_TEX,
	GLNVG_LOC_FRAG,
	GLNVG_MAX_LOCS
};

enum GLNVGshaderType {
	NSVG_SHADER_FILLGRAD,
	NSVG_SHADER_FILLIMG,
	NSVG_SHADER_SIMPLE,
	NSVG_SHADER_IMG
};

#if NANOVG_GL_USE_UNIFORMBUFFER
enum GLNVGuniformBindings {
	GLNVG_FRAG_BINDING = 0,
};
#endif

struct GLNVGshader {
	GLuint prog;
	GLuint frag;
	GLuint vert;
	GLint loc[GLNVG_MAX_LOCS];
};
typedef struct GLNVGshader GLNVGshader;

struct GLNVGtexture {
	int id;
	GLuint tex;
	int width, height;
	int type;
	int flags;
};
typedef struct GLNVGtexture GLNVGtexture;

struct GLNVGblend
{
	GLenum srcRGB;
	GLenum dstRGB;
	GLenum srcAlpha;
	GLenum dstAlpha;
};
typedef struct GLNVGblend GLNVGblend;

enum GLNVGcallType {
	GLNVG_NONE = 0,
	GLNVG_FILL,
	GLNVG_CONVEXFILL,
	GLNVG_STROKE,
	GLNVG_TRIANGLES,
};

struct GLNVGcall {
	int type;
	int image;
	int pathOffset;
	int pathCount;
	int triangleOffset;
	int triangleCount;
	int uniformOffset;
	GLNVGblend blendFunc;
};
typedef struct GLNVGcall GLNVGcall;

struct GLNVGpath {
	int fillOffset;
	int fillCount;
	int strokeOffset;
	int strokeCount;
};
typedef struct GLNVGpath GLNVGpath;
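
// Note on GLNVGfragUniforms below: when uniform buffers are not used, the named
// fields and the flat uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4] view must stay
// the same size (see the note inside the struct). A compile-time guard along these
// lines could catch a mismatch (sketch only, assumes C11 _Static_assert):
//
//	_Static_assert(sizeof(GLNVGfragUniforms) ==
//	               NANOVG_GL_UNIFORMARRAY_SIZE * 4 * sizeof(float),
//	               "GLNVGfragUniforms layout does not match uniform array size");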

struct GLNVGfragUniforms {
#if NANOVG_GL_USE_UNIFORMBUFFER
	float scissorMat[12]; // matrices are actually 3 vec4s
	float paintMat[12];
	struct NVGcolor innerCol;
	struct NVGcolor outerCol;
	float scissorExt[2];
	float scissorScale[2];
	float extent[2];
	float radius;
	float feather;
	float strokeMult;
	float strokeThr;
	int texType;
	int type;
#else
	// note: after modifying layout or size of uniform array,
	// don't forget to also update the fragment shader source!
	#define NANOVG_GL_UNIFORMARRAY_SIZE 11
	union {
		struct {
			float scissorMat[12]; // matrices are actually 3 vec4s
			float paintMat[12];
			struct NVGcolor innerCol;
			struct NVGcolor outerCol;
			float scissorExt[2];
			float scissorScale[2];
			float extent[2];
			float radius;
			float feather;
			float strokeMult;
			float strokeThr;
			float texType;
			float type;
		};
		float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
	};
#endif
};
typedef struct GLNVGfragUniforms GLNVGfragUniforms;

struct GLNVGcontext {
	GLNVGshader shader;
	GLNVGtexture* textures;
	float view[2];
	int ntextures;
	int ctextures;
	int textureId;
	GLuint vertBuf;
#if defined NANOVG_GL3
	GLuint vertArr;
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
	GLuint fragBuf;
#endif
	int fragSize;
	int flags;

	// Per frame buffers
	GLNVGcall* calls;
	int ccalls;
	int ncalls;
	GLNVGpath* paths;
	int cpaths;
	int npaths;
	struct NVGvertex* verts;
	int cverts;
	int nverts;
	unsigned char* uniforms;
	int cuniforms;
	int nuniforms;

	// cached state
#if NANOVG_GL_USE_STATE_FILTER
	GLuint boundTexture;
	GLuint stencilMask;
	GLenum stencilFunc;
	GLint stencilFuncRef;
	GLuint stencilFuncMask;
	GLNVGblend blendFunc;
#endif
};
typedef struct GLNVGcontext GLNVGcontext;

static int glnvg__maxi(int a, int b) { return a > b ? a : b; }

#ifdef NANOVG_GLES2
static unsigned int glnvg__nearestPow2(unsigned int num)
{
	unsigned n = num > 0 ? num - 1 : 0;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	n++;
	return n;
}
#endif

static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->boundTexture != tex) {
		gl->boundTexture = tex;
		glBindTexture(GL_TEXTURE_2D, tex);
	}
#else
	glBindTexture(GL_TEXTURE_2D, tex);
#endif
}

static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask)
{
#if NANOVG_GL_USE_STATE_FILTER
	if (gl->stencilMask != mask) {
		gl->stencilMask = mask;
		glStencilMask(mask);
	}
#else
	glStencilMask(mask);
#endif
}

static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask)
{
#if NANOVG_GL_USE_STATE_FILTER
	if ((gl->stencilFunc != func) ||
		(gl->stencilFuncRef != ref) ||
		(gl->stencilFuncMask != mask)) {

		gl->stencilFunc = func;
		gl->stencilFuncRef = ref;
		gl->stencilFuncMask = mask;
		glStencilFunc(func, ref, mask);
	}
#else
	glStencilFunc(func, ref, mask);
#endif
}
static void glnvg__blendFuncSeparate(GLNVGcontext* gl, const GLNVGblend* blend)
{
#if NANOVG_GL_USE_STATE_FILTER
	if ((gl->blendFunc.srcRGB != blend->srcRGB) ||
		(gl->blendFunc.dstRGB != blend->dstRGB) ||
		(gl->blendFunc.srcAlpha != blend->srcAlpha) ||
		(gl->blendFunc.dstAlpha != blend->dstAlpha)) {

		gl->blendFunc = *blend;
		glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
	}
#else
	glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
#endif
}

static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl)
{
	GLNVGtexture* tex = NULL;
	int i;

	for (i = 0; i < gl->ntextures; i++) {
		if (gl->textures[i].id == 0) {
			tex = &gl->textures[i];
			break;
		}
	}
	if (tex == NULL) {
		if (gl->ntextures+1 > gl->ctextures) {
			GLNVGtexture* textures;
			int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate
			textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures);
			if (textures == NULL) return NULL;
			gl->textures = textures;
			gl->ctextures = ctextures;
		}
		tex = &gl->textures[gl->ntextures++];
	}

	memset(tex, 0, sizeof(*tex));
	tex->id = ++gl->textureId;

	return tex;
}

static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id)
{
	int i;
	for (i = 0; i < gl->ntextures; i++)
		if (gl->textures[i].id == id)
			return &gl->textures[i];
	return NULL;
}

static int glnvg__deleteTexture(GLNVGcontext* gl, int id)
{
	int i;
	for (i = 0; i < gl->ntextures; i++) {
		if (gl->textures[i].id == id) {
			if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
				glDeleteTextures(1, &gl->textures[i].tex);
			memset(&gl->textures[i], 0, sizeof(gl->textures[i]));
			return 1;
		}
	}
	return 0;
}

static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type)
{
	GLchar str[512+1];
	GLsizei len = 0;
	glGetShaderInfoLog(shader, 512, &len, str);
	if (len > 512) len = 512;
	str[len] = '\0';
	printf("Shader %s/%s error:\n%s\n", name, type, str);
}

static void glnvg__dumpProgramError(GLuint prog, const char* name)
{
	GLchar str[512+1];
	GLsizei len = 0;
	glGetProgramInfoLog(prog, 512, &len, str);
	if (len > 512) len = 512;
	str[len] = '\0';
	printf("Program %s error:\n%s\n", name, str);
}

static void glnvg__checkError(GLNVGcontext* gl, const char* str)
{
	GLenum err;
	if ((gl->flags & NVG_DEBUG) == 0) return;
	err = glGetError();
	if (err != GL_NO_ERROR) {
		printf("Error %08x after %s\n", err, str);
		return;
	}
}

static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char* vshader, const char* fshader)
{
	GLint status;
	GLuint prog, vert, frag;
	const char* str[3];
	str[0] = header;
opts : ""; 436 437 memset(shader, 0, sizeof(*shader)); 438 439 prog = glCreateProgram(); 440 vert = glCreateShader(GL_VERTEX_SHADER); 441 frag = glCreateShader(GL_FRAGMENT_SHADER); 442 str[2] = vshader; 443 glShaderSource(vert, 3, str, 0); 444 str[2] = fshader; 445 glShaderSource(frag, 3, str, 0); 446 447 glCompileShader(vert); 448 glGetShaderiv(vert, GL_COMPILE_STATUS, &status); 449 if (status != GL_TRUE) { 450 glnvg__dumpShaderError(vert, name, "vert"); 451 return 0; 452 } 453 454 glCompileShader(frag); 455 glGetShaderiv(frag, GL_COMPILE_STATUS, &status); 456 if (status != GL_TRUE) { 457 glnvg__dumpShaderError(frag, name, "frag"); 458 return 0; 459 } 460 461 glAttachShader(prog, vert); 462 glAttachShader(prog, frag); 463 464 glBindAttribLocation(prog, 0, "vertex"); 465 glBindAttribLocation(prog, 1, "tcoord"); 466 467 glLinkProgram(prog); 468 glGetProgramiv(prog, GL_LINK_STATUS, &status); 469 if (status != GL_TRUE) { 470 glnvg__dumpProgramError(prog, name); 471 return 0; 472 } 473 474 shader->prog = prog; 475 shader->vert = vert; 476 shader->frag = frag; 477 478 return 1; 479 } 480 481 static void glnvg__deleteShader(GLNVGshader* shader) 482 { 483 if (shader->prog != 0) 484 glDeleteProgram(shader->prog); 485 if (shader->vert != 0) 486 glDeleteShader(shader->vert); 487 if (shader->frag != 0) 488 glDeleteShader(shader->frag); 489 } 490 491 static void glnvg__getUniforms(GLNVGshader* shader) 492 { 493 shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize"); 494 shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex"); 495 496 #if NANOVG_GL_USE_UNIFORMBUFFER 497 shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag"); 498 #else 499 shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag"); 500 #endif 501 } 502 503 static int glnvg__renderCreate(void* uptr) 504 { 505 GLNVGcontext* gl = (GLNVGcontext*)uptr; 506 int align = 4; 507 508 // TODO: mediump float may not be enough for GLES2 in iOS. 
	// see the following discussion: https://github.com/memononen/nanovg/issues/46
	static const char* shaderHeader =
#if defined NANOVG_GL2
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GL3
		"#version 150 core\n"
		"#define NANOVG_GL3 1\n"
#elif defined NANOVG_GLES2
		"#version 100\n"
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GLES3
		"#version 300 es\n"
		"#define NANOVG_GL3 1\n"
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
		"#define USE_UNIFORMBUFFER 1\n"
#else
		"#define UNIFORMARRAY_SIZE 11\n"
#endif
		"\n";

	static const char* fillVertShader =
		"#ifdef NANOVG_GL3\n"
		"	uniform vec2 viewSize;\n"
		"	in vec2 vertex;\n"
		"	in vec2 tcoord;\n"
		"	out vec2 ftcoord;\n"
		"	out vec2 fpos;\n"
		"#else\n"
		"	uniform vec2 viewSize;\n"
		"	attribute vec2 vertex;\n"
		"	attribute vec2 tcoord;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"void main(void) {\n"
		"	ftcoord = tcoord;\n"
		"	fpos = vertex;\n"
		"	gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n"
		"}\n";

	static const char* fillFragShader =
		"#ifdef GL_ES\n"
		"#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n"
		" precision highp float;\n"
		"#else\n"
		" precision mediump float;\n"
		"#endif\n"
		"#endif\n"
		"#ifdef NANOVG_GL3\n"
		"#ifdef USE_UNIFORMBUFFER\n"
		"	layout(std140) uniform frag {\n"
		"		mat3 scissorMat;\n"
		"		mat3 paintMat;\n"
		"		vec4 innerCol;\n"
		"		vec4 outerCol;\n"
		"		vec2 scissorExt;\n"
		"		vec2 scissorScale;\n"
		"		vec2 extent;\n"
		"		float radius;\n"
		"		float feather;\n"
		"		float strokeMult;\n"
		"		float strokeThr;\n"
		"		int texType;\n"
		"		int type;\n"
		"	};\n"
		"#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"#endif\n"
		"	uniform sampler2D tex;\n"
		"	in vec2 ftcoord;\n"
		"	in vec2 fpos;\n"
		"	out vec4 outColor;\n"
		"#else\n" // !NANOVG_GL3
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"	uniform sampler2D tex;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"#ifndef USE_UNIFORMBUFFER\n"
		"	#define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n"
		"	#define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n"
		"	#define innerCol frag[6]\n"
		"	#define outerCol frag[7]\n"
		"	#define scissorExt frag[8].xy\n"
		"	#define scissorScale frag[8].zw\n"
		"	#define extent frag[9].xy\n"
		"	#define radius frag[9].z\n"
		"	#define feather frag[9].w\n"
		"	#define strokeMult frag[10].x\n"
		"	#define strokeThr frag[10].y\n"
		"	#define texType int(frag[10].z)\n"
		"	#define type int(frag[10].w)\n"
		"#endif\n"
		"\n"
		"float sdroundrect(vec2 pt, vec2 ext, float rad) {\n"
		"	vec2 ext2 = ext - vec2(rad,rad);\n"
		"	vec2 d = abs(pt) - ext2;\n"
		"	return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n"
		"}\n"
		"\n"
		"// Scissoring\n"
		"float scissorMask(vec2 p) {\n"
		"	vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n"
		"	sc = vec2(0.5,0.5) - sc * scissorScale;\n"
		"	return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n"
		"}\n"
		"#ifdef EDGE_AA\n"
		"// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n"
		"float strokeMask() {\n"
		"	return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n"
		"}\n"
		"#endif\n"
		"\n"
"void main(void) {\n" 625 " vec4 result;\n" 626 " float scissor = scissorMask(fpos);\n" 627 "#ifdef EDGE_AA\n" 628 " float strokeAlpha = strokeMask();\n" 629 " if (strokeAlpha < strokeThr) discard;\n" 630 "#else\n" 631 " float strokeAlpha = 1.0;\n" 632 "#endif\n" 633 " if (type == 0) { // Gradient\n" 634 " // Calculate gradient color using box gradient\n" 635 " vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n" 636 " float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n" 637 " vec4 color = mix(innerCol,outerCol,d);\n" 638 " // Combine alpha\n" 639 " color *= strokeAlpha * scissor;\n" 640 " result = color;\n" 641 " } else if (type == 1) { // Image\n" 642 " // Calculate color fron texture\n" 643 " vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n" 644 "#ifdef NANOVG_GL3\n" 645 " vec4 color = texture(tex, pt);\n" 646 "#else\n" 647 " vec4 color = texture2D(tex, pt);\n" 648 "#endif\n" 649 " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" 650 " if (texType == 2) color = vec4(color.x);" 651 " // Apply color tint and alpha.\n" 652 " color *= innerCol;\n" 653 " // Combine alpha\n" 654 " color *= strokeAlpha * scissor;\n" 655 " result = color;\n" 656 " } else if (type == 2) { // Stencil fill\n" 657 " result = vec4(1,1,1,1);\n" 658 " } else if (type == 3) { // Textured tris\n" 659 "#ifdef NANOVG_GL3\n" 660 " vec4 color = texture(tex, ftcoord);\n" 661 "#else\n" 662 " vec4 color = texture2D(tex, ftcoord);\n" 663 "#endif\n" 664 " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" 665 " if (texType == 2) color = vec4(color.x);" 666 " color *= scissor;\n" 667 " result = color * innerCol;\n" 668 " }\n" 669 "#ifdef NANOVG_GL3\n" 670 " outColor = result;\n" 671 "#else\n" 672 " gl_FragColor = result;\n" 673 "#endif\n" 674 "}\n"; 675 676 glnvg__checkError(gl, "init"); 677 678 if (gl->flags & NVG_ANTIALIAS) { 679 if (glnvg__createShader(&gl->shader, "shader", shaderHeader, "#define EDGE_AA 1\n", fillVertShader, fillFragShader) == 0) 680 return 0; 681 } else { 682 if (glnvg__createShader(&gl->shader, "shader", shaderHeader, NULL, fillVertShader, fillFragShader) == 0) 683 return 0; 684 } 685 686 glnvg__checkError(gl, "uniform locations"); 687 glnvg__getUniforms(&gl->shader); 688 689 // Create dynamic vertex array 690 #if defined NANOVG_GL3 691 glGenVertexArrays(1, &gl->vertArr); 692 #endif 693 glGenBuffers(1, &gl->vertBuf); 694 695 #if NANOVG_GL_USE_UNIFORMBUFFER 696 // Create UBOs 697 glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING); 698 glGenBuffers(1, &gl->fragBuf); 699 glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align); 700 #endif 701 gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align; 702 703 glnvg__checkError(gl, "create done"); 704 705 glFinish(); 706 707 return 1; 708 } 709 710 static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data) 711 { 712 GLNVGcontext* gl = (GLNVGcontext*)uptr; 713 GLNVGtexture* tex = glnvg__allocTexture(gl); 714 715 if (tex == NULL) return 0; 716 717 #ifdef NANOVG_GLES2 718 // Check for non-power of 2. 719 if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) { 720 // No repeat 721 if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) { 722 printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h); 723 imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY); 724 } 725 // No mips. 
		if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
			printf("Mip-maps are not supported for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS;
		}
	}
#endif

	glGenTextures(1, &tex->tex);
	tex->width = w;
	tex->height = h;
	tex->type = type;
	tex->flags = imageFlags;
	glnvg__bindTexture(gl, tex->tex);

	glPixelStorei(GL_UNPACK_ALIGNMENT,1);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

#if defined (NANOVG_GL2)
	// GL 1.4 and later has support for generating mipmaps using a tex parameter.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
	}
#endif

	if (type == NVG_TEXTURE_RGBA)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined (NANOVG_GL2)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#elif defined(NANOVG_GLES3)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#else
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
		}
	} else {
		if (imageFlags & NVG_IMAGE_NEAREST) {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
		} else {
			glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
		}
	}

	if (imageFlags & NVG_IMAGE_NEAREST) {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	} else {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
	}

	if (imageFlags & NVG_IMAGE_REPEATX)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);

	if (imageFlags & NVG_IMAGE_REPEATY)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	// The new way to build mipmaps on GLES and GL3
#if !defined(NANOVG_GL2)
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glGenerateMipmap(GL_TEXTURE_2D);
	}
#endif

	glnvg__checkError(gl, "create tex");
	glnvg__bindTexture(gl, 0);

	return tex->id;
}


static int glnvg__renderDeleteTexture(void* uptr, int image)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	return glnvg__deleteTexture(gl, image);
}

static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);

	if (tex == NULL) return 0;
	glnvg__bindTexture(gl, tex->tex);

	glPixelStorei(GL_UNPACK_ALIGNMENT,1);

#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
#else
	// No support for all of the skip parameters; need to update a whole row at a time.
	if (tex->type == NVG_TEXTURE_RGBA)
		data += y*tex->width*4;
	else
		data += y*tex->width;
	x = 0;
	w = tex->width;
#endif

	if (tex->type == NVG_TEXTURE_RGBA)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2) || defined(NANOVG_GL2)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#else
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	glnvg__bindTexture(gl, 0);

	return 1;
}

static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);
	if (tex == NULL) return 0;
	*w = tex->width;
	*h = tex->height;
	return 1;
}

static void glnvg__xformToMat3x4(float* m3, float* t)
{
	m3[0] = t[0];
	m3[1] = t[1];
	m3[2] = 0.0f;
	m3[3] = 0.0f;
	m3[4] = t[2];
	m3[5] = t[3];
	m3[6] = 0.0f;
	m3[7] = 0.0f;
	m3[8] = t[4];
	m3[9] = t[5];
	m3[10] = 1.0f;
	m3[11] = 0.0f;
}

static NVGcolor glnvg__premulColor(NVGcolor c)
{
	c.r *= c.a;
	c.g *= c.a;
	c.b *= c.a;
	return c;
}

static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint,
							   NVGscissor* scissor, float width, float fringe, float strokeThr)
{
	GLNVGtexture* tex = NULL;
	float invxform[6];

	memset(frag, 0, sizeof(*frag));

	frag->innerCol = glnvg__premulColor(paint->innerColor);
	frag->outerCol = glnvg__premulColor(paint->outerColor);

	if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) {
		memset(frag->scissorMat, 0, sizeof(frag->scissorMat));
		frag->scissorExt[0] = 1.0f;
		frag->scissorExt[1] = 1.0f;
		frag->scissorScale[0] = 1.0f;
		frag->scissorScale[1] = 1.0f;
	} else {
		nvgTransformInverse(invxform, scissor->xform);
		glnvg__xformToMat3x4(frag->scissorMat, invxform);
		frag->scissorExt[0] = scissor->extent[0];
		frag->scissorExt[1] = scissor->extent[1];
		frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
		frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
	}

	memcpy(frag->extent, paint->extent, sizeof(frag->extent));
	frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
	frag->strokeThr = strokeThr;

	if (paint->image != 0) {
		tex = glnvg__findTexture(gl, paint->image);
		if (tex == NULL) return 0;
		if ((tex->flags & NVG_IMAGE_FLIPY) != 0) {
			float m1[6], m2[6];
			nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, paint->xform);
			nvgTransformScale(m2, 1.0f, -1.0f);
			nvgTransformMultiply(m2, m1);
			nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f);
			nvgTransformMultiply(m1, m2);
			nvgTransformInverse(invxform, m1);
		} else {
			nvgTransformInverse(invxform, paint->xform);
		}
		frag->type = NSVG_SHADER_FILLIMG;

#if NANOVG_GL_USE_UNIFORMBUFFER
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1;
		else
			frag->texType = 2;
#else
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f;
		else
			frag->texType = 2.0f;
#endif
//		printf("frag->texType = %d\n", frag->texType);
	} else {
		frag->type = NSVG_SHADER_FILLGRAD;
		frag->radius = paint->radius;
		frag->feather = paint->feather;
		nvgTransformInverse(invxform, paint->xform);
	}

	glnvg__xformToMat3x4(frag->paintMat, invxform);

	return 1;
}

static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i);

static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image)
{
#if NANOVG_GL_USE_UNIFORMBUFFER
	glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms));
#else
	GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
	glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0]));
#endif

	if (image != 0) {
		GLNVGtexture* tex = glnvg__findTexture(gl, image);
		glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0);
		glnvg__checkError(gl, "tex paint tex");
	} else {
		glnvg__bindTexture(gl, 0);
	}
}

static void glnvg__renderViewport(void* uptr, float width, float height, float devicePixelRatio)
{
	NVG_NOTUSED(devicePixelRatio);
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	gl->view[0] = width;
	gl->view[1] = height;
}

static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;

	// Draw shapes
	glEnable(GL_STENCIL_TEST);
	glnvg__stencilMask(gl, 0xff);
	glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff);
	glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

	// set bindpoint for solid loc
	glnvg__setUniforms(gl, call->uniformOffset, 0);
	glnvg__checkError(gl, "fill simple");

	glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
	glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
	glDisable(GL_CULL_FACE);
	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	glEnable(GL_CULL_FACE);

	// Draw anti-aliased pixels
	glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

	glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
	glnvg__checkError(gl, "fill fill");

	if (gl->flags & NVG_ANTIALIAS) {
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}

	// Draw fill
	glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff);
	glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
	glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount);

	glDisable(GL_STENCIL_TEST);
}

static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;

	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "convex fill");

	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	if (gl->flags & NVG_ANTIALIAS) {
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}

static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int npaths = call->pathCount, i;

	if (gl->flags & NVG_STENCIL_STROKES) {

		glEnable(GL_STENCIL_TEST);
		glnvg__stencilMask(gl, 0xff);

		// Fill the stroke base without overlap
		glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
		glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
		glnvg__checkError(gl, "stroke fill 0");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Draw anti-aliased pixels.
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Clear stencil buffer.
		glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
		glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
		glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
		glnvg__checkError(gl, "stroke fill 1");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

		glDisable(GL_STENCIL_TEST);

//		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__checkError(gl, "stroke fill");
		// Draw Strokes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}

static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call)
{
	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "triangles fill");

	glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
}

static void glnvg__renderCancel(void* uptr) {
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	gl->nverts = 0;
	gl->npaths = 0;
	gl->ncalls = 0;
	gl->nuniforms = 0;
}

static GLenum glnvg_convertBlendFuncFactor(int factor)
{
	if (factor == NVG_ZERO)
		return GL_ZERO;
	if (factor == NVG_ONE)
		return GL_ONE;
	if (factor == NVG_SRC_COLOR)
		return GL_SRC_COLOR;
	if (factor == NVG_ONE_MINUS_SRC_COLOR)
		return GL_ONE_MINUS_SRC_COLOR;
	if (factor == NVG_DST_COLOR)
		return GL_DST_COLOR;
	if (factor == NVG_ONE_MINUS_DST_COLOR)
		return GL_ONE_MINUS_DST_COLOR;
	if (factor == NVG_SRC_ALPHA)
		return GL_SRC_ALPHA;
	if (factor == NVG_ONE_MINUS_SRC_ALPHA)
		return GL_ONE_MINUS_SRC_ALPHA;
	if (factor == NVG_DST_ALPHA)
		return GL_DST_ALPHA;
	if (factor == NVG_ONE_MINUS_DST_ALPHA)
		return GL_ONE_MINUS_DST_ALPHA;
	if (factor == NVG_SRC_ALPHA_SATURATE)
		return GL_SRC_ALPHA_SATURATE;
	return GL_INVALID_ENUM;
}

static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
{
	GLNVGblend blend;
	blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
	blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
	blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
	blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
	if (blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM || blend.dstAlpha == GL_INVALID_ENUM)
	{
		blend.srcRGB = GL_ONE;
		blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA;
		blend.srcAlpha = GL_ONE;
		blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA;
	}
	return blend;
}
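
// The mapping above falls back to standard premultiplied-alpha blending
// (GL_ONE, GL_ONE_MINUS_SRC_ALPHA) when any factor is unknown. On the API side
// the composite state comes from nanovg.h, e.g. (usage sketch, assuming a valid
// NVGcontext* vg):
//
//	nvgGlobalCompositeOperation(vg, NVG_LIGHTER);
//	// or explicit blend factors:
//	nvgGlobalCompositeBlendFunc(vg, NVG_SRC_ALPHA, NVG_ONE_MINUS_SRC_ALPHA);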

static void glnvg__renderFlush(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;

	if (gl->ncalls > 0) {

		// Set up required GL state.
		glUseProgram(gl->shader.prog);

		glEnable(GL_CULL_FACE);
		glCullFace(GL_BACK);
		glFrontFace(GL_CCW);
		glEnable(GL_BLEND);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_SCISSOR_TEST);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glStencilMask(0xffffffff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		glStencilFunc(GL_ALWAYS, 0, 0xffffffff);
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, 0);
#if NANOVG_GL_USE_STATE_FILTER
		gl->boundTexture = 0;
		gl->stencilMask = 0xffffffff;
		gl->stencilFunc = GL_ALWAYS;
		gl->stencilFuncRef = 0;
		gl->stencilFuncMask = 0xffffffff;
		gl->blendFunc.srcRGB = GL_INVALID_ENUM;
		gl->blendFunc.srcAlpha = GL_INVALID_ENUM;
		gl->blendFunc.dstRGB = GL_INVALID_ENUM;
		gl->blendFunc.dstAlpha = GL_INVALID_ENUM;
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
		// Upload ubo for frag shaders
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
		glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW);
#endif

		// Upload vertex data
#if defined NANOVG_GL3
		glBindVertexArray(gl->vertArr);
#endif
		glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf);
		glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW);
		glEnableVertexAttribArray(0);
		glEnableVertexAttribArray(1);
		glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0);
		glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float)));

		// Set view and texture just once per frame.
		glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0);
		glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view);

#if NANOVG_GL_USE_UNIFORMBUFFER
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
#endif

		for (i = 0; i < gl->ncalls; i++) {
			GLNVGcall* call = &gl->calls[i];
			glnvg__blendFuncSeparate(gl,&call->blendFunc);
			if (call->type == GLNVG_FILL)
				glnvg__fill(gl, call);
			else if (call->type == GLNVG_CONVEXFILL)
				glnvg__convexFill(gl, call);
			else if (call->type == GLNVG_STROKE)
				glnvg__stroke(gl, call);
			else if (call->type == GLNVG_TRIANGLES)
				glnvg__triangles(gl, call);
		}

		glDisableVertexAttribArray(0);
		glDisableVertexAttribArray(1);
#if defined NANOVG_GL3
		glBindVertexArray(0);
#endif
		glDisable(GL_CULL_FACE);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
		glUseProgram(0);
		glnvg__bindTexture(gl, 0);
	}

	// Reset calls
	gl->nverts = 0;
	gl->npaths = 0;
	gl->ncalls = 0;
	gl->nuniforms = 0;
}

static int glnvg__maxVertCount(const NVGpath* paths, int npaths)
{
	int i, count = 0;
	for (i = 0; i < npaths; i++) {
		count += paths[i].nfill;
		count += paths[i].nstroke;
	}
	return count;
}

static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl)
{
	GLNVGcall* ret = NULL;
	if (gl->ncalls+1 > gl->ccalls) {
		GLNVGcall* calls;
		int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate
		calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
		if (calls == NULL) return NULL;
		gl->calls = calls;
		gl->ccalls = ccalls;
	}
	ret = &gl->calls[gl->ncalls++];
	memset(ret, 0, sizeof(GLNVGcall));
	return ret;
}

static int glnvg__allocPaths(GLNVGcontext* gl, int n)
{
	int ret = 0;
	if (gl->npaths+n > gl->cpaths) {
		GLNVGpath* paths;
		int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths/2; // 1.5x Overallocate
		paths = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * cpaths);
		if (paths == NULL) return -1;
		gl->paths = paths;
		gl->cpaths = cpaths;
	}
	ret = gl->npaths;
	gl->npaths += n;
	return ret;
}

static int glnvg__allocVerts(GLNVGcontext* gl, int n)
{
	int ret = 0;
	if (gl->nverts+n > gl->cverts) {
		NVGvertex* verts;
		int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
		verts = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * cverts);
		if (verts == NULL) return -1;
		gl->verts = verts;
		gl->cverts = cverts;
	}
	ret = gl->nverts;
	gl->nverts += n;
	return ret;
}

static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n)
{
	int ret = 0, structSize = gl->fragSize;
	if (gl->nuniforms+n > gl->cuniforms) {
		unsigned char* uniforms;
		int cuniforms = glnvg__maxi(gl->nuniforms+n, 128) + gl->cuniforms/2; // 1.5x Overallocate
		uniforms = (unsigned char*)realloc(gl->uniforms, structSize * cuniforms);
		if (uniforms == NULL) return -1;
		gl->uniforms = uniforms;
		gl->cuniforms = cuniforms;
	}
	ret = gl->nuniforms * structSize;
	gl->nuniforms += n;
	return ret;
}

static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i)
{
	return (GLNVGfragUniforms*)&gl->uniforms[i];
}

static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v)
{
	vtx->x = x;
	vtx->y = y;
	vtx->u = u;
	vtx->v = v;
}

static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
							  const float* bounds, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	NVGvertex* quad;
	GLNVGfragUniforms* frag;
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_FILL;
	call->triangleCount = 4;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	if (npaths == 1 && paths[0].convex)
	{
		call->type = GLNVG_CONVEXFILL;
		call->triangleCount = 0;	// Bounding box fill quad not needed for convex fill
	}

	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount;
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nfill > 0) {
			copy->fillOffset = offset;
			copy->fillCount = path->nfill;
			memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill);
			offset += path->nfill;
		}
		if (path->nstroke > 0) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	// Setup uniforms for draw calls
	if (call->type == GLNVG_FILL) {
		// Quad
		call->triangleOffset = offset;
		quad = &gl->verts[call->triangleOffset];
		glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f);
		glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f);
		glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f);

		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;
		// Simple shader for stencil
		frag = nvg__fragUniformPtr(gl, call->uniformOffset);
		memset(frag, 0, sizeof(*frag));
		frag->strokeThr = -1.0f;
		frag->type = NSVG_SHADER_SIMPLE;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f);
	} else {
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}

static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
								float strokeWidth, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_STROKE;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths);
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nstroke) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	if (gl->flags & NVG_STENCIL_STROKES) {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;

		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}

static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor,
								   const NVGvertex* verts, int nverts)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	GLNVGfragUniforms* frag;

	if (call == NULL) return;

	call->type = GLNVG_TRIANGLES;
	call->image = paint->image;
	call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);

	// Allocate vertices for all the paths.
	call->triangleOffset = glnvg__allocVerts(gl, nverts);
	if (call->triangleOffset == -1) goto error;
	call->triangleCount = nverts;

	memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts);

	// Fill shader
	call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
	if (call->uniformOffset == -1) goto error;
	frag = nvg__fragUniformPtr(gl, call->uniformOffset);
	glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, 1.0f, -1.0f);
	frag->type = NSVG_SHADER_IMG;

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}

static void glnvg__renderDelete(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;
	if (gl == NULL) return;

	glnvg__deleteShader(&gl->shader);

#if NANOVG_GL3
#if NANOVG_GL_USE_UNIFORMBUFFER
	if (gl->fragBuf != 0)
		glDeleteBuffers(1, &gl->fragBuf);
#endif
	if (gl->vertArr != 0)
		glDeleteVertexArrays(1, &gl->vertArr);
#endif
	if (gl->vertBuf != 0)
		glDeleteBuffers(1, &gl->vertBuf);

	for (i = 0; i < gl->ntextures; i++) {
		if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
			glDeleteTextures(1, &gl->textures[i].tex);
	}
	free(gl->textures);

	free(gl->paths);
	free(gl->verts);
	free(gl->uniforms);
	free(gl->calls);

	free(gl);
}


#if defined NANOVG_GL2
NVGcontext* nvgCreateGL2(int flags)
#elif defined NANOVG_GL3
NVGcontext* nvgCreateGL3(int flags)
#elif defined NANOVG_GLES2
NVGcontext* nvgCreateGLES2(int flags)
#elif defined NANOVG_GLES3
NVGcontext* nvgCreateGLES3(int flags)
#endif
{
	NVGparams params;
	NVGcontext* ctx = NULL;
	GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
	if (gl == NULL) goto error;
	memset(gl, 0, sizeof(GLNVGcontext));

	memset(&params, 0, sizeof(params));
	params.renderCreate = glnvg__renderCreate;
	params.renderCreateTexture = glnvg__renderCreateTexture;
	params.renderDeleteTexture = glnvg__renderDeleteTexture;
	params.renderUpdateTexture = glnvg__renderUpdateTexture;
	params.renderGetTextureSize = glnvg__renderGetTextureSize;
	params.renderViewport = glnvg__renderViewport;
	params.renderCancel = glnvg__renderCancel;
	params.renderFlush = glnvg__renderFlush;
	params.renderFill = glnvg__renderFill;
	params.renderStroke = glnvg__renderStroke;
	params.renderTriangles = glnvg__renderTriangles;
	params.renderDelete = glnvg__renderDelete;
	params.userPtr = gl;
	params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;

	gl->flags = flags;

	ctx = nvgCreateInternal(&params);
	if (ctx == NULL) goto error;

	return ctx;

error:
	// 'gl' is freed by nvgDeleteInternal.
	if (ctx != NULL) nvgDeleteInternal(ctx);
	return NULL;
}

#if defined NANOVG_GL2
void nvgDeleteGL2(NVGcontext* ctx)
#elif defined NANOVG_GL3
void nvgDeleteGL3(NVGcontext* ctx)
#elif defined NANOVG_GLES2
void nvgDeleteGLES2(NVGcontext* ctx)
#elif defined NANOVG_GLES3
void nvgDeleteGLES3(NVGcontext* ctx)
#endif
{
	nvgDeleteInternal(ctx);
}

#if defined NANOVG_GL2
int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GL3
int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES2
int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#elif defined NANOVG_GLES3
int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
#endif
{
	GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);

	if (tex == NULL) return 0;

	tex->type = NVG_TEXTURE_RGBA;
	tex->tex = textureId;
	tex->flags = imageFlags;
	tex->width = w;
	tex->height = h;

	return tex->id;
}

#if defined NANOVG_GL2
GLuint nvglImageHandleGL2(NVGcontext* ctx, int image)
#elif defined NANOVG_GL3
GLuint nvglImageHandleGL3(NVGcontext* ctx, int image)
#elif defined NANOVG_GLES2
GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image)
#elif defined NANOVG_GLES3
GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image)
#endif
{
	GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);
	return tex->tex;
}

#endif /* NANOVG_GL_IMPLEMENTATION */
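
// Texture interop sketch (GL3 names; a valid NVGcontext* vg, an existing GL
// texture glTexId and its size texW/texH are assumed):
//
//	// Wrap a texture created by the application so NanoVG can paint with it;
//	// NVG_IMAGE_NODELETE keeps NanoVG from deleting the GL handle later.
//	int img = nvglCreateImageFromHandleGL3(vg, glTexId, texW, texH,
//	                                       NVG_IMAGE_NODELETE | NVG_IMAGE_FLIPY);
//
//	// Look up the GL texture behind a NanoVG image, e.g. to sample it from
//	// the application's own shaders.
//	GLuint texId = nvglImageHandleGL3(vg, img);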