Trevor van Hoof

Technical artist


C++ snippets

Windows window

1
2
3
4
5
6
7
#include <windows.h>  
// Minimal Win32 entry point: opens a visible window with zero boilerplate.
INT WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
{
    // "edit" is a predefined system window class, so no RegisterClass call
    // is needed; CW_USEDEFAULT lets Windows pick position and size.
    HWND hWnd = CreateWindow("edit", NULL, WS_VISIBLE | WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, hInstance, NULL);
    // NOTE(review): busy-waits forever and never pumps messages, so the
    // window is unresponsive and one core is pegged — intentional for the
    // bare-minimum demo, not for real use.
    while(true);
    return 0;
}
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/// Window procedure: turns this window's destruction into a WM_QUIT message
/// so the application's message loop can exit; everything else is forwarded
/// to the default handler.
LRESULT CALLBACK CustomWindowProc(HWND hWnd, UINT Msg, WPARAM wParam, LPARAM lParam)
{
    switch(Msg)
    {
    case WM_DESTROY:
        // PostQuitMessage takes the process exit code, not a message id;
        // the original passed WM_QUIT (18) here, which became the exit code.
        PostQuitMessage(0);
        break;
    default:
        return DefWindowProc(hWnd, Msg, wParam, lParam);
    }
    return 0;
}

/// Registers a window class that uses the given WNDPROC and creates a
/// visible top-level window of that class.
/// Returns the window handle, or NULL on failure.
HWND CustomWindow(const char* name, HINSTANCE hInstance, WNDPROC callback)
{
    WNDCLASSEX WndClsEx = { 0 };
    WndClsEx.cbSize = sizeof(WNDCLASSEX);
    WndClsEx.style = CS_HREDRAW | CS_VREDRAW;
    WndClsEx.lpfnWndProc = callback;
    WndClsEx.lpszClassName = name;
    WndClsEx.hInstance = hInstance;
    // Without hCursor set, Windows never updates the cursor over this window
    // (it keeps whatever shape it last had) — a common raw-Win32 gotcha.
    WndClsEx.hCursor = LoadCursor(NULL, IDC_ARROW);
    RegisterClassEx(&WndClsEx);

    return CreateWindow(name, name, WS_VISIBLE | WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, hInstance, NULL);
}

/// Entry point: creates the custom window and runs a non-blocking
/// (PeekMessage) message pump until WM_QUIT arrives.
INT WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
{
    HWND hWnd = CustomWindow("UI", hInstance, CustomWindowProc);
    MSG msg;
    do
    {
        // PeekMessage never blocks, so this loop spins; that is the usual
        // shape for a render loop, where per-frame work goes beside it.
        if(PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
        {
            if(msg.message == WM_QUIT)
            {
                // WPARAM is wider than INT on x64; cast explicitly to avoid
                // the implicit-narrowing warning.
                return (INT)msg.wParam;
            }
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
    } while(true);
    // Unreachable trailing `return msg.wParam;` removed: the loop above can
    // only be left through the WM_QUIT return.
}

GL context

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
#include <windows.h>  

/// Creates & "makes current" (activates) an OpenGL target inside the given
/// window. Returns the window's device context.
HDC GLWindow(HWND hWnd)
{
    HDC hDC = GetDC(hWnd);
    // Fill in the fields ChoosePixelFormat documents as required; the
    // original zero-initialized nSize/nVersion/cColorBits positionally and
    // omitted PFD_DRAW_TO_WINDOW, which only worked by driver leniency.
    PIXELFORMATDESCRIPTOR pfd = { 0 };
    pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR);
    pfd.nVersion = 1;   // must be 1 per the API contract
    pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;
    pfd.iPixelType = PFD_TYPE_RGBA;
    pfd.cColorBits = 32;
    SetPixelFormat(hDC, ChoosePixelFormat(hDC, &pfd), &pfd);
    // Create a GL context on this DC and activate it; the context handle is
    // intentionally not kept — this demo never tears it down.
    wglMakeCurrent(hDC, wglCreateContext(hDC));
    return hDC;
}

// Demo entry point: clears the window and fills it with a flat color quad
// every frame, forever.
INT WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
{
    // Reuses the system "edit" class to avoid registering a window class.
    HWND hWnd = CreateWindow("edit", NULL, WS_VISIBLE | WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, NULL, NULL, hInstance, NULL);
    HDC hDC = GLWindow(hWnd);
    glClearColor(0.1f, 0.2f, 0.3f, 1.0f);
    // NOTE(review): no message pump and no exit condition — the window is
    // unresponsive and the loop runs until the process is killed.
    do
    {
        glClear(GL_COLOR_BUFFER_BIT);
        glColor3f(1.0f, 0.9f, 0.8f);
        // In default GL clip space (-1..1 on both axes) this rect covers the
        // whole viewport.
        glRecti(-1, -1, 1, 1);
        SwapBuffers(hDC);
    } while(true);
    return 0;
}

GL pixel space

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
/// Mirror mode bitmask
enum class Mirror
{
    None = 0b00,
    Horizontal = 0b01,
    Vertical = 0b10,
    // Inside the enumerator list the values still have the underlying
    // integer type, so | is legal here even without the operator below.
    Both = Horizontal | Vertical,
};
/// Operators for the bit mask (enum class defines no arithmetic by default).
Mirror operator&(Mirror lhs, Mirror rhs){ return (Mirror)((int)lhs & (int)rhs); }
void GLPixelSpace(HWND hWnd, Mirror flip = Mirror::None)
{
    /// Make viewport coordinates match pixel coordinates; requires a "Current" GL context (wglMakeCurrent, as initialized for us by GLWindow).
    const bool mirrorX = (flip & Mirror::Horizontal) == Mirror::Horizontal;
    const bool mirrorY = (flip & Mirror::Vertical) == Mirror::Vertical;
    // Move the origin into the corner we will scale away from.
    glTranslatef(mirrorX ? 1.0f : -1.0f, mirrorY ? 1.0f : -1.0f, 0.0f);
    // Get the window's draw-able area.
    RECT area;
    GetClientRect(hWnd, &area);
    // Scale so 1 unit matches 1 pixel; clip space spans 2 units per axis,
    // negated when mirroring.
    glScalef((mirrorX ? -2.0f : 2.0f) / (area.right - area.left),
             (mirrorY ? -2.0f : 2.0f) / (area.bottom - area.top), 1.0f);
}

/*
Call this once before entering the drawing loop to have it as default, or use
glPushMatrix();  
glLoadIdentity();  
GLPixelSpace(hWnd);  
// Pixel-space drawing code here  
glPopMatrix();  
*/

Windows GL demo

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
// Demo: draw text with fontstash's OpenGL backend (glfons*) inside a raw
// Win32 + GL window. Depends on GLWindow/GLPixelSpace defined earlier and
// on the fontstash library (FONScontext, fons*, glfons*).
INT WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
{
    HWND hWnd = CreateWindow("edit", NULL, WS_VISIBLE | WS_OVERLAPPEDWINDOW, 100, 100, 800, 600, NULL, NULL, hInstance, NULL);
    HDC hDC = GLWindow(hWnd);
    // Mirror Y so (0,0) is the top-left pixel, matching FONS_ZERO_TOPLEFT.
    GLPixelSpace(hWnd, Mirror::Vertical);

    // 512x512 is the size of the glyph atlas texture.
    FONScontext* fs = glfonsCreate(512, 512, FONS_ZERO_TOPLEFT);
    int fontNormal = fonsAddFont(fs, "sans", "C:/Windows/Fonts/Calibri.ttf");

    glClearColor(0.3f, 0.3f, 0.32f, 1.0f);

    // NOTE(review): (255,180,70) reads as orange rather than yellow.
    unsigned int yellow = glfonsRGBA(255, 180, 70, 255);
    fonsClearState(fs);
    fonsSetFont(fs, fontNormal);
    fonsSetColor(fs, yellow);

    do
    {
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);

        // Glyph quads need alpha blending; depth testing would clip them.
        glDisable(GL_DEPTH_TEST);
        glEnable(GL_BLEND);
        glBlendFunc(GL_SRC_ALPHA,GL_ONE_MINUS_SRC_ALPHA);

        fonsSetSize(fs, 48);
        float y = 0;
        fonsSetAlign(fs, FONS_ALIGN_LEFT | FONS_ALIGN_TOP);
        fonsDrawText(fs, 4, y, "The quick brown",NULL);
        y += 48;
        fonsSetSize(fs, 24);
        fonsDrawText(fs, 4, y, "fox jumped over the lazy dog",NULL);

        // Visualizes the glyph atlas texture for debugging.
        fonsDrawDebug(fs, 800.0, 50.0);

        glEnable(GL_DEPTH_TEST);
        glDisable(GL_BLEND);
        SwapBuffers(hDC);
    }
    // No message pump: spin until the escape key is pressed (async state).
    while(!GetAsyncKeyState(VK_ESCAPE));

    glfonsDelete(fs);

    return 0;
}

GL fonts

fontstash

Windows format strings & dialogs

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
#include <Windows.h>
#undef min
#undef max
#include <cstdarg>
#include <cstdio>

// Caller owns the return value
static char* _FormatStr(const char* fmt, va_list args)
{
    size_t size;
#pragma warning(suppress:28719)    // 28719
    size = vsnprintf(nullptr, 0, fmt, args);

    char* message = new char[size + 1u];
    vsnprintf(message, size + 1u, fmt, args);
    message[size] = '\0';

    return message;
}

// Caller owns the return value
char* FormatStr(const char* fmt, ...)
{
    va_list args;
    __crt_va_start(args, fmt);
    char* message = _FormatStr(fmt, args);
    __crt_va_end(args);
    return message;
}

// Formats the message, then either writes it to the debugger output (when a
// debugger is attached) or shows it in a message box. `flags` are MessageBox
// MB_* flags.
static void _Message(const char* title, unsigned int flags, const char* fmt, va_list args)
{
    const char* message = _FormatStr(fmt, args);
    if (IsDebuggerPresent())
        OutputDebugStringA(message);
    else
        MessageBoxA(0, message, title, flags);
    // _FormatStr allocates with new[]; the original's plain `delete` on a
    // new[] buffer was undefined behavior.
    delete[] message;
}

// Shows/logs an informational printf-style message (see _Message).
void Info(const char* fmt, ...)
{
    va_list args;
    // Standard va_start instead of the MSVC-internal __crt_va_start.
    va_start(args, fmt);
    _Message("Info", MB_OK | MB_ICONINFORMATION, fmt, args);
    va_end(args);
}

// Shows/logs a warning, then breaks into the debugger when one is attached
// (after the message has been emitted, so it is visible first).
void Warning(const char* fmt, ...)
{
    va_list args;
    // Standard va_start instead of the MSVC-internal __crt_va_start.
    va_start(args, fmt);
    _Message("Warning", MB_OK | MB_ICONWARNING, fmt, args);
    va_end(args);
    if (IsDebuggerPresent())
        DebugBreak();
}

// Shows/logs a (recoverable) error, then breaks into the debugger when one
// is attached. Unlike Fatal(), execution continues afterwards.
void Error(const char* fmt, ...)
{
    va_list args;
    // Standard va_start instead of the MSVC-internal __crt_va_start.
    va_start(args, fmt);
    _Message("Error", MB_OK | MB_ICONEXCLAMATION, fmt, args);
    va_end(args);
    if (IsDebuggerPresent())
        DebugBreak();
}

// Shows/logs a fatal error, then terminates the process — unless a debugger
// is attached, in which case it breaks instead so state can be inspected.
// `fmt` is now const char* (the original took char*, which is inconsistent
// with the other functions here and rejects string literals in conforming
// C++); all existing callers remain valid.
void Fatal(const char* fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    _Message("Error", MB_OK | MB_ICONEXCLAMATION, fmt, args);
    va_end(args);
    if (IsDebuggerPresent())
        DebugBreak();
    else
        ExitProcess(1);  // non-zero: a fatal error must not report success
}

/// Soft assert: breaks into the debugger (when one is attached) if the
/// expression is false; otherwise does nothing.
void Assert(bool expression)
{
    const bool failed = !expression;
    if (failed && IsDebuggerPresent())
        DebugBreak();
}

// Soft assert with a printf-style failure message: when the expression is
// false, shows/logs the message, then breaks if a debugger is attached.
// `fmt` is now const char* for consistency with the AssertFatal overload
// (the original char* rejects string literals in conforming C++); existing
// callers remain valid.
void Assert(bool expression, const char* fmt, ...)
{
    if (expression)
        return;
    va_list args;
    va_start(args, fmt);
    _Message("Error", MB_OK | MB_ICONEXCLAMATION, fmt, args);
    va_end(args);
    if (IsDebuggerPresent())
        DebugBreak();
}

// Hard assert: breaks under a debugger, otherwise terminates the process
// when the expression is false.
void AssertFatal(bool expression)
{
    if (expression)
        return;
    if (IsDebuggerPresent())
        DebugBreak();
    else
        ExitProcess(1);  // non-zero: an assertion failure must not report success
}

// Hard assert with a printf-style failure message: shows/logs the message,
// then breaks under a debugger or terminates the process otherwise.
void AssertFatal(bool expression, const char* fmt, ...)
{
    if (expression)
        return;
    va_list args;
    // Standard va_start instead of the MSVC-internal __crt_va_start.
    va_start(args, fmt);
    _Message("Error", MB_OK | MB_ICONEXCLAMATION, fmt, args);
    va_end(args);
    if (IsDebuggerPresent())
        DebugBreak();
    else
        ExitProcess(1);  // non-zero: an assertion failure must not report success
}

Python Range Collection

On several occasions in the past year I needed to describe a set of (time) ranges and find the gaps in between them. I used it for finding pauses during animations, to trigger different events, fill up the animation, or simply hide non-animated props.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
class Range(object):
    '''
    Describes a range of integer values with an interval of +1,
    describing a set similar to python's range(int start, int end).

    Start is inclusive, end is exclusive, like with for loops.
    '''
    def __init__(self, start, end):
        # Normalize so start <= end regardless of argument order.
        bounds = sorted((int(start), int(end)))
        self.start = bounds[0]
        self.end = bounds[1]
        if self.start == self.end:
            raise ValueError('Range() can not express a range of size 0; did you mean TimeRange()?')

    def intersects(self, other):
        # Touching ranges (end == other.start) count as intersecting so they
        # can be merged by RangeCollection.
        return self.start <= other.end and other.start <= self.end

    def combine(self, other):
        # Grow this range in place to enclose both.
        if other.start < self.start:
            self.start = other.start
        if other.end > self.end:
            self.end = other.end

    def __repr__(self):
        return 'range[%s,%s)'%(self.start, self.end)

    def __iter__(self):
        # Exclusive end, matching for-loop semantics.
        value = self.start
        while value < self.end:
            yield value
            value += 1


class TimeRange(object):
    '''
    A Range() with inclusive end-value; allows for start == end.
    See Range() and RangeCollection() for more information.
    '''
    def __init__(self, start, end):
        # Normalize so start <= end regardless of argument order.
        bounds = sorted((int(start), int(end)))
        self.start = bounds[0]
        self.end = bounds[1]

    def intersects(self, other):
        # Adjacent frames also count (end + 1 touches other.start), so
        # [0,2] merges with [3,5].
        return other.start - 1 <= self.end and self.start <= other.end + 1

    def combine(self, other):
        # Grow this range in place to enclose both.
        if other.start < self.start:
            self.start = other.start
        if other.end > self.end:
            self.end = other.end

    def __repr__(self):
        return 'range[%s,%s]'%(self.start, self.end)

    def __iter__(self):
        # Inclusive end, hence <=.
        value = self.start
        while value <= self.end:
            yield value
            value += 1


class RangeCollection(object):
    '''
    A list of Range() or TimeRange() objects that is consolidated so not a single instance
    overlaps another one. Allows for consolidated range iteration using segments() and
    remaining gap iteration using gaps().

    NOTE(review): despite the wording above, `segments` is a plain list
    attribute, not a method.
    '''
    def __init__(self):
        # Consolidated, mutually non-overlapping segments; only sorted
        # on demand by gaps()/iterRangeFrames().
        self.segments = []

    def addSegment(self, inRange):
        # Merge inRange into the collection, collapsing segments it touches.
        # The stored objects are mutated/aliased directly (no copies made).
        state = None
        for i in xrange(len(self.segments)):
            segment = self.segments[i]
            if segment.intersects(inRange):
                if state is not None:
                    # If we found two consecutive intersections we close the gap.
                    # NOTE(review): inRange itself is never combined on this
                    # path — an inRange extending past both matched segments
                    # loses its overhang; confirm this is intended.
                    state.combine(segment)
                    self.segments.pop(i)
                    return
                # If we found the first intersection we check the next node as well.
                state = segment
                continue
            if state is not None:
                # A non-intersecting segment after a hit: stop scanning, only
                # the single matched segment needs extending.
                break
        if state is not None:
            # If we only found the first intersection we extend the node.
            state.combine(inRange)
            return
        # if we found no intersections we append the new data.
        self.segments.append(inRange)

    def gaps(self, inStart=None, inEnd=None, wantsInclusiveRange=False):
        # Yield the uncovered ranges between consolidated segments, optionally
        # clamped to [inStart, inEnd].
        # NOTE(review): assumes at least one segment exists (else IndexError),
        # and, when inStart is given, that some segment starts at/after it.
        self.segments.sort(key=lambda x:  x.start)
        offset = 0
        if inStart is None:
            start = self.segments[0].start
        else:
            start = inStart
            # Skip segments that start before the requested window.
            while self.segments[offset].start < inStart:
                offset += 1
        end = None
        for i in xrange(offset, len(self.segments)):
            end = self.segments[i].start
            if end - start == 0:
                # Zero-size gap: advance past this segment. isinstance() is
                # used as +1 for TimeRange, whose end is inclusive.
                start = self.segments[i].end + isinstance(self.segments[i], TimeRange)
                continue
            if wantsInclusiveRange:
                yield TimeRange(start, end-1)
            else:
                yield Range(start, end)
            start = self.segments[i].end + isinstance(self.segments[i], TimeRange)
        if inEnd is not None:
            # Trailing gap up to the requested end.
            # NOTE(review): this uses `end` (the last segment's *start*)
            # rather than `start`, and `end` is None when the loop never ran;
            # both look suspicious — confirm against expected output.
            if wantsInclusiveRange:
                yield TimeRange(end, inEnd)
            else:
                yield Range(end, inEnd)

    def iterGapFrames(self, inStart=None, inEnd=None, wantsInclusiveRange=False):
        # Flatten gaps() into individual frame numbers.
        for gap in self.gaps(inStart, inEnd, wantsInclusiveRange):
            for i in gap:
                yield i

    def iterRangeFrames(self, inStart=None, inEnd=None):
        # Iterate every frame covered by a segment, optionally clamped to
        # [inStart, inEnd] (both inclusive bounds on the emitted frames).
        self.segments.sort(key=lambda x:  x.start)
        for segment in self.segments:
            for i in segment:
                if inStart is not None and i < inStart:
                    continue
                if inEnd is not None and i > inEnd:
                    continue
                yield i


if __name__ == '__main__':
    # Quick self-test: feed overlapping/adjacent ranges in shuffled order to
    # show they consolidate, then list the gaps left inside [20, 400].
    timeline = RangeCollection()
    testData = [(2, 5), (4, 8), (2, 3), (44, 60), (10, 43), (80, 90), (100, 110), (200, 210), (220, 230), (210, 220), (300, 310), (320, 330), (311, 319)]
    for timeRange in testData:
        timeline.addSegment(TimeRange(*timeRange))
    # Python 2 print statements — this script predates print().
    print timeline.segments
    print list(timeline.gaps(inStart=20, inEnd=400, wantsInclusiveRange=True))

Classes & Javascript relations

Javascript uses objects for everything, these objects are based on prototypes, their definitions, which are also objects.

You can create an object and extend its prototype, then instantiate this object to get an instance of this prototype. Just like any class definition & instance you can then have the instance operate independently of the prototype.

classes

So the first thing is the class definition, in javascript, this is a function. You give a function your class name and any member variables & default values can be set inside this function.

1
2
3
4
// "Class definition": in pre-ES6 javascript a class is just a constructor
// function; member variables and their defaults are assigned to `this` here.
function BaseClass()
{
    this.a = 'b';
}

Now to instantiate the class, much like C# syntax, you do the following:

1
// Instantiate with `new`, much like C# syntax.
var myObject = new BaseClass();

'myObject' will now have an 'a' property, with a value of 'b'.

functions

To add functions we must extend the prototype:

1
2
3
4
5
6
7
8
// Constructor: member variables and their defaults.
function BaseClass()
{
    this.a = 'b'
}
// Methods live on the prototype so all instances share one function object.
BaseClass.prototype.log = function()
{
    console.log(this.a);
}

And similarly, to have this function use the console (a global variable most browsers provide for debugging) print our 'a' value we can simply use this:

1
2
// `log` is found on BaseClass.prototype via the prototype chain.
var myObject = new BaseClass();
myObject.log();

static properties

Private static properties can be local variables in between the prototype functions; public static properties can be added to the class object instead of its prototype. Contrary to other languages, these properties cannot be accessed through the `this` object at all, and the private properties are just a hack: by placing variables in a temporary scope, they remain inaccessible even when the prototype is dynamically altered later on.

public statics

1
2
3
4
// Public static: attached to the constructor object itself, not the
// prototype, so it is called as BaseClass.staticLogSomething(), never
// through an instance.
BaseClass.staticLogSomething = function()
{
    console.log('something');
}

private statics

These are often wrapped in a surrounding function like so (notice the return!):

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
// Private statics: an immediately-invoked function expression keeps
// `privateStatic` in a closure only these functions can reach, then returns
// the constructor (note the () at the very end!).
var BaseClass = function()
{
    var privateStatic = 0;
    function BaseClass()
    {
        this.a = 'b'
    }
    BaseClass.prototype.log = function()
    {
        console.log(this.a);
    }
    // Shared hidden counter: each call logs the current value, then bumps it.
    BaseClass.staticLogSomething = function()
    {
        console.log(privateStatic);
        privateStatic += 1;
    }
    return BaseClass;
}();

subclassing

Subclassing actually means creating a new class and then extending its prototype with the base class prototype so we share its functions and even know its constructor.

Then inside the constructor function we can use the base class constructor to inherit all initialized member variables. The hacky statics as described above won't transfer because they are members of the base class definition object, which is a level above the prototype (which is the bit we inherit).

1
2
3
4
5
// Subclass: run the base constructor on the new instance so it inherits the
// initialized member variables...
function SubClass()
{
    BaseClass.call(this);
}
// ...and chain the prototypes so methods are inherited too.
SubClass.prototype = Object.create(BaseClass.prototype);

That's all there is to it. Now we can extend this function by adding properties, overriding properties, etcetera. This second subclass overrides the 'a' and 'log' properties and adds a 'b' property which is also logged.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
// Subclass that overrides inherited data ('a') and adds its own ('b').
function SubClass2()
{
    BaseClass.call(this);
    this.a = 'c';
    this.b = 'd';
}

SubClass2.prototype = Object.create(BaseClass.prototype);

// Shadows BaseClass.prototype.log on SubClass2's own prototype.
SubClass2.prototype.log = function()
{
    console.log(this.a);
    console.log(this.b);
}

Now this is some test code, putting these three classes together you can clearly see the functionality:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
// Test drive of the three classes above.
var a = new SubClass();
a.log();
var c = new SubClass();
c.log(); // to prove the sub class has the base class data & functions
var d = new SubClass2();
d.log(); // to prove the sub class can override this data
console.log(d.a); // to prove things are accessible and 'd' == 'this' inside its functions
d.a = 'f';
console.log(d.a); // to prove we can alter values
var e = new SubClass2();
console.log(e.a); // to prove that that does not affect the prototype

// now let's see what statics do when inherited
var iBase = new BaseClass();
var iSub = new SubClass();
BaseClass.staticLogSomething();
SubClass.staticLogSomething(); // this will trigger an error because staticLogSomething must be accessed by its object, in this case the BaseClass definition object

calling base functions

One last thing to add, when you wish to call a baseclass function inside your subclass, all you need to do is 'call' its function via the prototype and pass in a reference to this (and any other arguments after that).

So essentially Subclass2.log could have been this:

1
2
3
4
5
// Call the base implementation explicitly (like super.log() elsewhere),
// then extend it.
SubClass2.prototype.log = function()
{
    BaseClass.prototype.log.call(this);
    console.log(this.b);
}

Maya snippets

Polygon volume

I wanted to compute the volume of a mesh in Maya. It was surprisingly simple and elegant to do as well! Using the Divergence Theorem (which is unreadable to me when written mathematically) the only constraints are: the mesh must be closed (no holes, borders or tears; Maya's fill-hole can help), and the mesh must be triangulated (using the Maya API you can already query triangles, so no need to manually triangulate in this case).

Now imagine to compute the volume of a prism. All you need is the area of the base triangle * the height. To compute the base I use Heron's formula as described here.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
def distance(a, b):
    '''Distance between two points projected onto the XY plane (Z ignored).'''
    return sqrt((b[0]-a[0])*(b[0]-a[0])+
      (b[1]-a[1])*(b[1]-a[1]))

def getTriangleArea(pt0, pt1, pt2):
    '''XY-projected triangle area via Heron's formula.

    Fix: the original signature read (pt0, p1, p2) while the body used
    pt1/pt2, so every call raised NameError (the later, corrected copy of
    this snippet confirms pt1/pt2 is the intent).
    '''
    a = distance(pt1, pt0)
    b = distance(pt2, pt0)
    c = distance(pt2, pt1)
    s = (a+b+c) * 0.5  # semi-perimeter
    return sqrt(s * (s-a) * (s-b) * (s-c))

Now notice how this only computes the triangle area in the XY plane. This works simply because the 2D projection of the triangle area is all we need. The height is then defined by the triangle's center Z.

1
2
def getTriangleHeight(pt0, pt1, pt2):
    '''Height of the prism under the triangle: the average Z of its corners.'''
    zSum = pt0[2] + pt1[2] + pt2[2]
    return zSum * 0.33333333

Consider any triangle, extrude it down to the floor, and see that this works for any prism defined along the Z axis this way.

A rotated triangle's area in the XY plane is smaller than the actual area, but by using the face-center the volume will remain accurate.

Screenshot of flat top and slanted top prisms

Now these prisms have the same volume. The trick is to consider every triangle as such a prism, so call getTriangleVolume on each triangle. The last problem is negative space, for this we compute the normal. I use maya's normals, so the volume is negative if all normals are inversed, but you can compute them all the same.

1
2
3
4
5
6
7
8
9
def getTriangleVolume(pt0, pt1, pt2):
    '''Signed prism volume between the triangle and the Z=0 plane.

    The sign follows the triangle's winding in the XY plane, so
    downward-facing triangles subtract volume.
    '''
    # Computed before the winding check, matching the original evaluation order.
    volume = getTriangleArea(pt0, pt1, pt2) * getTriangleHeight(pt0, pt1, pt2)
    # z-component of the 2D cross product encodes the winding direction
    winding = (pt1[0]-pt0[0]) * (pt2[1]-pt0[1]) - (pt1[1]-pt0[1]) * (pt2[0]-pt0[0])
    if winding > 0:
        return volume
    if winding < 0:
        return -volume
    return 0

The selected wireframe shows the prism defined by the bottom triangle, because the normal.z points downwards it will become negative volume. So adding the initial prism volume and this prism volume will give the accurate volume of this cut off prism. Now consider this:

Screenshots of additive and subtractive prism volumes

To avoid confusion I placed the object above the grid; but below the grid a negative normal * a negative height will still add volumes appropriately.

So that's it.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
from math import sqrt  
from maya.OpenMaya import MItMeshPolygon, MDagPath, MSelectionList, MPointArray, MIntArray  


def distance(a, b):
    '''Length of the XY-plane projection of the segment a->b (Z is ignored).'''
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return sqrt(dx * dx + dy * dy)

def getTriangleArea(pt0, pt1, pt2):
    '''XY-projected area of the triangle, via Heron's formula.'''
    a = distance(pt1, pt0)
    b = distance(pt2, pt0)
    c = distance(pt2, pt1)
    s = (a + b + c) * 0.5  # semi-perimeter
    return sqrt(s * (s - a) * (s - b) * (s - c))

def getTriangleHeight(pt0, pt1, pt2):
    '''Average Z of the corners: the height of the prism under the triangle.'''
    return (pt0[2] + pt1[2] + pt2[2]) * 0.33333333

def getTriangleVolume(pt0, pt1, pt2):
    '''Signed volume of the prism between the triangle and the Z=0 plane.

    The sign follows the triangle's winding in the XY plane, so
    downward-facing triangles subtract volume (divergence theorem).
    '''
    # Computed before the winding check, matching the original evaluation order.
    volume = getTriangleArea(pt0, pt1, pt2) * getTriangleHeight(pt0, pt1, pt2)
    # z-component of the 2D cross product encodes the winding direction
    winding = (pt1[0] - pt0[0]) * (pt2[1] - pt0[1]) - (pt1[1] - pt0[1]) * (pt2[0] - pt0[0])
    if winding > 0:
        return volume
    if winding < 0:
        return -volume
    return 0

def getPolygonVolume(shapePathName):
    '''Signed volume of a closed, triangulable Maya mesh: sums the signed
    prism volume of every triangle (divergence theorem).

    shapePathName: name/path of the mesh shape node, e.g. 'pCubeShape1'.
    The result is negative when all normals face inward; the mesh must be
    closed for the sum to be meaningful.
    '''
    volume = 0
    li = MSelectionList()
    li.add(shapePathName)
    path = MDagPath()
    li.getDagPath(0, path)
    # `iter` shadows the builtin; harmless within this function.
    iter = MItMeshPolygon(path)
    while not iter.isDone():
        points = MPointArray()
        # Maya returns this face's triangulation as a flat point list,
        # three points per triangle.
        iter.getTriangles(points, MIntArray())
        for i in range(0, points.length(), 3):
            volume += getTriangleVolume(points[i], points[i+1], points[i+2])
        iter.next()
    return volume

Geodesic Sphere with UI

Screenshot of geospheres and tool interface

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
from maya import mel
from maya.OpenMaya import *
from maya import cmds


def makegeosphere(in_divisions, in_radius):
    '''Create a geodesic sphere.

    in_divisions: 1 returns an octahedron-based platonic solid as-is; >= 2
    starts from an icosahedron, optionally smooths, and re-projects every
    vertex onto the sphere of radius in_radius.
    NOTE(review): the return type differs per branch — a list for
    in_divisions == 1, a long-name string for >= 2, None for < 1; confirm
    callers cope with all three.
    '''
    if in_divisions == 1 :
        return cmds.polyPlatonicSolid(r=in_radius,st=2,cuv=4,ch=False)
    elif in_divisions >= 2:
        out_obj = cmds.polyPlatonicSolid(r=in_radius,st=1,cuv=4,ch=False)
        out_obj = cmds.ls(out_obj,l=True)[0]
        # Rotate the icosahedron, then bake the transform into the points.
        cmds.xform(out_obj,ro=[0,0,31.717])
        cmds.makeIdentity(out_obj,apply=True)
        if in_divisions > 2:
            #linear smooth gives instant right topology, but deforms
            #the lines to be curved so we can't have nice domes
            #AND is actually (although by 5 to -1 millisecond on 6 divisions) slower than this method
            for i in range(3,in_divisions,1):
                nf = cmds.polyEvaluate(out_obj,face=True)
                cmds.polySmooth(out_obj, mth=0, dv=1, c=0, ch=False)
                nvtx = cmds.polyEvaluate(out_obj,vertex=True)
                # The smooth step appends one new vertex per original face at
                # the end of the vertex list; delete those and re-triangulate.
                cmds.select('%s.vtx[%s:%s]'%(out_obj, nvtx-nf, nvtx))
                mel.eval('DeleteVertex;')
                cmds.polyTriangulate(out_obj)
        # Push every vertex out to exactly in_radius via the API (fast path).
        li = MSelectionList()
        MGlobal.getSelectionListByName(out_obj, li)
        path = MDagPath()
        li.getDagPath(0, path)
        iter = MItMeshVertex(path)
        mesh = MFnMesh(path)
        while not iter.isDone():            
            #defaults to object space
            point = iter.position()
            mesh.setPoint( iter.index(), MPoint( MVector(point).normal()*in_radius ) )
            iter.next()
        cmds.select(out_obj)
        return out_obj

# Minimal tool window: divisions + radius sliders, Confirm builds the sphere.
w = cmds.window(title='GeoSphere Creator')
cmds.columnLayout()
cmds.rowLayout(nc=2)
ds = cmds.intSliderGrp(label='Divisions', field=True, fieldMinValue=1, minValue=1, maxValue=10, value=4, cw3=[42,42,136], width=220)
rs = cmds.floatSliderGrp(label='Radius', field=True, fieldMinValue=0.001, minValue=0.001, value=1.0, cw3=[42,42,136], width=220)
cmds.setParent('..')
cmds.rowLayout(nc=2)
# Button commands are code strings evaluated on click; the slider/window
# names are baked in via %s so the callbacks can query the sliders and then
# close the window.
cmds.button('Confirm', w=220, c='makegeosphere( cmds.intSliderGrp("%s",q=True,v=True), cmds.floatSliderGrp("%s",q=True,v=True) ); cmds.deleteUI("%s")'%(ds, rs, w))
cmds.button('Cancel', w=220, c='cmds.deleteUI("%s")'%w)
cmds.showWindow(w)

Enum attribute field names

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
from maya import cmds
# create enum attribute with custom labels & values
null = cmds.group(em=True)
cmds.addAttr(null, ln='color', at='enum', enumName='Red=1:Green=2:Blue=5')
# get enum names and values
enumNames = cmds.addAttr(null + '.color', enumName=True, q=True)
# enumNames is now 'Red=1:Green:Blue=5'
# process the names into a python dict, mapping to the underlying values
# cursor tracks the last value seen; enum values start counting at 0, hence -1
cursor = -1
result = {}
for name_value in enumNames.split(':'):
    # if there is a custom value, we use that value
    if '=' in name_value:
        name, value = name_value.rsplit('=', 1)
    else:
        # if there is no custom value, we use the previous value + 1
        # NOTE: as you can see from enumNames, Green does not return its value, because it is simply the preceding value + 1
        name, value = name_value, cursor + 1
    result[name] = int(value)
    # update cursor
    cursor = int(value)
# Python 2 print statement
print result

Advanced locator

This is another take on the locator. It supports multiple shapes and can have a unique color instead of only Maya's built-in colors.

It does not:

Actually draw curves (I just called it that because I usually use degree-1 curves as controls, it just uses GL_LINES). Support separate colors per shape. It is in the end one shape node.

It does:

Save you from the hassle of parenting curve shapes manually and having other scripts break because you suddenly have too many (shape) children. Support any color!

Plugin:

Compiled against Maya 2014

MLL file

Source:

Solution is Visual Studio 2013

Source

The code that made the preview image:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
from maya import cmds
# qt=True: quiet — do not error if the plugin is already loaded
cmds.loadPlugin("CurveLocator.mll", qt=True)

#david star: a single closed 12-point outline in shapes[0]
l = cmds.createNode("CurveLocator", n='CurveLocatorShape')
cmds.setAttr('%s.shapes[0].closed'%l, True)
cmds.setAttr('%s.shapes[0].point[0]'%l, 1.8, -1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[1]'%l, 0.6, -1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[2]'%l, 0, -2, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[3]'%l, -0.6, -1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[4]'%l, -1.8, -1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[5]'%l, -1.2, 0, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[6]'%l, -1.8, 1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[7]'%l, -0.6, 1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[8]'%l, 0, 2, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[9]'%l, 0.6, 1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[10]'%l, 1.8, 1, 0, type='double3')
cmds.setAttr('%s.shapes[0].point[11]'%l, 1.2, 0, 0, type='double3')
# the node supports an arbitrary RGB color, not just Maya's color indices
cmds.setAttr('%s.color'%l, 0.8, 0.5, 0.1, type='float3')

#joint like shape: three closed 36-point circles, one per axis plane
from math import sin, cos, pi
l = cmds.createNode("CurveLocator", n='CurveLocatorShape')
cmds.setAttr('%s.shapes[0].closed'%l, True)
cmds.setAttr('%s.shapes[1].closed'%l, True)
cmds.setAttr('%s.shapes[2].closed'%l, True)
for i in range(36):
    cmds.setAttr('%s.shapes[0].point[%s]'%(l, i), cos(i / 18.0 * pi), -sin(i / 18.0 * pi), 0, type='double3')
    cmds.setAttr('%s.shapes[1].point[%s]'%(l, i), cos(i / 18.0 * pi), 0, -sin(i / 18.0 * pi), type='double3')
    cmds.setAttr('%s.shapes[2].point[%s]'%(l, i), 0, cos(i / 18.0 * pi), -sin(i / 18.0 * pi), type='double3')
cmds.setAttr('%s.color'%l, 0.0, 0.87, 0.3, type='float3')

#circle: open shape (closed flag left default), 36 samples of a full turn
l = cmds.createNode("CurveLocator", n='CurveLocatorShape')
for i in range(36):
    cmds.setAttr('%s.shapes[0].point[%s]'%(l, i), cos(i / 18.0 * pi), -sin(i / 18.0 * pi), 0, type='double3')
cmds.setAttr('%s.color'%l, 1.0, 0.1, 0.8, type='float3')

#jagged circle: angle in radians per integer step, so points wrap irregularly
l = cmds.createNode("CurveLocator", n='CurveLocatorShape')
for i in range(36):
    cmds.setAttr('%s.shapes[0].point[%s]'%(l, i), cos(i), -sin(i), 0, type='double3')
cmds.setAttr('%s.color'%l, 1.0, 0.1, 0.8, type='float3')

Maya quaternion & matrix operation order

Here are some pointers I had to learn the hard way, and don't ever want to forget.

MQuaternion(MVector a, MVector b)

constructs the rotation to go from B to A!
So if you have an arbitrary aim vector and wish to go from world space to that aim vector use something like

MQuaternion(aimVector, MVector::xAxis)

The documentation is very ambiguous about this. Or rather, it makes you think the opposite!

If you wish to combine matrices in Maya, think of how children and parents relate in the 3D scene to determine the order of multiplication. Children go first, e.g.

(leafMatrix * parentMatrix) * rootMatrix

Another way to think about it is adding rotations. So if you have a rotation and you wish to add some rotation to it, you generally parent an empty group to it and rotate that, so you again get this relationship of

additionalRotation * existingRotation

A little note: not sure if adding quaternion rotations works in the same way; should check!

More conventions to come hopefully!

Monster Black Hole - Pythius

Pythius - Monster Black Hole (Official Video) [Blackout]

Pythius makes Drum & Bass and is a friend of mine. So when he told me he was doing a new track and I could make the visuals I didn't think twice about it!

The short version

The video was made using custom software and lots of programming! Generally 3D videos are made with programs that calculate the result, which can take minutes or even hours. This means that every time you change something you have to wait to see whether the result is more to your liking. With the custom software that we made, everything is instantly updated and we are looking at the end result at all time. This makes tweaking anything, from colors and shapes to animation, a breeze. It allows for much iteration as we want and turns the video creation process into an interactive playground.

The technique we use generates all the visuals with code, there are very few images and no 3D model files. Everything you see on the screen is visualized through maths. As a side effect of not using big 3D model files, the code that can generate the entire video is incredibly small. About 300 kilobytes, 10 thousand times smaller than the video file it produced!

The details

Technologies used are Python (software) Qt (interface) OpenGL (visual effects). The rendering uses Enhanced Sphere Tracing & physically based shading.

I talked about the tool development here and the rendering pipeline here in the past. More information about advanced sphere tracing here. Which is an enhancement of this!

A* path finding using a game's wiki

Navigate the result here!

I recently replayed Metroid Prime 2 (for GameCube) and got lost. A lot. This game features many interconnecting rooms, most of which exist in a dark and a light dimension with one-way or two-way portals. It took ages to figure out what item to collect when and where, so I did what anyone would do and coded a system to find the optimal path to finish the game!

The game works with rooms, basically every room has a bunch of doors, usually 2 complex rooms are connected by tunnels, this allows the game to close doors and stream the world very late (as we don't have any view of unloaded areas to worry about). To start out I scraped this site (while saving every page loaded to disk as to not get banned for doing too many requests). Every link in that list goes to a page that describes what other places we can go from this place, as well as what upgrades can be found in this room. The connecting rooms are also formulated like "Room X (through red door)", from which I can derive that a red door requires the missile launcher upgrade. So I can scrape limitations as well!

This allowed me to build a massive structure. Instead of making serialization work I just did code generation into a set of global dictionaries. That looks a bit like this:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
# structure.py
class Or(object):
    """ Requirement satisfied by any one of several alternatives.

    Compares equal to a value when any of its options equals that value, so an
    Or instance can sit in a requirements list and be tested with `in inventory`
    (the `in` operator tries the left operand's __eq__ first).
    """
    def __init__(self, *args):
        self.options = args

    def __str__(self):
        return 'Or(\'%s\')' % '\', \''.join(self.options)

    def __eq__(self, other):
        # equal when any single option matches
        for opt in self.options:
            if opt == other:
                return True
        return False

    def __ne__(self, other):
        # Python 2 does not derive != from ==; without this, `Or(...) != x`
        # would fall back to identity comparison and disagree with __eq__.
        return not self.__eq__(other)

    # defining __eq__ drops the default hash under Python 3; keep identity
    # hashing explicit so instances stay usable as dict keys / set members
    __hash__ = object.__hash__


class Item(object):
    """ A collectible upgrade.

    `requirements` lists what must already be owned before this item can be
    picked up; it starts empty and is filled in by the scrape output.
    """
    def __init__(self, name):
        self.name, self.requirements = name, []


class Transition(object):
    """ A one-way connection (door/portal) between two rooms.

    `requirements` lists the inventory entries needed to pass through; it
    starts empty and is filled in by the scrape output.
    """
    def __init__(self, sourceRoom, targetRoom):
        self.sourceRoom = sourceRoom
        self.targetRoom = targetRoom
        self.requirements = []

    def deregister(self):
        # detach this transition from its source room's outgoing list
        self.sourceRoom.transitions.remove(self)


areas = ['Temple Grounds', 'Sky Temple Grounds', 'Great Temple', 'Sky Temple', 'Agon Wastes', 'Dark Agon Wastes', 'Torvus Bog', 'Dark Torvus Bog', 'Sanctuary Fortress', 'Ing Hive']


class Room(object):
    """ A location in the game world; the nodes of the navigation graph. """
    def __init__(self, name):
        self.name = name
        # outgoing Transition objects to adjacent rooms
        self.transitions = []
        # Item objects that can be collected in this room
        self.items = []
        # world name (one of the `areas` entries); assigned after construction
        self.area = None

    def displayData(self):
        # debug helper: print this room's connections and contents (Python 2)
        print '%s (%s)' % (self.name, self.area)
        print '\tConnects to:'
        for transition in self.transitions:
            print transition.targetRoom.name, transition.requirements
        print '\tItems:'
        for item in self.items:
            print item.name
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# Abridged sample of the generated scrapeoutput.py module: plain code
# generation instead of serialization, executed once on import to build the
# full game graph in memory.
from structure import *

# list items in the game; duplicate pickups get numbered keys
items = {
    'beam ammo expansion1': Item('Beam Ammo Expansion'),
    'beam ammo expansion2': Item('Beam Ammo Expansion'),
    'beam ammo expansion3': Item('Beam Ammo Expansion'),
    'beam ammo expansion4': Item('Beam Ammo Expansion'),
    'boost ball': Item('Boost Ball'),
# ... etcetera
}
# list rooms in the game; keys are (room name, area) tuples because room
# names repeat across areas (e.g. 'transit station')
rooms = {
    ('transit station', 'Agon Wastes'): Room('Transit Station'),
    ('mining plaza', 'Agon Wastes'): Room('Mining Plaza'),
# ... etcetera
}

# denote room worlds and contents
rooms[('transit station', 'Sanctuary Fortress')].area = 'Sanctuary Fortress'
rooms[('transit station', 'Sanctuary Fortress')].items.append(items['power bomb expansion6'])
# ... etcetera

# connect rooms in the game; keys are (source key, target key) tuples
transitions = {
    (('transit station', 'Agon Wastes'), ('mining plaza', 'Agon Wastes')): Transition(rooms[('transit station', 'Agon Wastes')], rooms[('mining plaza', 'Agon Wastes')]),
# ... etcetera
}
# register the connections on their source rooms
rooms[('transit station', 'Agon Wastes')].transitions.append(transitions[(('transit station', 'Agon Wastes'), ('mining plaza', 'Agon Wastes'))])
# ... etcetera
# add inventory requirements to the connections (scraped from door colors)
transitions[(('sacred path', 'Temple Grounds'), ('profane path', 'Sky Temple Grounds'))].requirements.append('Dark Beam')
# ... etcetera

After executing that code I would have every room in the game in memory, where every room has links to the objects they're connected to, including constraints such as upgrade requirements to open the door between those rooms!

Now with all this information, I can start writing a navigation unit in the game world. My unit has a certain inventory of upgrades and a current location and it can figure out where it can and can't go at this point. So I wrote an A* path finder but for every connection I only consider evaluating it if the requirements of going through that door are met by the inventory.

Putting the unit at the start of the game and telling it to grab the first upgrade works instantly like this! A bit of a harder extension was adding the possibility to realize the requirements for a transition and then finding the path to that requirement first. This also slowed the algorithm down by a lot, but it does allow me to say "start here, go to final boss" and the navigator will figure out what to do! I realized there are 100! (factorial) orderings in which to gather the upgrades, so I stuck with the manual order input for now...

I wanted to add the desire to obtain all items in the game, to complete it with 100% of the upgrades, but didn't get around to it.

Next up was visualization! I soon realized that to highlight rooms on the map the most time efficient way was to just bite the bullet for an evening and trace them... I wrote this little tool where I can navigate the different maps and draw the polygons for each room. That then gets saved as JSON and got used by the visualizer.

Image showing polygon drawing editor for annotating rooms

And here's the code!

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
import json
import os
from qt import *
from scrapeoutput import *


class PolygonView(QWidget):
    """ Canvas that draws the currently selected room's polygon over a dimmed
    background map image, with middle-mouse-drag panning. """

    def __init__(self, owner):
        super(PolygonView, self).__init__()
        # owner provides activeKey() -> (room, world) key of the selection, or None
        self._owner = owner
        # (room, world) key -> list of QPoint outlining that room
        self.roomPolygons = {}
        self.setFocusPolicy(Qt.StrongFocus)
        # (panX, panY, pressX, pressY) captured at middle-press while dragging, else None
        self._panStart = None
        # QImage of the world map drawn faded behind the polygons (may be None)
        self.background = None
        self._panX = 0
        self._panY = 0

    def mousePressEvent(self, event):
        # middle mouse begins a pan drag
        if event.button() == Qt.MiddleButton:
            self._panStart = self._panX, self._panY, event.x(), event.y()
            return

    def mouseReleaseEvent(self, event):
        # end the pan drag, if one is active
        if self._panStart is not None:
            self._panStart = None
            return

    def mouseMoveEvent(self, event):
        # while panning, offset the view by the mouse delta since the drag started
        if self._panStart is not None:
            self._panX = self._panStart[0] + (event.x() - self._panStart[2])
            self._panY = self._panStart[1] + (event.y() - self._panStart[3])
            self.repaint()
            return

    def paintBackground(self, painter):
        # draw the map image faded so the polygons stand out on top of it
        if self.background:
            painter.setOpacity(0.2)
            painter.drawImage(0, 0, self.background)
            painter.setOpacity(1.0)

    def findActivePolygon(self):
        # return the point list for the owner's active key, or None when there
        # is no selection or no polygon has been drawn for it yet
        key = self._owner.activeKey()
        if key is None:
            return
        points = self.roomPolygons.get(key, [])
        if not points:
            return None
        return points

    def drawPolygon(self, painter, points):
        # draw each vertex as a small square (the first one filled, marking the
        # start) and connect consecutive vertices; the closing edge back to the
        # first point is dotted to distinguish it
        solidWhite = QPen(Qt.white, 1, Qt.SolidLine)
        dottedWhite = QPen(Qt.white, 1, Qt.DotLine)
        for i in xrange(len(points)):
            painter.setPen(solidWhite)
            painter.setBrush(Qt.NoBrush)
            x, y = points[i].x(), points[i].y()
            if i == 0:
                painter.setBrush(Qt.white)
            painter.drawRect(QRectF(x - 2.5, y - 2.5, 5.0, 5.0))
            if i == len(points) - 1:
                painter.setPen(dottedWhite)
            painter.drawLine(points[i], points[(i + 1) % len(points)])

    def paint(self, painter):
        # hook called by paintEvent after the pan transform has been applied;
        # subclasses override this to add their own drawing
        self.paintBackground(painter)
        painter.setRenderHint(QPainter.Antialiasing)
        points = self.findActivePolygon()
        if not points:
            return
        self.drawPolygon(painter, points)

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.fillRect(QRect(0, 0, self.width(), self.height()), Qt.black)

        # apply panning, then delegate actual drawing to paint()
        painter.translate(self._panX, self._panY)
        self.paint(painter)


def quadZoom(zoom, steps):
    """ Scale `zoom` multiplicatively by mouse-wheel `steps` so repeated
    scrolling produces a smooth quadratic-feeling zoom curve.

    Negative steps shrink the zoom, positive steps grow it; the two
    directions use slightly different per-step factors. """
    zoomingOut = steps < 0
    factor = pow(1.000794567, steps) if zoomingOut else pow(0.99912239, -steps)
    return zoom * factor


class PolygonZoom(PolygonView):
    """ Read-only variant of PolygonView that adds mouse-wheel zooming;
    used by the play-through visualizer. """

    def __init__(self, owner):
        super(PolygonZoom, self).__init__(owner)
        self.zoom = 1.0
        # drawing state: 0 draws a thin white outline, anything else a thick orange one
        self.state = 0

    def wheelEvent(self, event):
        # zoom around the cursor: convert the cursor to scene units before and
        # after changing the zoom, then shift the pan by the difference so the
        # point under the mouse stays put
        oldUnits = (event.x() - self._panX) / self.zoom, (event.y() - self._panY) / self.zoom
        self.zoom = quadZoom(self.zoom, event.delta())
        newUnits = (event.x() - self._panX) / self.zoom, (event.y() - self._panY) / self.zoom
        deltaUnits = newUnits[0] - oldUnits[0], newUnits[1] - oldUnits[1]
        self._panX += deltaUnits[0] * self.zoom
        self._panY += deltaUnits[1] * self.zoom
        self.repaint()

    def drawPolygon(self, painter, points):
        # simpler than the base class: no vertex handles, just the closed outline
        if self.state == 0:
            painter.setPen(Qt.white)
        else:
            painter.setPen(QPen(QBrush(QColor(255, 180, 40)), 3.0))
        for i in xrange(len(points)):
            painter.drawLine(points[i], points[(i + 1) % len(points)])

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.fillRect(QRect(0, 0, self.width(), self.height()), Qt.black)

        # pan, then zoom, then delegate drawing to the shared paint() hook
        painter.translate(self._panX, self._panY)
        painter.scale(self.zoom, self.zoom)
        self.paint(painter)


class PolygonEdit(PolygonView):
    """ Editable variant of PolygonView: left-drag moves a vertex,
    ctrl+click appends a vertex, right-click deletes the vertex under
    the cursor. """

    def __init__(self, owner):
        super(PolygonEdit, self).__init__(owner)
        # while left-dragging a vertex: (pressPos, polygon, index, originalPoint), else None
        self.__dragStart = None

    def __indexUnderMouse(self, polygon, pos):
        # hit-test: index of the first vertex within (manhattan-distance)
        # tolerance of pos, or None when nothing is close enough
        tolerance = 8
        for index, point in enumerate(polygon):
            delta = point - pos
            if abs(delta.x()) + abs(delta.y()) < tolerance:
                return index
        return None

    def mousePressEvent(self, event):
        # middle mouse begins a pan drag (same as the base class)
        if event.button() == Qt.MiddleButton:
            self._panStart = self._panX, self._panY, event.x(), event.y()
            return

        if event.button() != Qt.LeftButton:
            return

        key = self._owner.activeKey()
        if key is None:
            return

        polygon = self.roomPolygons.get(key, None)
        if polygon is None:
            return None

        # convert to scene space (undo the pan) before hit-testing
        localPos = event.pos() - QPoint(self._panX, self._panY)
        index = self.__indexUnderMouse(polygon, localPos)
        if index is None:
            return

        # remember press position and the vertex's original location so the
        # move handler can apply a relative offset
        self.__dragStart = localPos, polygon, index, QPoint(polygon[index])

    def mouseReleaseEvent(self, event):
        # end a pan drag, if one is active
        if self._panStart is not None:
            self._panStart = None
            return

        if self.__dragStart is None:
            # ctrl+release with no drag in progress: append a new vertex to the
            # active polygon (creating the polygon entry if needed)
            if event.modifiers() == Qt.ControlModifier:
                key = self._owner.activeKey()
                if key is None:
                    return
                polygon = self.roomPolygons.get(key, [])
                localPos = event.pos() - QPoint(self._panX, self._panY)
                polygon.append(localPos)
                self.roomPolygons[key] = polygon
                self.repaint()
                return

            # right-click: delete the vertex under the cursor, if any
            if event.button() == Qt.RightButton:
                key = self._owner.activeKey()
                if key is None:
                    return
                polygon = self.roomPolygons.get(key, None)
                if polygon is None:
                    return
                localPos = event.pos() - QPoint(self._panX, self._panY)
                index = self.__indexUnderMouse(polygon, localPos)
                if index is None:
                    return
                polygon.pop(index)
                self.repaint()
                return

        # left button released: finish the vertex drag
        self.__dragStart = None

    def mouseMoveEvent(self, event):
        # panning takes precedence over vertex dragging
        if self._panStart is not None:
            self._panX = self._panStart[0] + (event.x() - self._panStart[2])
            self._panY = self._panStart[1] + (event.y() - self._panStart[3])
            self.repaint()
            return
        if self.__dragStart is None:
            return
        # move the grabbed vertex: original position + offset since the press
        localPos = event.pos() - QPoint(self._panX, self._panY)
        delta = localPos - self.__dragStart[0]
        self.__dragStart[1][self.__dragStart[2]] = self.__dragStart[3] + delta
        self.repaint()

    def paint(self, painter):
        self.paintBackground(painter)
        painter.setRenderHint(QPainter.Antialiasing)
        activeKey = self._owner.activeKey()
        if not activeKey:
            return

        # ghost the other rooms of the same world for context
        painter.setOpacity(0.2)
        for key in self.roomPolygons:
            if key[1] == activeKey[1] and key[0] != activeKey[0]:
                self.drawPolygon(painter, self.roomPolygons[key])
        painter.setOpacity(1.0)

        # then the active room's polygon at full opacity
        points = self.findActivePolygon()
        if not points:
            return
        self.drawPolygon(painter, points)


class App(QMainWindowState):
    """ Polygon-annotation tool: pick a world and a room from list docks and
    draw/edit that room's outline over the world's map image. Polygons are
    saved/loaded as JSON and can be exported as per-room BMP images. """

    def __init__(self):
        super(App, self).__init__(QSettings('ImageEditor'))

        self.__worldList = QListView()
        self.__roomList = QListView()
        self.__view = PolygonEdit(self)
        self.__worldList.setModel(QStandardItemModel())
        self.__roomList.setModel(QStandardItemModel())
        self._addDockWidget(self.__worldList, 'Worlds')
        self._addDockWidget(self.__roomList, 'Rooms')
        self._addDockWidget(self.__view, 'View')

        self.__activeRoom = None
        self.__activeWorld = None
        # world name -> list of room names; built from the scraped `rooms`
        # dict whose keys are (room, world) tuples
        self.__worldRooms = {}
        worldList = self.__worldList.model()
        for key in rooms:
            world = key[1]
            if world not in self.__worldRooms:
                worldList.appendRow(QStandardItem(world))
                self.__worldRooms[world] = []
            self.__worldRooms[world].append(key[0])

        self.__worldList.selectionModel().selectionChanged.connect(self.onWorldSelected)
        self.__roomList.selectionModel().selectionChanged.connect(self.onRoomSelected)

        self.__menuBar = QMenuBar()
        self.setMenuBar(self.__menuBar)
        self.__fileMenu = self.__menuBar.addMenu('File')
        self.__fileMenu.addAction('Save').triggered.connect(self.__onSave)
        self.__fileMenu.addAction('Load').triggered.connect(self.__onLoad)
        self.__fileMenu.addAction('Export').triggered.connect(self.__onExport)

        # resume the previous session if an autosave exists in the working dir
        if os.path.exists('autosave.json'):
            self.load('autosave.json')

    def closeEvent(self, event):
        # always autosave on exit; there is no unsaved-changes prompt
        self.save('autosave.json')

    def __onSave(self):
        filePath = QFileDialog.getSaveFileName(self, 'Save', os.path.dirname(os.path.normpath(__file__)), '*.json')
        if filePath is not None:
            self.save(filePath)

    def __onLoad(self):
        filePath = QFileDialog.getOpenFileName(self, 'Load', os.path.dirname(os.path.normpath(__file__)), '*.json')
        if filePath is not None:
            self.load(filePath)

    def __onExport(self):
        filePath = QFileDialog.getExistingDirectory(self, 'Export', os.path.dirname(os.path.normpath(__file__)))
        if filePath is not None:
            self.export(filePath)

    def export(self, filePath):
        # write one BMP per room: a black image the size of the polygon's
        # bounding box where each vertex i is a single pixel with red == i,
        # and the bounding-box origin is encoded in the file name
        for key, value in self.__view.roomPolygons.iteritems():
            minX = min(value, key=lambda p: p.x()).x()
            minY = min(value, key=lambda p: p.y()).y()
            maxX = max(value, key=lambda p: p.x()).x()
            maxY = max(value, key=lambda p: p.y()).y()
            dest = os.path.join(filePath, '__'.join((key[0], key[1], str(minX), str(minY))) + '.bmp')
            image = QImage(maxX - minX + 1, maxY - minY + 1, QImage.Format_ARGB32)
            image.fill(Qt.black)
            for i, point in enumerate(value):
                image.setPixel(point.x() - minX, point.y() - minY, qRgb(i, 0, 0))
            image.save(dest)

    def save(self, filePath):
        # serialize to JSON as a list of {'key': (room, world), 'value': [(x, y), ...]}
        with open(filePath, 'w') as fh:
            result = []
            for key, value in self.__view.roomPolygons.iteritems():
                format = {}  # NOTE: shadows the builtin `format` within this method
                format['key'] = key
                format['value'] = [(point.x(), point.y()) for point in value]
                result.append(format)
            json.dump(result, fh)

    def load(self, filePath):
        # inverse of save(); str() converts JSON unicode back to plain strings
        with open(filePath) as fh:
            self.__view.roomPolygons.clear()
            format = json.load(fh)
            for item in format:
                key, value = item['key'], item['value']
                self.__view.roomPolygons[(str(key[0]), str(key[1]))] = [QPoint(*point) for point in value]

    def activeKey(self):
        # (room, world) key for the current selection, or None when either
        # list has no selection; queried by the PolygonEdit view
        key = self.__activeRoom, self.__activeWorld
        if None in key:
            return None
        return key

    def onWorldSelected(self, *args):
        # rebuild the room list for the picked world and swap the map image,
        # which is expected at '<world name>.jpg' next to the script
        idx = self.__worldList.selectionModel().currentIndex()
        roomList = self.__roomList.model()
        roomList.clear()
        if not idx.isValid():
            self.__activeWorld = None
            self.__view.background = None
            return
        item = self.__worldList.model().itemFromIndex(idx)
        self.__activeWorld = str(item.text())
        self.__view.background = QImage(self.__activeWorld + '.jpg')
        for room in self.__worldRooms[self.__activeWorld]:
            inst = QStandardItem(room)
            # gray out rooms that already have a polygon drawn
            # (polygon keys appear to store room names lowercased — see load())
            if (room.lower(), self.__activeWorld) in self.__view.roomPolygons:
                inst.setBackground(Qt.lightGray)
            roomList.appendRow(inst)

        self.onRoomSelected()

    def onRoomSelected(self, *args):
        # track the active room and trigger a redraw of the polygon view
        idx = self.__roomList.selectionModel().currentIndex()
        if not idx.isValid():
            self.__activeRoom = None
            self.__view.repaint()
            return
        item = self.__roomList.model().itemFromIndex(idx)
        self.__activeRoom = str(item.text())
        self.__view.repaint()


if __name__ == '__main__':
    # build the Qt application, show the editor window and enter the event loop
    a = QApplication([])
    w = App()
    w.show()
    a.exec_()

Then for navigation I wrote a basic A* pathfinder. This is the most basic introduction I could find, I think I learned from this same site years ago!

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import json
import itertools
from structure import *
from scrapeoutput import *


# memoizes findItem results: Item -> Room
_cache = {}
def findItem(target):
    """ Return the Room whose item list contains `target`, caching the answer
    for speedier repeated queries. Returns None when no room holds it. """
    hit = _cache.get(target)
    if hit:
        return hit
    for key, candidate in rooms.iteritems():
        if target in candidate.items:
            _cache[target] = candidate
            return candidate


def canTransition(transition, inventory):
    """
    check if a path is blocked: passable only when every requirement on the
    transition is present in the inventory
    :type transition: Transition
    :type inventory: [str]
    :rtype: bool
    """
    return all(requirement in inventory for requirement in transition.requirements)


class AStarNode(object):
    """ Search-tree node: wraps a room and remembers the node it was reached
    from, so a finished search can be walked back to the start. """
    def __init__(self, data, parent=None):
        self.data = data
        self.parent = parent

    def cost(self):
        """ Every room traversal costs a flat 1, so the cost is simply the
        depth of this node; fewest doors == shortest path. """
        depth = 1
        ancestor = self.parent
        while ancestor is not None:
            depth += 1
            ancestor = ancestor.parent
        return depth

    def __repr__(self):
        if self.parent is None:
            return self.data.name
        return '%s (from %s)' % (self.data.name, self.parent.data.name)


def growAdjacentNodes(node, inventory, open, closed):
    """
    Update the open list in place with the rooms reachable from `node`:
    transitions the inventory cannot unlock are skipped, rooms already on the
    closed list are never re-added, and a room already on the open list is
    replaced only when the new route is cheaper.

    :type node: AStarNode
    :type inventory: [str]
    :type open: [AStarNode]
    :type closed: [AStarNode]
    :rtype: [AStarNode]
    """
    for transition in node.data.transitions:
        # locked door with the current inventory? ignore it
        if not canTransition(transition, inventory):
            continue

        target = transition.targetRoom

        # already fully expanded rooms are never revisited
        if any(entry.data == target for entry in closed):
            continue

        candidate = AStarNode(target, node)
        candidateCost = candidate.cost()
        onOpenList = False
        for i, entry in enumerate(open):
            if entry.data == target:
                onOpenList = True
                # keep whichever route to this room is cheaper
                if candidateCost < entry.cost():
                    open[i] = candidate
                break
        if not onOpenList:
            open.append(candidate)


class AstarException(Exception): pass


def aStar(startRoom, destinationRoom, inventory):
    """ basic path finder: repeatedly expands the cheapest frontier room until
    destinationRoom shows up on the open list, then walks the parent chain back
    to return the route as a list of (room name, area) tuples.
    Note: no heuristic term is used, so this is effectively uniform-cost
    (Dijkstra) search rather than true A*. """
    closed = [AStarNode(startRoom)]
    open = []
    growAdjacentNodes(closed[0], inventory, open, closed)
    destinationNode = None
    for i in xrange(10000):  # failsafe
        # stop as soon as the destination appears on the frontier
        for entry in open:
            if entry.data == destinationRoom:
                destinationNode = entry
                break
        if destinationNode:
            break
        # frontier exhausted without reaching the destination: unreachable
        if not open:
            raise AstarException('Out of options searching %s. Closed list: \n%s' % (destinationRoom.name, '\n'.join(str(i) for i in closed)))
        # expand the cheapest frontier node next
        best = min(open, key=lambda x: x.cost())
        open.remove(best)
        closed.append(best)
        growAdjacentNodes(best, inventory, open, closed)
    # reconstruct the route by following parent links back to the start
    iter = destinationNode
    path = []
    while iter:
        path.insert(0, (iter.data.name, iter.data.area))
        iter = iter.parent
    return path


def shortestPath(startRoom, itemKey, ioInventory):
    """ utility to kick off the path finder and compile the results

    Finds the room holding items[itemKey], path-finds to it from startRoom and
    appends the item's name to ioInventory (mutated in place).
    Returns (list of path steps / messages, destination Room).
    Raises AstarException when the item's own pickup requirements are missing
    from the inventory. ('Aquired' typo is kept as-is: it is output text.)
    """
    target = items[itemKey]
    # the item itself may have pickup requirements beyond reaching its room
    for requirement in target.requirements:
        if requirement not in ioInventory:
            raise AstarException()
    destinationRoom = findItem(target)
    if startRoom == destinationRoom:
        # already in the right room, no travel needed
        ioInventory.append(target.name)
        return ['Aquired: %s' % target.name], destinationRoom
    path = aStar(startRoom, destinationRoom, ioInventory)
    if not path:
        raise RuntimeError()
    ioInventory.append(target.name)
    # drop the first step (the room we are already standing in)
    return path[1:] + ['Aquired: %s' % target.name], destinationRoom


def shortestMultiPath(startRoom, orderlessItems, finalItem, ioInventory):
    """ utility to try out all ways to get the orderlessItems and returns the most optimal version
    finalItem is the next item to get, it is included because the most optimal route should also be close to the next destination to be truly optimal

    Brute-forces every permutation of orderlessItems (factorial cost — keep the
    tuple small), extends ioInventory with the picked-up items, and returns
    (best path, room the unit ends up in).
    """
    results = []
    for option in itertools.permutations(orderlessItems):
        # each permutation works on its own inventory copy so attempts don't interfere
        inv = ioInventory[:]
        totalPath = []
        cursor = startRoom
        for itemKey in option + (finalItem,):
            path, cursor = shortestPath(cursor, itemKey, inv)
            totalPath += path
        results.append((totalPath, cursor, inv))
    # get result with shortest path
    result = min(results, key=lambda r: len(r[0]))
    # merge only the newly acquired items into the caller's inventory
    ioInventory += result[2][len(ioInventory):]
    return result[0], result[1]

With this we can navigate from item to item and record the whole playthrough. After doing this some amendments were necessary to fix things missed by the wiki scrape, such as activating a switch in room A to move aside something in room B. I solved that by adding a "switch" item to the right room and inserting it into the ordered item list that the navigator will walk along. After I had the pathfinder working and showing the resulting path on a map with highlighting, I went on and exported that all to a webpage! I just added that inline in the startup code. This whole thing does the navigation, generates a webpage and then shows a Qt app in a surprisingly small amount of time.

Here is that visualizer code:

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
from image_editor import PolygonZoom
from qt import *


class App(QMainWindowState):
    """Main window of the navigator.

    On construction it loads the room outline polygons, records the scripted
    playthrough into a timeline list widget, exports the route as
    index.html / polygons.js, and then acts as an interactive viewer:
    selecting a timeline entry shows that room on the world map.
    """

    def __init__(self):
        super(App, self).__init__(QSettings('Navigator'))
        # central map view plus a dockable timeline listing every visited room
        self.view = PolygonZoom(self)
        self.timeline = QListView()
        model = QStandardItemModel()
        self.timeline.setModel(model)
        self.setCentralWidget(self.view)
        self._addDockWidget(self.timeline, 'Timeline', Qt.LeftDockWidgetArea)
        self.timeline.selectionModel().selectionChanged.connect(self.__onUpdate)
        # (room label, world name) of the active selection; None when nothing is selected
        self.__key = None

        # load the room polygons from disk; keyed by (room name, world name)
        with open('autosave.json') as fh:
            self.view.roomPolygons.clear()
            format = json.load(fh)
            for item in format:
                key, value = item['key'], item['value']
                self.view.roomPolygons[(str(key[0]), str(key[1]))] = [QPoint(*point) for point in value]

        # record playthrough
        # plain strings are fetched in the given order; tuples are orderless
        # groups where the last entry is the item to end at (solved by shortestMultiPath)
        inventory = []
        itemOrder = ('missile launcher', 'violet translator', 'morph ball bomb', 'amber translator', 'space jump boots', 'dark beam', 'light beam',
                     ('dark agon temple key 1', 'dark agon temple key 2', 'dark agon temple key 3', 'dark suit'), 'agon energy', 'agon energy delivery',
                     'super missile', 'emerald translator', 'boost ball', 'dark torvus temple key 1', 'seeker launcher',
                     'catacombs lock', 'gathering lock',
                     'gravity boost', 'grapple beam',
                     ('dark torvus temple key 2', 'dark torvus temple key 3', 'dark visor'), 'torvus energy', 'torvus energy delivery',
                     'spider ball', 'power bomb', 'echo visor', 'screw attack',
                     ('ing hive temple key 1', 'ing hive temple key 2', 'ing hive temple key 3', 'annihilator beam'), 'sanctuary energy', 'sanctuary energy delivery',
                     'light suit',
                     'sky temple key 1', 'sky temple key 2', 'sky temple key 3', 'sky temple key 4', 'sky temple key 5', 'sky temple key 6', 'sky temple key 7', 'sky temple key 8', 'sky temple key 9',
                     'temple energy',
                     'temple energy delivery',

                     'missile expansion17', 'beam ammo expansion3', 'power bomb expansion5', 'darkburst', 'missile expansion12'
                     )
        startRoom = rooms[('landing site', 'Temple Grounds')]

        # seed the timeline with the starting room
        inst = QStandardItem('Landing Site')
        inst.setData('Temple Grounds')
        model.appendRow(inst)

        # walk the route item by item, appending every traversed room to the timeline
        for itemKey in itemOrder:
            try:
                if isinstance(itemKey, tuple):
                    path, startRoom = shortestMultiPath(startRoom, itemKey[:-1], itemKey[-1], inventory)
                else:
                    path, startRoom = shortestPath(startRoom, itemKey, inventory)
            except ValueError:
                print 'Error finding %s' % str(itemKey)
                raise
            if not path:
                raise RuntimeError()
            for room_area in path:
                # path entries are either (room, area) tuples or bare room names
                if isinstance(room_area, tuple):
                    room, area = room_area
                else:
                    room, area = room_area, None
                inst = QStandardItem(room)
                if area:
                    inst.setData(area)
                model.appendRow(inst)

        # export the recorded route as a static web page; while writing, also
        # collect the polygons per world so they can be dumped to polygons.js below
        worldPolygons = {}
        with open('index.html', 'w') as fh:
            fh.write('')
            fh.write('<script src="polygons.js"></script>')
            fh.write('<script src="index.js"></script>')
            fh.write('<link rel="stylesheet" type="text/css" href="index.css">')
            fh.write('<div id="stack">')
            for i in xrange(model.rowCount()):
                data = str(model.item(i).data())
                room = str(model.item(i).text())
                # rows starting with 'aquired' (spelling as stored in the model)
                # appear to be pickup markers rather than rooms, so they get no polygon
                if not room.lower().startswith('aquired'):
                    worldPolygons[data] = worldPolygons.get(data, {})
                    try:
                        worldPolygons[data][room] = self.view.roomPolygons[(room.lower(), data)]
                    except:
                        # no polygon recorded for this room; fall back to an empty outline
                        worldPolygons[data][room] = []
                data = '' if not data else 'data="%s"' % data
                fh.write('<div class="entry" %s="">%s</div>' % (data, room))
            fh.write('</div><div id="image"><canvas id="painter"></canvas></div>')

        # dump the collected polygons as a javascript object literal
        with open('polygons.js', 'w') as fh:
            fh.write('polygons = {')
            for world in worldPolygons:
                fh.write('"%s": {' % world)
                for room in worldPolygons[world]:
                    if worldPolygons[world][room]:
                        poly = '%s' % (', '.join(['[%s, %s]' % (pt.x(), pt.y()) for pt in worldPolygons[world][room]]))
                        fh.write('"%s": [%s],' % (room, poly))
                fh.write('},')
            fh.write('};')

    def activeKey(self):
        """Return the (room label, world name) tuple for the current timeline selection, or None."""
        return self.__key

    def __onUpdate(self, *args):
        """Selection-changed handler: point the map view at the selected room."""
        self.view.state = 0
        idx = self.timeline.selectionModel().currentIndex()
        if idx.isValid():
            item = self.timeline.model().itemFromIndex(idx)
            label = str(item.text().lower())
            # rows labelled 'aquired: ...' (misspelling matches the stored strings)
            # are highlighted via the previous row, presumably the room they occurred in
            if label.startswith('aquired: '):
                item = self.timeline.model().item(idx.row() - 1)
                label = str(item.text().lower())
                self.view.state = 1
            world = str(item.data())
            self.__key = label, world
            # world maps are image files named after the world
            self.view.background = QImage(world + '.jpg')
        else:
            self.__key = None
            self.view.background = None
        self.view.repaint()


if __name__ == '__main__':
    # bootstrap Qt, show the navigator window, then hand control to the event loop
    qtApp = QApplication([])
    mainWindow = App()
    mainWindow.show()
    qtApp.exec_()

This is definitely one of the sillier things to do in your spare time 🙂

Navigate the result here!

Part 1: Drawing with PyOpenGL using modern OpenGL buffers.

This is part 1 of a series and it is about getting started with visualizing triangle meshes with Python 2.7 using the libraries PyOpenGL and PyQt4.
PyQt5, PySide, PySide2 are (apart from some class renames) also compatible with this.

Part 1
Part 2
Part 3

I will assume you know python, you will not need a lot of Qt or OpenGL experience, though I will also not go into the deeper details of how OpenGL works. For that I refer you to official documentation and the excellent (C++) tutorials at https://open.gl/. Although they are C++, there is a lot of explanation about OpenGL and why to do certain calls in a certain order.

On a final note: I will make generalizations and simplifications when explaining things. If you think something works differently than I say, it probably does; this is to try and convey ideas to beginners, not to explain low level openGL implementations.

Part 1: Drawing a mesh using buffers.

1.1. Setting up

Download & run Python with default settings.

Download & run PyQt4 with default settings.

Paste the following in a windows command window (windows key + R -> type "cmd.exe" -> hit enter):

C:/Python27/Scripts/pip install setuptools
C:/Python27/Scripts/pip install PyOpenGL



1.2. Creating an OpenGL enabled window in Qt.

The first thing to know about OpenGL is that any operation requires OpenGL to be initialized. OpenGL is not something you just "import", it has to be attached to a (possibly hidden) window. This means that any file loading or global initialization has to be postponed until OpenGL is available.

The second thing to know about OpenGL is that it is a big state machine. Any setting you change is left until you manually set it back. This means in Python we may want to create some contexts (using contextlib) to manage the safe setting and unsetting of certain states. I will however not go this far.

Similar to this Qt also requires prior initialization. So here's some relevant code:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
# import the necessary modules
from PyQt4.QtCore import * # QTimer
from PyQt4.QtGui import * # QApplication
from PyQt4.QtOpenGL import * # QGLWidget
from OpenGL.GL import * # OpenGL functionality
from OpenGL.GL import shaders # Utilities to compile shaders, we may not actually use this

# this is the basic window
class OpenGLView(QGLWidget):
    """Minimal OpenGL-enabled window; the three GL hooks are stubs for now."""

    def initializeGL(self):
        """Called once the GL context exists; real program initialization
        (file loading, buffer uploads) belongs here, not earlier."""
        pass

    def resizeGL(self, width, height):
        """Called on every resize; OpenGL tracks how many pixels it should
        draw, so the new window size has to be forwarded from here."""
        pass

    def paintGL(self):
        """Called whenever the window redraws (on show and on resize
        automatically); all drawing goes here."""
        pass

# boot Qt first: nothing Qt-related runs until a QApplication exists
qtApp = QApplication([])
# construct the GL window; its context is not initialized until it is shown
glWindow = OpenGLView()
# show() only schedules the window for display on the next Qt update
glWindow.show()
# enter the Qt main loop; this call blocks, and from here on Qt executes
# the queued work above plus all future events (mouse clicks, resizes, ...)
qtApp.exec_()

Running this should get you a black window that is OpenGL enabled. So let's fill in the view class to draw something in real-time. This will show you how to make your window update at 60-fps-ish, how to set a background color and how to handle resizes.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
class OpenGLView(QGLWidget):
    """QGLWidget that clears to a fixed color, repaints ~60 times per second
    and draws a white rectangle in the top right quadrant."""

    def initializeGL(self):
        # RGBA background color used by glClear below
        glClearColor(0.1, 0.2, 0.3, 1.0)
        # drive repaint() roughly every 16 ms for a ~60 fps refresh
        self.__redrawTimer = QTimer()
        self.__redrawTimer.timeout.connect(self.repaint)
        self.__redrawTimer.start(1000 / 60)

    def resizeGL(self, width, height):
        # keep the GL viewport in sync with the widget's pixel size
        glViewport(0, 0, width, height)

    def paintGL(self):
        # wipe the color buffer (background) and the depth buffer; the depth
        # buffer is what lets nearby geometry occlude faraway geometry
        # automatically, regardless of the order things are drawn in
        clearMask = GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT
        glClear(clearMask)
        # window coordinates run from (-1,-1) to (1,1), so this rectangle
        # fills the top right quadrant; the default draw color is white
        glRecti(0, 0, 1, 1)

Note that the QTimer forces the screen to redraw, but because we are not animating any data this will not be visible right now.

1.3. Creating a Vertex Array Object (VAO)

In OpenGL there is a lot we can put into a mesh, not only positions of vertices, but also triangulation patterns, vertex colors, texture coordinates, normals etcetera. Because OpenGL is a state machine (as described at the start of 2.) this means that when drawing 2 different models a lot of settings need to be swapped before we can draw it. This is why the VAO was created, as it is a way to group settings together and be able to draw a mesh (once set up properly) in only 2 calls. It is not less code, but it allows us to move more code to the initialization stage, winning performance and reducing risk of errors resulting in easier debugging.

Our mesh however will not be very complicated. We require 2 sets of data, the vertex positions and the triangulation (3 integers per triangle pointing to what vertex to use for this triangle).

Image showing structure of a 2 triangles forming a quad

As you can see this would result in the following data:

Positions = [0, 0, 1, 0, 0, 1, 1, 1]
Elements = [0, 1, 2, 1, 3, 2]

4 2D vertices and 2 triangles made of 3 indices each.

So let's give this data to a VAO at the bottom of initializeGL.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
# generate a model
# set up the data
# 4 corner vertices as (x, y) pairs and 2 triangles of 3 indices each
# NOTE(review): bare sizeof/ctypes are assumed to come from the OpenGL star import - confirm for your PyOpenGL version
positions = (0, 0, 1, 0, 0, 1, 1, 1)
elements = (0, 1, 2, 1, 3, 2)
# apply the data
# generate a vertex array object so we can easily draw the resulting mesh later
self.__vao = glGenVertexArrays(1)
# enable the vertex array before doing anything else, so anything we do is captured in the VAO context
glBindVertexArray(self.__vao)
# generate 2 buffers, 1 for positions, 1 for elements. this is memory on the GPU that our model will be saved in.
bufs = glGenBuffers(2)
# set the first buffer for the main vertex data, that GL_ARRAY_BUFFER indicates that use case
glBindBuffer(GL_ARRAY_BUFFER, bufs[0])
# upload the position data to the GPU
# some info about the arguments:
# GL_ARRAY_BUFFER: this is the buffer we are uploading into, that is why we first had to bind the created buffer, else we'd be uploading to nothing
# sizeof(ctypes.c_float) * len(positions): openGL wants our data as raw C pointer, and for that it needs to know the size in bytes.
# the ctypes module helps us figure out the size in bytes of a single number, then we just multiply that by the array length
# (ctypes.c_float * len(positions))(*positions): this is a way to convert a python list or tuple to a ctypes array of the right data type
# internally this makes that data the right binary format
# GL_STATIC_DRAW: in OpenGL you can specify what you will be doing with this buffer, static means draw it a lot but never access or alter the data once uploaded.
# I suggest changing this only when hitting performance issues at a time you are doing way more complicated things. In general usage static is the fastest.
glBufferData(GL_ARRAY_BUFFER, sizeof(ctypes.c_float) * len(positions), (ctypes.c_float * len(positions))(*positions), GL_STATIC_DRAW)
# set the second buffer for the triangulation data, GL_ELEMENT_ARRAY_BUFFER indicates the use here
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])
# upload the triangulation data
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * len(elements), (ctypes.c_uint * len(elements))(*elements), GL_STATIC_DRAW)
# because the data is now on the GPU, our python positions & elements can be safely garbage collected hereafter
# turn on the position attribute so OpenGL starts using our array buffer to read vertex positions from
glEnableVertexAttribArray(0)
# set the dimensions of the position attribute, so it consumes 2 floats at a time (default is 4)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, None)

So that was quite some code, and it is quite simple because we only have positions to deal with right now. But first let's try to draw it! Replace the glRecti call with:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
# enable the vertex array we initialized, it will bind the right buffers in the background again
glBindVertexArray(self.__vao)
# draw triangles based on the active GL_ELEMENT_ARRAY_BUFFER
# that 6 is the element count, we can save the len(elements) in initializeGL in the future
# that None is because openGL allows us to supply an offset for what element to start drawing at
# (we could only draw the second triangle by offsetting by 3 indices for example)
# problem is that the data type for this must be None or ctypes.c_void_p.
# In many C++ examples you will see just "0" being passed in
# but in PyOpenGL this doesn't work and will result in nothing being drawn.
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, None)

Now we should have an identical picture. Some more info about glVertexAttribPointer:

In OpenGL we can upload as many buffers as we want, but for now I'll stick with the 2 we have. This means that if we want to (for example) add colors to our mesh, we have to set up multiple attrib pointers, that both point to different parts of the buffer. I like to keep all my vertex data concatenated, so that we could get (x,y,r,g,b,x,y,r,g,b...) etcetera in our buffer.

Now for OpenGL to render it not only wants to know what buffer to look at (the array_buffer), but it also wants to know how to interpret that data, and what data is provided. OpenGL understands this through attribute locations. Here we activate attribute location 0 (with glEnableVertexAttribArray) and then set our buffer to be 2 floats per vertex at attribute location 0.

The default openGL attribute locations are as follows:
0: position
1: tangent
2: normal
3: color
4: uv

To support multiple attributes in a single buffer we have to use the last 2 arguments of glVertexAttribPointer. The first of those is the size of all data per vertex, so imagine a 2D position and an RGB color that would be 5 * sizeof(float). The second of those is where this attribute location starts. Here's an example to set up position and color:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
# interleaved buffer: 5 numbers per vertex laid out as (x, y, r, g, b), 4 vertices total
vertex_data = (0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1) ##
# numbers per vertex, used below to compute the stride in bytes
vertex_element_size = 5 ##
elements = (0, 1, 2, 1, 3, 2)
self.__vao = glGenVertexArrays(1)
glBindVertexArray(self.__vao)
bufs = glGenBuffers(2)
glBindBuffer(GL_ARRAY_BUFFER, bufs[0])
glBufferData(GL_ARRAY_BUFFER, sizeof(ctypes.c_float) * len(vertex_data), (ctypes.c_float * len(vertex_data))(*vertex_data), GL_STATIC_DRAW) ##
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * len(elements), (ctypes.c_uint * len(elements))(*elements), GL_STATIC_DRAW)
# color: 3 floats at attribute location 3, starting 2 floats (the x, y) into each vertex
glEnableVertexAttribArray(3) ##
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, sizeof(ctypes.c_float) * vertex_element_size, ctypes.c_void_p(2 * sizeof(ctypes.c_float))) ##
# position: 2 floats at attribute location 0, starting at offset 0 (hence None)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(ctypes.c_float) * vertex_element_size, None) ##

This is an update to initializeGL, new / changed code ends in ## (because I don't know how to override the syntax highlighting), and your rectangle will immediately start showing colors!

The color attribute binding only works on NVidia, there is no official default attribute location and most other drivers will ignore glVertexAttribPointer (or do something random) if you do not use a custom shader. So if you're not seeing colors, don't worry and try diving into shaders later!

One last thing. Add this to the top of paintGL:

1
2
3
4
import time
# reset the modelview matrix so transforms do not accumulate across frames
glLoadIdentity()
# compensate for the window aspect ratio so squares stay square
glScalef(self.height() / float(self.width()), 1.0, 1.0)
# spin 10 degrees per second around Z; the modulo keeps the huge epoch time
# in a small range (36 s * 10 deg/s = exactly one full 360 degree turn)
glRotate((time.time() % 36.0) * 10, 0, 0, 1)

The first line (after the import) restores the transform state, the second line corrects aspect ratio (so a square is really square now), the last line rotates over time. We take the time modulo 36 seconds because Python's epoch time is far too large a value for OpenGL to work with precisely; the modulo keeps it in a small range, and 36 seconds at 10 degrees per second makes exactly one full turn so the animation loops seamlessly.

That's it for part 1!

Part 2: Creating an OpenGL friendly mesh exporter for Maya

Part 2: Creating an exporter

This is part 2 of a series and it is about getting started with visualizing triangle meshes with Python 2.7 using the libraries PyOpenGL and PyQt4.
PyQt5, PySide, PySide2 are (apart from some class renames) also compatible with this.

Part 1
Part 2
Part 3

I will assume you know python, you will not need a lot of Qt or OpenGL experience, though I will also not go into the deeper details of how OpenGL works. For that I refer you to official documentation and the excellent (C++) tutorials at https://open.gl/. Although they are C++, there is a lot of explanation about OpenGL and why to do certain calls in a certain order.

On a final note: I will make generalizations and simplifications when explaining things. If you think something works differently than I say, it probably does; this is to try and convey ideas to beginners, not to explain low level openGL implementations.

2.1 File layout

Now that we can draw a model, it is time to define the data that we need to give OpenGL and decide upon a file format that can contain all this data.

Starting off with the elements, we have a way to draw them (GL_TRIANGLES in our case) and we have a data type (GL_UNSIGNED_INT in our case). Given this data type and the number of elements we can actually determine the buffer size regardless of the data type, allowing our file to support not only a number of element values, but allowing those values to be of all the supported types.

Similarly we can look at the vertex layout. We probably want a vertex count and a size of per-vertex-data. This size is a little more complicated because the attribute layout can be very flexible. I suppose it's easier if we also write the vertex element size instead of trying to figure out what it should be based on the layout.

Then we can look at the attribute layout. We can assume all our data is tightly packed, so we can infer the offset (last argument of glVertexAttribPointer). That leaves us with a layout location, a number of values per vertex, and a data type. Of course first we need to write how many attributes we have.

After that all we need to do is fill in the buffer data. So for vertexCount * vertexElementSize bytes we specify binary data for the vertex buffer and for elementCount * elementDataSize we specify binary data for the elements buffer.

Our file format now looks like this:

Field type description
Version nr byte So we can change the format later and not break things.
Vertex count unsigned int Because the elements_array can use at most an unsigned int we can never point to vertices beyond the maximum of this data, so no need to store more bytes.
Vertex element size byte Size in bytes of a vertex, based on all attribute sizes combined.
Element count unsigned int
Element data size byte To infer whether indices are unsigned char, unsigned short or unsigned int.
Render type GLenum OpenGL defines variables as GL_TRIANGLES as a GLenum type which is in turn just an unsigned int.
Number of attributes byte
[For each attribute]
Attribute location byte
Attribute dimensions byte Is it a single value, vec2, 3 or 4?
Attribute type GLenum Are the values float or int? More types are listed in the OpenGL documentation for glVertexAttribPointer.
[End for]
Vertex buffer vertexCount * vertexElementSize bytes
Elements buffer elementCount * elementDataSize bytes

That brings us to the next step, gather this information in Maya.

2.2 Maya mesh exporter

To export I'll use the maya API (OpenMaya). It provides a way to quickly iterate over a mesh's data without allocating too much memory using the MItMeshPolygon iterator. This will iterate over all the faces and allow us to extract the individual triangles and face vertices.

There are a few steps to do. First let's make a script to generate a test scene:

1
2
3
4
5
6
from maya import cmds
from maya.OpenMaya import *
# build a minimal test scene: a fresh file containing one default polygon cube
cmds.file(new=True, force=True)
cmds.polyCube()
# shape node to export and destination file; named mayaShapeName so the
# export snippets that follow (which read mayaShapeName) work unchanged
mayaShapeName = 'pCubeShape1'
outputFilePath = 'C:/Test.bgm'

Now with these variables in mind we have to convert the shape name to actual maya API objects that we can read data from.

1
2
3
4
5
6
7
8
# get an MDagPath from the given mesh path
# (the selection list is filled with nodes matching the name, then the first hit is taken)
p = MDagPath()
l = MSelectionList()
MGlobal.getSelectionListByName(mayaShapeName, l)
l.getDagPath(0, p)

# get the iterator
# MItMeshPolygon walks the mesh face by face without loading all data at once
poly = MItMeshPolygon(p)

This sets us up to actually start saving data. Because openGL requires us to provide all the data of a vertex at 1 vertex index we have to remap some of Maya's data. In Maya a vertex (actually face-vertex in Maya terms) is a list of indices that points to e.g. what vertex to use, what normal to use, etc. All with separate indices. In OpenGL all these indices must match. The way I'll go about this is to simply take the triangulation and generate 3 unique vertices for each triangle. This means the vertex count can be determined by counting the triangles in the mesh. Maya meshes don't expose functionality to query this directly, so instead I'll iterate over all the faces and count the triangles in them.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# open the file as binary
# NOTE(review): poly, space, struct and FILE_VERSION come from the earlier snippets/constants
with open(outputFilePath, 'wb') as fh:
    # fixing the vertex data size to just X, Y, Z floats for the vertex position
    vertexElementSize = 3 * ctypes.sizeof(ctypes.c_float)

    # using unsigned integers as elements
    indexElementSize = ctypes.sizeof(ctypes.c_uint)

    # gather the number of vertices
    # one pass over all faces: getTriangles yields 3 indices per triangle,
    # and every triangle corner becomes its own (duplicated) vertex
    vertexCount = 0
    while not poly.isDone():
        vertices = MPointArray()
        vertexList = MIntArray()
        poly.getTriangles(vertices, vertexList, space)
        vertexCount += vertexList.length()
        poly.next()
    poly.reset()
    # start writing the file header (see the format table in the article)
    fh.write(struct.pack('B', FILE_VERSION))
    fh.write(struct.pack('I', vertexCount))
    fh.write(struct.pack('B', vertexElementSize))
    # currently I'm duplicating all vertices per triangle, so total indices matches total vertices
    fh.write(struct.pack('I', vertexCount))
    fh.write(struct.pack('B', indexElementSize))
    fh.write(struct.pack('I', GL_TRIANGLES))  # render type

As you can see we had to make some assumptions about vertex data size and we had to gather some intel on our final vertex count, but this is a good setup. Next step is to write the attribute layout. I've made the assumption here to write only X Y Z position floats at location 0. We can expand the exporter later with more features, as our file format supports variable attribute layouts. We can write our position attribute next:

1
2
3
4
5
6
7
8
# attribute layout
# 1 attribute
fh.write(struct.pack('B', 1))
# at location 0 (position)
fh.write(struct.pack('B', 0))
# of 3 floats (x, y, z)
fh.write(struct.pack('B', 3))
# component type; GLenum is an unsigned int, hence the 'I' format
fh.write(struct.pack('I', GL_FLOAT))

Note that I am using a constant GL_FLOAT here, if you do not wish to install PyOpenGL for your maya, you can quite simply include this at the top of the file instead:

1
2
3
4
import ctypes

# OpenGL enum values hard-coded so the Maya-side exporter does not require
# PyOpenGL to be installed; values match the official GL headers.
GL_TRIANGLES = 0x0004      # primitive / render type
GL_UNSIGNED_INT = 0x1405   # element index data type
GL_FLOAT = 0x1406          # attribute component data type

After that comes streaming the vertex buffer. For this I use the same iterator I used to count the vertex count. The code is pretty much the same only now I write the vertices instead of counting the vertex list.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
# iter all faces
while not poly.isDone():
    # get triangulation of this face
    # vertices holds the triangle corner points, vertexList the object-wide vertex indices
    vertices = MPointArray()
    vertexList = MIntArray()
    poly.getTriangles(vertices, vertexList, space)

    # write the positions: three floats (x, y, z) per triangle corner
    for i in xrange(vertexList.length()):
        fh.write(struct.pack('3f', vertices[i][0], vertices[i][1], vertices[i][2]))

    poly.next()

Last is the element buffer.

1
2
3
# write the elements buffer
# vertices were duplicated per triangle corner, so the indices are simply 0..N-1
for i in xrange(vertexCount):
   fh.write(struct.pack('I', i))

2.3 All the data

The next step naturally is to export more than just the position. Here is a more elaborate way to extract all the attributes. First we need to get some global data from the mesh. This goes right after where we create the MItMeshPolygons.

1
2
3
4
5
6
7
# mesh function set for whole-mesh queries (the iterator only sees one face at a time)
fn = MFnMesh(p)
# tangents are queried object-wide, in the chosen space
tangents = MFloatVectorArray()
fn.getTangents(tangents, space)
# names of all color sets and uv sets; these drive the attribute layout below
colorSetNames = []
fn.getColorSetNames(colorSetNames)
uvSetNames = []
fn.getUVSetNames(uvSetNames)

Next we have to change our vertexElementSize code to the following:

1
2
3
# compute the vertex data size, write 4 floats for the position for more convenient transformation in shaders
# position, tangent, normal, color sets, uv sets
# (vec4 + vec3 + vec3 + one vec4 per color set + one vec2 per uv set, all floats)
vertexElementSize = (4 + 3 + 3 + 4 * len(colorSetNames) + 2 * len(uvSetNames)) * ctypes.sizeof(ctypes.c_float)

The attribute layout is significantly changed. I'm also changing the point data from a vec3 to a vec4. I'm filling in the w component as 1.0, this to indicate a point instead of a vector. It will make transforming vertices in shaders a step simpler.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
# attribute layout

# Since NVidia is the only driver to implement a default attribute layout I am following this as much as possible
# on other drivers using a custom shader is mandatory and modern buffers will never work with the fixed function pipeline.
# http://developer.download.nvidia.com/opengl/glsl/glsl_release_notes.pdf
# https://stackoverflow.com/questions/20573235/what-are-the-attribute-locations-for-fixed-function-pipeline-in-opengl-4-0-cor

# num attributes: position + tangent + normal + one per color set + one per uv set
fh.write(struct.pack('B', 3 + len(colorSetNames) + len(uvSetNames)))
# vec4 position at location 0
fh.write(struct.pack('B', 0))
fh.write(struct.pack('B', 4))
fh.write(struct.pack('I', GL_FLOAT))
# vec3 tangent at location 1
fh.write(struct.pack('B', 1))
fh.write(struct.pack('B', 3))
fh.write(struct.pack('I', GL_FLOAT))
# vec3 normal at location 2
fh.write(struct.pack('B', 2))
fh.write(struct.pack('B', 3))
fh.write(struct.pack('I', GL_FLOAT))
# vec4 color at locations (3,7) and 16+
# BUGFIX: this must be a set; {} creates a dict, which has no add() and
# would raise AttributeError on the 6th color set
used = set()
for i in xrange(len(colorSetNames)):
    idx = 3 + i
    if idx > 7:
        # past the reserved 3-7 range: jump to 16+ (11 + 5 == 16 for the first overflow)
        # and remember the slot so the uv loop below does not reuse it
        idx = 11 + i
        used.add(idx)
    fh.write(struct.pack('B', idx))
    fh.write(struct.pack('B', 4))
    fh.write(struct.pack('I', GL_FLOAT))
# vec2 uvs at locations 8-15 and 16+, but avoiding overlap with colors
idx = 8
for i in xrange(len(uvSetNames)):
    # skip any 16+ slot already claimed by an overflowing color set
    while idx in used:
        idx += 1
    fh.write(struct.pack('B', idx))
    fh.write(struct.pack('B', 2))
    fh.write(struct.pack('I', GL_FLOAT))
    idx += 1

Most of the MItMeshPolygon iterator functions, like getNormals(), give us a list of the normals for all vertices in this face. The problem is that this data is not triangulated.

To extract the triangulation we used getTriangles(), which gives us a list of vertices used in the face. These vertex numbers are object-wide, so they keep getting bigger the further we get.

That means they're useless if we want to use them to look up the normal returned by getNormals(), because that array is always very short, containing just the normals for this face.

So we have to do some mapping from the triangulated vertex indices into indices that match the data we've got. Either that or get all the normals from the mesh in 1 big array but that is not memory efficient. So at the top of the while loop (just inside) I've added the following dictionary:

1
2
3
4
5
6
# map object indices to local indices - because for some reason we can not query the triangulation as local indices
# but all getters do want us to provide local indices
objectToFaceVertexId = {}
count = poly.polygonVertexCount()
for i in xrange(count):
    objectToFaceVertexId[poly.vertexIndex(i)] = i

That allows us to extract all the data we want for these triangles like so:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
# get per-vertex data for the current face only (keeps memory usage small)
normals = MVectorArray()
poly.getNormals(normals, space)
# one MColorArray per color set, indexed in step with colorSetNames
colorSet = []
for i, colorSetName in enumerate(colorSetNames):
    colorSet.append(MColorArray())
    poly.getColors(colorSet[i], colorSetName)
# u and v come back as two parallel float arrays per uv set
uvSetU = []
uvSetV = []
for i, uvSetName in enumerate(uvSetNames):
    uvSetU.append(MFloatArray())
    uvSetV.append(MFloatArray())
    poly.getUVs(uvSetU[i], uvSetV[i], uvSetName)

Handling fairly small sets of data at a time. Last we have to write the data, replacing the loop writing 3 floats per vertex we had before with this longer loop:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
# write the data, one fully interleaved vertex per triangle corner
for i in xrange(vertexList.length()):
    # translate the object-wide index from getTriangles into a face-local index
    localVertexId = objectToFaceVertexId[vertexList[i]]
    tangentId = poly.tangentIndex(localVertexId)

    # position as vec4; w = 1.0 marks a point (not a direction) for shader transforms
    fh.write(struct.pack('4f', vertices[i][0], vertices[i][1], vertices[i][2], 1.0))
    fh.write(struct.pack('3f', tangents[tangentId][0], tangents[tangentId][1], tangents[tangentId][2]))
    fh.write(struct.pack('3f', normals[localVertexId][0], normals[localVertexId][1], normals[localVertexId][2]))
    # RGBA per color set, then UV per uv set, matching the declared attribute layout
    for j in xrange(len(colorSetNames)):
        fh.write(struct.pack('4f', colorSet[j][localVertexId][0], colorSet[j][localVertexId][1], colorSet[j][localVertexId][2], colorSet[j][localVertexId][3]))
    for j in xrange(len(uvSetNames)):
        fh.write(struct.pack('2f', uvSetU[j][localVertexId], uvSetV[j][localVertexId]))

And that completes the exporter with full functionality, extracting all possible data from a maya mesh we want. Unless you want blind data and skin clusters, but that's a whole different story!

2.4 Code

Here is the final code as a function, with an additional function to export multiple selected meshes to multiple files, using Qt for UI. Note that if you wish to use PySide or PyQt5 instead the QFileDialog.getExistingDirectory and QSettings.value return types are different and require some work.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import os
import struct
from maya import cmds
from maya.OpenMaya import *
import ctypes

# OpenGL enum values copied verbatim so the Maya-side exporter does not need a
# PyOpenGL dependency; they are written into the file header for the importer.
GL_TRIANGLES = 0x0004
GL_UNSIGNED_INT = 0x1405
GL_FLOAT = 0x1406
FILE_EXT = '.bm'  # binary mesh
FILE_VERSION = 0
EXPORT_SPACE = MSpace.kWorld  # export meshes in world space for now


def exportMesh(mayaShapeName, outputFilePath, space):
    """Export a single Maya mesh shape to a binary mesh (FILE_EXT) file.

    mayaShapeName: name or dag path of the mesh shape node to export.
    outputFilePath: destination file path, opened in binary write mode.
    space: an MSpace constant; the space points/normals/tangents are sampled in.

    File layout: header (version, vertex count/size, index count/size, draw
    mode), attribute layout descriptions, interleaved vertex data, and the
    index buffer. Vertices are fully expanded per triangle corner, so the
    index buffer is simply 0..vertexCount-1.
    """
    # get an MDagPath from the given mesh path
    p = MDagPath()
    l = MSelectionList()
    MGlobal.getSelectionListByName(mayaShapeName, l)
    l.getDagPath(0, p)

    # get the mesh and iterator
    fn = MFnMesh(p)
    poly = MItMeshPolygon(p)

    # tangents are indexed per face-vertex via MItMeshPolygon.tangentIndex
    tangents = MFloatVectorArray()
    fn.getTangents(tangents, space)
    colorSetNames = []
    fn.getColorSetNames(colorSetNames)
    uvSetNames = []
    fn.getUVSetNames(uvSetNames)

    # open the file as binary
    with open(outputFilePath, 'wb') as fh:
        # compute the vertex data size, write 4 floats for the position for more convenient transformation in shaders
        # position, tangent, normal, color sets, uv sets
        vertexElementSize = (4 + 3 + 3 + 4 * len(colorSetNames) + 2 * len(uvSetNames)) * ctypes.sizeof(ctypes.c_float)

        # using unsigned integers as elements
        indexElementSize = ctypes.sizeof(ctypes.c_uint)

        # gather the number of vertices by pre-iterating all triangulated faces once
        vertexCount = 0
        while not poly.isDone():
            vertices = MPointArray()
            vertexList = MIntArray()
            poly.getTriangles(vertices, vertexList, space)
            vertexCount += vertexList.length()
            poly.next()
        poly.reset()

        # start writing the header
        fh.write(struct.pack('B', FILE_VERSION))
        fh.write(struct.pack('I', vertexCount))
        fh.write(struct.pack('B', vertexElementSize))
        # currently I'm duplicating all vertices per triangle, so total indices matches total vertices
        fh.write(struct.pack('I', vertexCount))
        fh.write(struct.pack('B', indexElementSize))
        fh.write(struct.pack('I', GL_TRIANGLES))  # render type

        # attribute layout

        # Since NVidia is the only driver to implement a default attribute layout I am following this as much as possible
        # on other drivers using a custom shader is mandatory and modern buffers will never work with the fixed function pipeline.
        # http://developer.download.nvidia.com/opengl/glsl/glsl_release_notes.pdf
        # https://stackoverflow.com/questions/20573235/what-are-the-attribute-locations-for-fixed-function-pipeline-in-opengl-4-0-cor

        # num attributes
        fh.write(struct.pack('B', 3 + len(colorSetNames) + len(uvSetNames)))
        # vec4 position at location 0
        fh.write(struct.pack('B', 0))
        fh.write(struct.pack('B', 4))
        fh.write(struct.pack('I', GL_FLOAT))
        # vec3 tangent at location 1
        fh.write(struct.pack('B', 1))
        fh.write(struct.pack('B', 3))
        fh.write(struct.pack('I', GL_FLOAT))
        # vec3 normal at location 2
        fh.write(struct.pack('B', 2))
        fh.write(struct.pack('B', 3))
        fh.write(struct.pack('I', GL_FLOAT))
        # vec4 color at locations (3,7) and 16+
        # FIX: this must be a set - the original `used = {}` was a dict, which has
        # no add() and raised AttributeError as soon as a color set spilled past
        # location 7 (i.e. a sixth color set).
        used = set()
        for i in xrange(len(colorSetNames)):
            idx = 3 + i
            if idx > 7:
                idx = 11 + i  # i == 5 maps to 16, the first generic location
                used.add(idx)
            fh.write(struct.pack('B', idx))
            fh.write(struct.pack('B', 4))
            fh.write(struct.pack('I', GL_FLOAT))
        # vec2 uvs at locations 8-15 and 16+, but avoiding overlap with colors
        idx = 8
        for i in xrange(len(uvSetNames)):
            while idx in used:
                idx += 1
            fh.write(struct.pack('B', idx))
            fh.write(struct.pack('B', 2))
            fh.write(struct.pack('I', GL_FLOAT))
            idx += 1

        # iter all faces
        while not poly.isDone():
            # map object indices to local indices - because for some reason we can not query the triangulation as local indices
            # but all getters do want us to provide local indices
            objectToFaceVertexId = {}
            count = poly.polygonVertexCount()
            for i in xrange(count):
                objectToFaceVertexId[poly.vertexIndex(i)] = i

            # get triangulation of this face
            vertices = MPointArray()
            vertexList = MIntArray()
            poly.getTriangles(vertices, vertexList, space)

            # get per-vertex data
            normals = MVectorArray()
            poly.getNormals(normals, space)
            colorSet = []
            for i, colorSetName in enumerate(colorSetNames):
                colorSet.append(MColorArray())
                poly.getColors(colorSet[i], colorSetName)
            uvSetU = []
            uvSetV = []
            for i, uvSetName in enumerate(uvSetNames):
                uvSetU.append(MFloatArray())
                uvSetV.append(MFloatArray())
                poly.getUVs(uvSetU[i], uvSetV[i], uvSetName)

            # write the interleaved vertex data for every triangle corner
            for i in xrange(vertexList.length()):
                localVertexId = objectToFaceVertexId[vertexList[i]]
                tangentId = poly.tangentIndex(localVertexId)

                fh.write(struct.pack('4f', vertices[i][0], vertices[i][1], vertices[i][2], 1.0))
                fh.write(struct.pack('3f', tangents[tangentId][0], tangents[tangentId][1], tangents[tangentId][2]))
                fh.write(struct.pack('3f', normals[localVertexId][0], normals[localVertexId][1], normals[localVertexId][2]))
                for j in xrange(len(colorSetNames)):
                    fh.write(struct.pack('4f', colorSet[j][localVertexId][0], colorSet[j][localVertexId][1], colorSet[j][localVertexId][2], colorSet[j][localVertexId][3]))
                for j in xrange(len(uvSetNames)):
                    fh.write(struct.pack('2f', uvSetU[j][localVertexId], uvSetV[j][localVertexId]))

            poly.next()

        # write the elements buffer: vertices are unique per corner, so indices are trivial
        for i in xrange(vertexCount):
            fh.write(struct.pack('I', i))


def exportSelected():
    """Export every selected mesh shape (and mesh children of selected transforms).

    Prompts for a target directory - remembered across sessions via QSettings -
    and writes one FILE_EXT file per mesh, named after the shape's short name.
    """
    # FIX: listing the selection is cmds.ls(sl=True, ...); cmds.select(ls=True, ...)
    # was wrong - select modifies the selection instead of querying it.
    selectedMeshShapes = cmds.ls(sl=True, type='mesh', l=True) or []
    selectedMeshShapes += cmds.listRelatives(cmds.ls(sl=True, type='transform', l=True) or [], c=True, type='mesh', f=True) or []
    from PyQt4.QtCore import QSettings
    from PyQt4.QtGui import QFileDialog
    settings = QSettings('GLMeshExport')
    mostRecentDir = str(settings.value('mostRecentDir').toPyObject())
    targetDir = QFileDialog.getExistingDirectory(None, 'Save selected meshes in directory', mostRecentDir)
    if targetDir and os.path.exists(targetDir):
        settings.setValue('mostRecentDir', targetDir)
        for i, shortName in enumerate(cmds.ls(selectedMeshShapes)):
            # FIX: append the extension to the file name; the original passed
            # FILE_EXT as an extra os.path.join segment, yielding e.g. "name/.bm".
            exportMesh(selectedMeshShapes[i],
                       os.path.join(targetDir, shortName.replace('|', '_') + FILE_EXT),
                       EXPORT_SPACE)

Part 3: Importing and drawing a custom mesh file

Part 3: Creating an importer

This is part 3 of a series and it is about getting started with visualizing triangle meshes with Python 2.7 using the libraries PyOpenGL and PyQt4.
PyQt5, PySide, PySide2 are (apart from some class renames) also compatible with this.

Part 1
Part 2
Part 3

I will assume you know python, you will not need a lot of Qt or OpenGL experience, though I will also not go into the deeper details of how OpenGL works. For that I refer you to official documentation and the excellent (C++) tutorials at https://open.gl/. Although they are C++, there is a lot of explanation about OpenGL and why to do certain calls in a certain order.

On a final note: I will make generalizations and simplifications when explaining things. If you think something works differently than I say, it probably does; this is to try and convey ideas to beginners, not to explain low-level openGL implementations.

3.1 Importing

Because our file format resembles openGL so closely, this step is relatively easy. First I'll declare some globals; because openGL does not have real enums but just a bunch of global constants, I make some groups to do testing and data mapping against.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
from OpenGL.GL import *

# GLenum values that are legal as a vertex attribute element type
# (validated against when reading a file's attribute layout).
attributeElementTypes = (GL_BYTE,
                        GL_UNSIGNED_BYTE,
                        GL_SHORT,
                        GL_UNSIGNED_SHORT,
                        GL_INT,
                        GL_UNSIGNED_INT,
                        GL_HALF_FLOAT,
                        GL_FLOAT,
                        GL_DOUBLE,
                        GL_FIXED,
                        GL_INT_2_10_10_10_REV,
                        GL_UNSIGNED_INT_2_10_10_10_REV,
                        GL_UNSIGNED_INT_10F_11F_11F_REV)
# Byte size of each attribute element type, used to compute offsets and stride.
sizeOfType = {GL_BYTE: 1,
             GL_UNSIGNED_BYTE: 1,
             GL_SHORT: 2,
             GL_UNSIGNED_SHORT: 2,
             GL_INT: 4,
             GL_UNSIGNED_INT: 4,
             GL_HALF_FLOAT: 2,
             GL_FLOAT: 4,
             GL_DOUBLE: 8,
             GL_FIXED: 4,
             GL_INT_2_10_10_10_REV: 4,
             GL_UNSIGNED_INT_2_10_10_10_REV: 4,
             GL_UNSIGNED_INT_10F_11F_11F_REV: 4}
# All primitive modes glDrawElements accepts.
drawModes = (GL_POINTS,
            GL_LINE_STRIP,
            GL_LINE_LOOP,
            GL_LINES,
            GL_LINE_STRIP_ADJACENCY,
            GL_LINES_ADJACENCY,
            GL_TRIANGLE_STRIP,
            GL_TRIANGLE_FAN,
            GL_TRIANGLES,
            GL_TRIANGLE_STRIP_ADJACENCY,
            GL_TRIANGLES_ADJACENCY,
            GL_PATCHES)
# Map an index element's byte size (from the file header) to its GLenum type.
indexTypeFromSize = {1: GL_UNSIGNED_BYTE, 2: GL_UNSIGNED_SHORT, 4: GL_UNSIGNED_INT}

Next up is a Mesh class that stores a vertex array object (and corresponding buffers for deletion) along with all info necessary to draw the mesh once it's on the GPU.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
class Mesh(object):
    """Owns a vertex array object plus its buffers and knows how to draw them.

    Buffer handles are kept solely so the GPU resources can be released again
    when this object is garbage collected.
    """

    def __init__(self, vao, bufs, drawMode, indexCount, indexType):
        self._vao = vao
        self._bufs = bufs
        self._drawMode = drawMode
        self._indexCount = indexCount
        self._indexType = indexType

    def __del__(self):
        # Free the GPU-side buffers and the vertex array object.
        glDeleteBuffers(len(self._bufs), self._bufs)
        glDeleteVertexArrays(1, [self._vao])

    def draw(self):
        # Bind the VAO (restores all attribute/buffer state) and issue the draw call.
        glBindVertexArray(self._vao)
        glDrawElements(self._drawMode, self._indexCount, self._indexType, None)

Now let's, given a file path, open up the file and run the importer for the right version (if known).

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
def model(filePath):
    """Create a VAO and two buffers, then load the mesh file into them.

    Dispatches on the version byte at the start of the file; raises
    RuntimeError for versions we have no loader for.
    """
    vao = glGenVertexArrays(1)
    glBindVertexArray(vao)
    bufs = glGenBuffers(2)
    glBindBuffer(GL_ARRAY_BUFFER, bufs[0])
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])
    with open(filePath, 'rb') as fh:
        version, = struct.unpack('B', fh.read(1))
        if version != 0:
            raise RuntimeError('Unknown mesh file version %s in %s' % (version, filePath))
        return _loadMesh_v0(fh, vao, bufs)

Next we can start reading the rest of the file:

1
2
3
4
5
6
7
8
    vertexCount = struct.unpack('I', fh.read(4))[0]
    vertexSize = struct.unpack('B', fh.read(1))[0]
    indexCount = struct.unpack('I', fh.read(4))[0]
    indexSize = struct.unpack('B', fh.read(1))[0]
    assert indexSize in indexTypeFromSize, 'Unknown element data type, element size must be one of %s' % indexTypeFromSize.keys()
    indexType = indexTypeFromSize[indexSize]
    drawMode = struct.unpack('I', fh.read(4))[0]
    assert drawMode in (GL_LINES, GL_TRIANGLES), 'Unknown draw mode.'  # TODO: list all render types

Read and apply the attribute layout:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
# gather layout
numAttributes = struct.unpack('B', fh.read(1))[0]
offset = 0
layouts = []
for i in xrange(numAttributes):
   location = struct.unpack('B', fh.read(1))[0]
   dimensions = struct.unpack('B', fh.read(1))[0]
   assert dimensions in (1, 2, 3, 4)
   dataType = struct.unpack('I', fh.read(4))[0]
   assert dataType in attributeElementTypes, 'Invalid GLenum value for attribute element type.'
   layouts.append((location, dimensions, dataType, offset))
   offset += dimensions * sizeOfType[dataType]
# apply
for layout in layouts:
   glVertexAttribPointer(layout[0], layout[1], layout[2], GL_FALSE, offset, ctypes.c_void_p(layout[3]))  # total offset is now stride
   glEnableVertexAttribArray(layout[0])

Read and upload the raw buffer data. This step is easy because we can directly copy the bytes as the storage matches exactly with how openGL expects it due to the layout code above.

1
2
3
4
 = fh.read(vertexSize * vertexCount)  
glBufferData(GL_ARRAY_BUFFER, vertexSize * vertexCount, raw, GL_STATIC_DRAW)  
raw = fh.read(indexSize * indexCount)  
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexSize * indexCount, raw, GL_STATIC_DRAW)

3.2 The final code

This is the application code including all the rendering from part 1, only the rectangle has been replaced by the loaded mesh.

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
import ctypes
import struct

from OpenGL.GL import *

# GLenum values that are legal as a vertex attribute element type
# (validated against when reading a file's attribute layout).
attributeElementTypes = (GL_BYTE,
                        GL_UNSIGNED_BYTE,
                        GL_SHORT,
                        GL_UNSIGNED_SHORT,
                        GL_INT,
                        GL_UNSIGNED_INT,
                        GL_HALF_FLOAT,
                        GL_FLOAT,
                        GL_DOUBLE,
                        GL_FIXED,
                        GL_INT_2_10_10_10_REV,
                        GL_UNSIGNED_INT_2_10_10_10_REV,
                        GL_UNSIGNED_INT_10F_11F_11F_REV)
# Byte size of each attribute element type, used to compute offsets and stride.
sizeOfType = {GL_BYTE: 1,
             GL_UNSIGNED_BYTE: 1,
             GL_SHORT: 2,
             GL_UNSIGNED_SHORT: 2,
             GL_INT: 4,
             GL_UNSIGNED_INT: 4,
             GL_HALF_FLOAT: 2,
             GL_FLOAT: 4,
             GL_DOUBLE: 8,
             GL_FIXED: 4,
             GL_INT_2_10_10_10_REV: 4,
             GL_UNSIGNED_INT_2_10_10_10_REV: 4,
             GL_UNSIGNED_INT_10F_11F_11F_REV: 4}
# All primitive modes glDrawElements accepts.
drawModes = (GL_POINTS,
            GL_LINE_STRIP,
            GL_LINE_LOOP,
            GL_LINES,
            GL_LINE_STRIP_ADJACENCY,
            GL_LINES_ADJACENCY,
            GL_TRIANGLE_STRIP,
            GL_TRIANGLE_FAN,
            GL_TRIANGLES,
            GL_TRIANGLE_STRIP_ADJACENCY,
            GL_TRIANGLES_ADJACENCY,
            GL_PATCHES)
# Map an index element's byte size (from the file header) to its GLenum type.
indexTypeFromSize = {1: GL_UNSIGNED_BYTE, 2: GL_UNSIGNED_SHORT, 4: GL_UNSIGNED_INT}


def _loadMesh_v0(fh, vao, bufs):
    """Read a version-0 mesh file body from fh and upload it to the bound buffers.

    fh: binary file object, positioned just past the version byte.
    vao: the already-bound vertex array object handle (stored in the Mesh).
    bufs: (arrayBuffer, elementArrayBuffer) handles, already bound by model().
    Returns a Mesh wrapping the uploaded data.
    Raises AssertionError if the header, layout or trailing data is inconsistent.
    """
    vertexCount = struct.unpack('I', fh.read(4))[0]
    vertexSize = struct.unpack('B', fh.read(1))[0]
    indexCount = struct.unpack('I', fh.read(4))[0]
    indexSize = struct.unpack('B', fh.read(1))[0]
    assert indexSize in indexTypeFromSize, 'Unknown element data type, element size must be one of %s' % indexTypeFromSize.keys()
    indexType = indexTypeFromSize[indexSize]
    drawMode = struct.unpack('I', fh.read(4))[0]
    assert drawMode in (GL_LINES, GL_TRIANGLES), 'Unknown draw mode.'  # TODO: list all render types

    # gather layout: (location, dimensions, dataType, byte offset) per attribute
    numAttributes = struct.unpack('B', fh.read(1))[0]
    offset = 0
    layouts = []
    for i in xrange(numAttributes):
        location = struct.unpack('B', fh.read(1))[0]
        dimensions = struct.unpack('B', fh.read(1))[0]
        assert dimensions in (1, 2, 3, 4)
        dataType = struct.unpack('I', fh.read(4))[0]
        assert dataType in attributeElementTypes, 'Invalid GLenum value for attribute element type.'
        layouts.append((location, dimensions, dataType, offset))
        offset += dimensions * sizeOfType[dataType]

    # added consistency check (matches the mmap-based loader): the summed
    # attribute sizes must equal the per-vertex size claimed by the header
    assert offset == vertexSize, 'File says each chunk of vertex data is %s bytes, but attribute layout used up %s bytes' % (vertexSize, offset)

    # apply layout
    for layout in layouts:
        glVertexAttribPointer(layout[0], layout[1], layout[2], GL_FALSE, offset, ctypes.c_void_p(layout[3]))  # total offset is now stride
        glEnableVertexAttribArray(layout[0])

    # raw bytes can be uploaded directly; the file layout matches GL's expectations
    raw = fh.read(vertexSize * vertexCount)
    glBufferData(GL_ARRAY_BUFFER, vertexSize * vertexCount, raw, GL_STATIC_DRAW)
    raw = fh.read(indexSize * indexCount)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexSize * indexCount, raw, GL_STATIC_DRAW)

    assert len(fh.read()) == 0, 'Expected end of file, but file is longer than it indicates'
    return Mesh(vao, bufs, drawMode, indexCount, indexType)


class Mesh(object):
    """Holds a vertex array object and its buffers, and draws them on request.

    The buffer handles are kept only so the GPU resources can be released
    again in __del__.
    """

    def __init__(self, vao, bufs, drawMode, indexCount, indexType):
        self.__vao = vao
        self.__bufs = bufs
        self.__drawMode = drawMode
        self.__indexCount = indexCount
        self.__indexType = indexType

    def __del__(self):
        # Release GPU resources when the Python object is garbage collected.
        glDeleteBuffers(len(self.__bufs), self.__bufs)
        glDeleteVertexArrays(1, [self.__vao])

    def draw(self):
        # Binding the VAO restores all attribute/buffer state captured at load time.
        glBindVertexArray(self.__vao)
        glDrawElements(self.__drawMode, self.__indexCount, self.__indexType, None)


def model(filePath):
    """Generate a VAO and two buffers, then load the mesh file at filePath into them.

    The version byte at the start of the file selects the loader; raises
    RuntimeError for unknown versions.
    """
    vao = glGenVertexArrays(1)
    glBindVertexArray(vao)
    bufs = glGenBuffers(2)
    # bufs[0] holds interleaved vertex data, bufs[1] holds the index buffer
    glBindBuffer(GL_ARRAY_BUFFER, bufs[0])
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])
    with open(filePath, 'rb') as fh:
        fileVersion = struct.unpack('B', fh.read(1))[0]
        if fileVersion == 0:
            return _loadMesh_v0(fh, vao, bufs)
        raise RuntimeError('Unknown mesh file version %s in %s' % (fileVersion, filePath))


# import the necessary modules
import time
from PyQt4.QtCore import *  # QTimer
from PyQt4.QtGui import *  # QApplication
from PyQt4.QtOpenGL import *  # QGLWidget
from OpenGL.GL import *  # OpenGL functionality


# this is the basic window: a QGLWidget that loads one mesh and redraws it on a timer
class OpenGLView(QGLWidget):
    def initializeGL(self):
        # set the RGBA values of the background
        glClearColor(0.1, 0.2, 0.3, 1.0)

        # set a timer to redraw every 1/60th of a second
        # (1000 / 60 is integer division in Python 2, i.e. a 16 ms interval)
        self.__timer = QTimer()
        self.__timer.timeout.connect(self.repaint)
        self.__timer.start(1000 / 60)

        # import a model
        # NOTE(review): hard-coded absolute path - point this at a .bm file you exported
        self.__mesh = model(r'C:\Users\John\Python\maya\cube.bm')

    def resizeGL(self, width, height):
        glViewport(0, 0, width, height)

    def paintGL(self):
        # fixed-function transform: correct for aspect ratio, then spin around Z over time
        glLoadIdentity()
        glScalef(self.height() / float(self.width()), 1.0, 1.0)
        glRotate((time.time() % 36.0) * 10, 0, 0, 1)

        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.__mesh.draw()


# this initializes Qt
app = QApplication([])
# this creates the openGL window, but it isn't initialized yet
window = OpenGLView()
# this only schedules the window to be shown on the next Qt update
window.show()
# this starts the Qt main update loop; it prevents python from continuing beyond this line
# and any Qt stuff we did above is now going to actually get executed, along with any future
# events like mouse clicks and window resizes
app.exec_()

Accelerating Maya -> PyOpenGL mesh IO

This is a continuation of the previous 3-part series on exporting a Maya mesh and drawing it with PyOpenGL and PyQt4.

Faster export

I recently got back to improving this a little. First I improved maya exporter performance by porting it to a C++ plugin. I don't want to go over all the details, because it is similar to the python version posted before and if I'm going to explain this code properly I'd have to do a tutorial series on the Maya API in the first place! So here's a little dump of the visual studio project instead: plugin.vcxproj!

It is currently very basic and just exports all data it can find. I'm aware certain maya models can crash certain other functions in Maya's MFnMesh (and related) class. E.g. empty UV sets, UV sets with UVs for only certain vertices/faces, geometry with holes crashing getTriangles, etc. It may be good to write a python layer that does some validation on the mesh as well as add flags to explicitly export (or ignore) certain attributes and UV/color sets.

Faster import

Next I used the python mmap (memory map) module to upload the mesh directly from disk to openGL without getting (and therefore boxing) the raw data in Python objects first. Previously I was loading binary to python, which requires python to cast the binary to a python object, which I then wrapped into a ctypes object, allocating and copying huge chunks of memory and constructing tons of python objects. With mmap I can just cast the file handle to a void* and hand it to glBufferData.

The memoryMap context can take a file descriptor (acquired through os.open, different from the regular open) or file path. It will then open the entire file as read-only binary and map it instead of reading it. Lastly, it returns a MappedReader object, which is a little wrapper around the mmap object that assists in reading chunks as a certain ctype. This way I can easily read some header data (previously I'd do this by reading n bytes and using struct.unpack) and then read the remainder (or a large chunk) of the file as a ctypes pointer.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import os
import mmap
import ctypes
import contextlib


@contextlib.contextmanager
def memoryMap(fileDescriptor, sizeInBytes=0, offsetInBytes=0):
    """Context manager that memory-maps a file and yields a MappedReader for it.

    fileDescriptor: a file path (string) or an os.open file descriptor; when a
        path is given the descriptor is opened and closed by this function.
    sizeInBytes: bytes to map; 0 maps the whole file (mmap convention).
    offsetInBytes: byte offset into the file at which the mapping starts.
    The map and any descriptor we own are closed when the context exits.
    """
    if isinstance(fileDescriptor, basestring):
        # NOTE(review): opened read-write because MappedReader.read uses
        # ctypes.from_buffer, which needs a writable buffer - confirm before
        # changing to O_RDONLY. os.O_BINARY makes this Windows-specific.
        fd = os.open(fileDescriptor, os.O_RDWR | os.O_BINARY)
        ownFd = True
    else:
        # caller provided the descriptor, so the caller keeps ownership of it
        fd = fileDescriptor
        ownFd = False
    mfd = None
    try:
        mfd = mmap.mmap(fd, sizeInBytes, offset=offsetInBytes)
        yield MappedReader(mfd)
    finally:
        # close the map first, then the descriptor (only if we opened it)
        if mfd is not None:
            mfd.close()
        if ownFd:
            os.close(fd)


class MappedReader(object):
    def __init__(self, memoryMap):
        """Wrap a memory map into a stream that can stream through the file and map sections to ctypes."""
        self.__memoryMap = memoryMap
        self.__offset = 0  # current read cursor in bytes

    def close(self):
        self.__memoryMap.close()

    def size(self):
        """Total size of the mapped file in bytes."""
        return self.__memoryMap.size()

    def seek(self, offset):
        # FIX: allow seeking to exactly size() (end of file) - the original
        # `offset < self.size()` rejected seek(tell()) after the last byte was
        # read, even though no subsequent read is required.
        assert 0 <= offset <= self.size(), 'Seek %s beyond file bounds [0, %s]' % (offset, self.size())
        self.__offset = offset

    def tell(self):
        return self.__offset

    def read(self, ctype):
        """
        Map a part of the file memory to a ctypes object (from_buffer, so ctype points directly to file memory).
        Object type is inferred from the given type.
        File cursor is moved to the next unread byte (seek = tell + sizeof(ctype)).
        """
        result = ctype.from_buffer(self.__memoryMap, self.__offset)
        self.__offset += ctypes.sizeof(result)
        return result

    def readValue(self, ctype):
        """
        Utility to read and directly return the data cast as a python value.
        """
        return self.read(ctype).value

This code is a refactor from what I did in the tutorial mentioned at the top, but using mmap instead! It is mostly identical.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
def _loadMesh_v0(stream, vao, bufs):
    """Read a version-0 mesh from a MappedReader and upload it to the bound buffers.

    stream: MappedReader positioned just past the version byte.
    vao: already-bound vertex array object handle (stored in the returned Mesh).
    bufs: (arrayBuffer, elementArrayBuffer) handles, already bound by model().
    Returns a Mesh; raises AssertionError/RuntimeError on inconsistent files.
    """
    vertexCount = stream.readValue(ctypes.c_uint32)
    vertexSize = stream.readValue(ctypes.c_ubyte)

    indexCount = stream.readValue(ctypes.c_uint32)
    indexSize = stream.readValue(ctypes.c_ubyte)

    assert indexSize in indexTypeFromSize, 'Unknown element data type, element size must be one of %s' % indexTypeFromSize.keys()
    indexType = indexTypeFromSize[indexSize]

    drawMode = stream.readValue(ctypes.c_uint32)
    assert drawMode in (GL_LINES, GL_TRIANGLES), 'Unknown draw mode.'  # TODO: list all render types

    # gather layout
    numAttributes = stream.readValue(ctypes.c_ubyte)

    offset = 0
    layouts = [None] * numAttributes
    for i in xrange(numAttributes):
        location = stream.readValue(ctypes.c_ubyte)
        dimensions = stream.readValue(ctypes.c_ubyte)
        assert dimensions in (1, 2, 3, 4)
        dataType = stream.readValue(ctypes.c_uint32)
        assert dataType in attributeElementTypes, 'Invalid GLenum value for attribute element type.'
        # AttributeLayout is defined elsewhere in this refactor; its
        # location/dimensions/dataType/offset fields are used below.
        layouts[i] = AttributeLayout(location, dimensions, dataType, offset)
        offset += dimensions * sizeOfType[dataType]

    assert offset == vertexSize, 'File says each chunk of vertex data is %s bytes, but attribute layout used up %s bytes' % (vertexSize, offset)

    # apply layout
    for layout in layouts:
        glVertexAttribPointer(layout.location, layout.dimensions, layout.dataType, GL_FALSE, vertexSize, ctypes.c_void_p(layout.offset))  # total offset is now stride
        glEnableVertexAttribArray(layout.location)

    # the ctypes arrays point straight into the mapped file memory; no copy into
    # Python objects is made before handing the data to glBufferData
    raw = stream.read(ctypes.c_ubyte * (vertexSize * vertexCount))
    glBufferData(GL_ARRAY_BUFFER, vertexSize * vertexCount, raw, GL_STATIC_DRAW)

    raw = stream.read(ctypes.c_ubyte * (indexSize * indexCount))
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexSize * indexCount, raw, GL_STATIC_DRAW)

    if stream.size() - stream.tell() > 0:
        raise RuntimeError('Error reading mesh file, more data in file after we were done reading.')

    return Mesh(vao, bufs, drawMode, indexCount, indexType)


def model(filePath):
    """Generate a VAO and two buffers, then stream the mesh file into them via mmap.

    The leading version byte selects the loader; unknown versions raise
    RuntimeError.
    """
    vao = glGenVertexArrays(1)
    glBindVertexArray(vao)
    bufs = glGenBuffers(2)
    glBindBuffer(GL_ARRAY_BUFFER, bufs[0])
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufs[1])
    with memoryMap(filePath) as stream:
        version = stream.readValue(ctypes.c_ubyte)
        if version != 0:
            raise RuntimeError('Unknown mesh file version %s in %s' % (version, filePath))
        return _loadMesh_v0(stream, vao, bufs)

Python dependency graph

Node graphs are a great way to express relationships and logical flow between objects, both for code design and 3D scenes. Where a tree can only express single parent-child relationships, a node graph can show a hierarchy and then display how one object has a look-at logic to look at another object entirely.

An image showing how a transform can have a look-at constraint, as well as how transform hierarchies can inherit each other

This can be done for almost any type of data, and a dependency graph is an evaluation model that tries to evaluate as little of the graph as possible. An evaluation model is the way our code traverses the graph's data to compute the end result.

A dependency graph works by lazily computing data and by storing the "dirty" state of each attribute (dirty meaning something has been changed since the last calculation of this attribute). I'll try to illustrate with this animation of 2 colors being mixed. When an input changes it propagates that change by "dirtying" all downstream dependencies (red).

When a value is requested, input wires are followed and for outputs the node is computed (yellow), but only if the attribute is "dirty", else we just use the cached state (green).

A video displaying node graph dirty propagation & evaluation order

So to sum it up in a few simple rules:

To keep it simple I'll say "if an input on a node changes all its outputs are dirty", which means nodes should be quite granular as having inputs that only change part of the outputs will waste computation.

Also any connection going out of an attribute (whether it is an input feeding another input directly or an output feeding another input) will dirty all targets. These 2 simple rules result in recursive dirtying of all the right inputs and outputs throughout the graph.

When a value is requested its cached value is returned unless it's "dirty" in which case (for outputs) the node will be computed or (for inputs) the other side of the connection will be queried, which results in this rule applying recursively and the minimal graph being computed to get the right value in the end.

Coding it

Now with that theory out of the way, I've written some python to do exactly this. First let's take a look at the initial data structure. A plug must understand connections, ownership, dirty-state and retain an internal value (e.g. the result of computing an output, or just a constant value for an input that is not connected to anything).

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
class Plug(object):
    """An attribute on a DependNode: tracks connections, dirty state and a cached value."""

    def __init__(self, name, node, isComputed):
        self.__name = name  # really used for debugging only
        self.__node = node  # the parent node this plug belongs to
        self.__isComputed = isComputed  # make this an output-plug
        self.__isDirty = isComputed  # upstream dependencies changed? computed plug always have dependencies on creation
        self.__cache = None  # last computed value for outputs, user set value for inputs
        self.__sources = []  # plugs we depend on
        self.__targets = []  # plugs we feed into

    def clean(self):
        # mark this plug as up to date; called after the owning node computed
        self.__isDirty = False

class DependNode(object):
    """A node in the dependency graph: a named set of plugs plus optional compute logic."""

    def __init__(self, computeFunc=None):
        # NOTE(review): OrderedDict must be imported from collections - the import
        # is not shown in this snippet.
        self.__plugs = OrderedDict()  # list of node plugs, dict key should match plug name
        self._computeFunc = computeFunc  # logic implementation, should be a callable that accepts this DependNode instance as only argument

    def compute(self):
        # call compute function if node has logic
        if self._computeFunc is not None:
            self._computeFunc(self)

        # clean all plugs (itervalues is Python 2)
        for plug in self.__plugs.itervalues():
            plug.clean()

    def addInput(self, name):
        self.__plugs[name] = Plug(name, self, isComputed=False)

    def addOutput(self, name):
        self.__plugs[name] = Plug(name, self, isComputed=True)

This template allows us to set up a basic node with inputs and outputs and to provide a custom function to calculate this node's outputs.

1
2
const = DependNode()
const.addInput('value')

The next step is to allow setting a value to a specific plug. To do this, I'll use some meta-programming to make the resulting code more readable. Implementing getattr and setattr on the DependNode as well as value() and setValue() on the plug to alter the Plug.__cache:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
# Added to the end of the Plug class:
    def value(self):
        # Return the last cached value (computed result for outputs,
        # user-set constant for inputs).
        return self.__cache

    def setValue(self, value):
        # Store a user-set value on this plug.
        self.__cache = value

# Added to the end of the DependNode class:
    def __getattr__(self, name):
        # node.plugName reads straight through to the plug's value.
        return self.__plugs[name].value()

    def __setattr__(self, name, value):
        # NOTE(review): the first DependNode version stored _computeFunc
        # (single underscore), which this mangled-name guard would not match;
        # the final version renames it to __computeFunc — confirm when combining snippets.
        if name not in ('_DependNode__plugs', '_DependNode__computeFunc'): # To enable setting attrs in __init__ we must revert to default behaviour for those attribute names, notice how mangled names are required for private attributes.
            return self.__plugs[name].setValue(value)
        return super(DependNode, self).__setattr__(name, value)

Now we can do this and see the internal state reflected properly:

1
const.value = 2.0  

The next step I suppose is to create a simple "add" node which sums up its inputs. To do this we'll have to add connections. I'll implement this on the Plug class, where a plug's sources and targets are managed implicitly:

1
2
3
4
5
6
7
8
9
    def addSource(self, sourcePlug):
        # Connect sourcePlug as an incoming dependency of this plug.
        # The mangled __targets access works because both objects are Plugs.
        if sourcePlug not in self.__sources:
            self.__sources.append(sourcePlug)
            sourcePlug.__targets.append(self)

    def removeSource(self, sourcePlug):
        # Disconnect sourcePlug; no-op when it is not currently a source.
        if sourcePlug in self.__sources:
            self.__sources.remove(sourcePlug)
            sourcePlug.__targets.remove(self)

The next step is to start implementing some of our rules. First rule we need is: compute when we are dirty. This I do in Plug.value().

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
    def value(self):
        # Lazy evaluation: only recompute (outputs) or refetch (inputs)
        # when marked dirty, otherwise return the cached value.
        if self.__isDirty:
            # we are going to get clean, set clean beforehand so the compute function can get other plug values without recursively triggering compute again.
            self.__isDirty = False
            # compute dirty output
            if self.__isComputed:
                self.__node.compute()
            # fetch dirty input connection
            elif self.__sources:
                self.__cache = [source.value() for source in self.__sources]
            # plug is clean now
            self.clean()
        # return internal state
        return self.__cache

So now outputs are computed, inputs return their sources, all this is cached on the plug so it only computes when dirty is true. The next step is to actually set and propagate the dirty state. So whenever we set a value, set a source, or a plug gets dirtied: all outgoing connections are dirtied. When the dirty happens to an input, the node's outputs are dirtied. See the _dirty implementation below for the Plug class. The setValue and add- removeSource functions just get a _dirty call in the end.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
# modifications to Plug
    def setValue(self, value):
        self.__cache = value
        self._dirty()  # a new value invalidates all downstream plugs

    def addSource(self, sourcePlug):
        if sourcePlug not in self.__sources:
            self.__sources.append(sourcePlug)
            sourcePlug.__targets.append(self)
            self._dirty()  # a new connection changes this plug's value

    def removeSource(self, sourcePlug):
        if sourcePlug in self.__sources:
            self.__sources.remove(sourcePlug)
            sourcePlug.__targets.remove(self)
            self._dirty()  # losing a connection changes this plug's value

# added to Plug
    def _isComputed(self):
        # Accessor so sibling plugs can tell outputs from inputs.
        return self.__isComputed

    def _dirty(self):
        # Mark this plug dirty & propagate downstream. The early-out on an
        # already-dirty plug also stops infinite recursion through the graph.
        if self.__isDirty:
            return
        self.__isDirty = True

        # dirty plugs that are computed based on this plug
        if not self.__isComputed:
            for plug in self.__node.iterPlugs():
                if plug._isComputed():
                    plug._dirty()

        # dirty plugs that use this plug as source
        for plug in self.__targets:
            plug._dirty()

# the DependNode class also gets this iterPlugs implementation
    def iterPlugs(self):
        # Python 2 dict API (itervalues); use values() on Python 3.
        return self.__plugs.itervalues()

Whew, now after all that we should be able to run this test to add 2 and 4 together using 3 nodes:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
# Two constant nodes feeding an add node: computes 2.0 + 4.0 lazily.
const = DependNode()
const.addInput('value')
const.value = 2.0

const2 = DependNode()
const2.addInput('value')
const2.value = 4.0


def addFunc(node):
    # Compute callback: node.inputs is the list of connected source values.
    node.output = sum(node.inputs)


add = DependNode(addFunc)
add.addInput('inputs')
add.addOutput('output')
add.plug('inputs').addSource(const.plug('value'))
add.plug('inputs').addSource(const2.plug('value'))

print add.output  # expected: 6.0 (Python 2 print statement)

Here is a slightly more interesting example of a pointOnCurve node that computes a point on a bezier segment and feeds it to a different node:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
def pointOnCurve(node):
    # interpolate a 2D bezier segment with t=node.parameter
    # cvs2 / parameter are connection-value lists, hence the [0] indexing
    cvs = node.cvs2[0]
    t = node.parameter[0]
    r = [0, 0]
    for i in xrange(2):  # x and y handled independently (Python 2 xrange)
        # turn the 4 control points into cubic polynomial coefficients
        a, b, c = cvs[1][i] - cvs[0][i], cvs[2][i] - cvs[1][i], cvs[3][i] - cvs[2][i]
        d = b - a
        a, b, c, d = c - b - d, d + d + d, a + a + a, cvs[0][i]
        r[i] = (t * (t * (t * a + b) + c) + d)  # Horner-form evaluation
    node.point = tuple(r)

timeNode = DependNode()
timeNode.addInput('time')
timeNode.time = 0.0

curveNode = DependNode()
curveNode.addInput('cvs')
curveNode.cvs = ((0.0, 0.0), (0.2, 0.0), (0.0, 0.2), (0.2, 0.2))

pointOnCurveNode = DependNode(pointOnCurve)
pointOnCurveNode.addInput('cvs2')
pointOnCurveNode.addInput('parameter')
pointOnCurveNode.plug('parameter').addSource(timeNode.plug('time'))
pointOnCurveNode.plug('cvs2').addSource(curveNode.plug('cvs'))
pointOnCurveNode.addOutput('point')

transform = DependNode()
transform.addInput('translate')
transform.plug('translate').addSource(pointOnCurveNode.plug('point'))

print transform.translate
timeNode.time = 0.5
print transform.translate  # changing time dirties the chain, forcing a recompute

Now there are 2 more improvements I want to add. First whenever a plug has sources, there is no real need to cache the result. We can just directly read from the sources and save ourselves a bunch of copying. Second I want to be able to alter an input plug temporarily. Imagine an interface with a point moving over time, it may be nice to alter that point by hand to e.g. feed that back in some animation system. In this case it is important for us to set the translate of a transform without it jumping back to the source time as soon as we redraw (which would read the translate attribute).

First I edit Plug.value() to use the cache only for computed data. Now that __cache is freed up I plan to use it for the user-override. So if a cache is available I want to use it at all times. Next I return sources if available, else cache again which should in this case be computed.

Next in Plug._dirty() I only dirty computed plugs, and I set the cache to None if the plug has sources coming in. This will result in a small problem with Plug.setValue: it currently caches the value and then dirties the plug. That means that on input plugs the cache is set to None immediately. I just swap those 2 lines so my setValue sticks, and the user override also works.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
    def value(self):
        # Only computed plugs track dirty state & cache a computed result;
        # inputs read from the user override or straight from their sources.
        if self.__isDirty and self.__isComputed:
            # compute dirty output
            if self.__isComputed:  # NOTE(review): redundant, already guaranteed by the test above
                self.__node.compute()
            # plug is clean now
            self.clean()

        # override cache for input attributes to intervene with connections?
        if self.__cache is not None:
            return self.__cache

        # fetch input connection
        if self.__sources:
            return [source.value() for source in self.__sources]

        return self.__cache

    def _dirty(self):
        if self.__isComputed:
            # don't dirty again
            if self.__isDirty:
                return
            self.__isDirty = True
        if self.__sources:
            # connected input: drop any user override so the sources win again
            self.__cache = None

        # dirty plugs that are computed based on this plug
        if not self.__isComputed:
            for plug in self.__node.iterPlugs():
                if plug._isComputed():
                    plug._dirty()

        # dirty plugs that use this plug as source
        for plug in self.__targets:
            plug._dirty()

    def setValue(self, value):
        # Dirty first: _dirty() clears __cache on connected inputs, so the
        # assignment below must come after for the user override to stick.
        self._dirty()
        self.__cache = value

Using the previous example I can now see how overriding the parameter works until time is changed again making it use the source connection again:

1
2
3
4
5
6
7
print transform.translate  
timeNode.time = 0.5  
print transform.translate  
pointOnCurveNode.parameter = 0.25  
print transform.translate  
timeNode.time = 0.5  
print transform.translate

Full code:

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
from collections import OrderedDict


class Plug(object):
    # A single attribute on a DependNode. Input plugs hold a user-set value
    # (used as an override when connected) and/or incoming connections;
    # computed (output) plugs cache the result of the node's compute function.
    def __init__(self, name, node, isComputed):
        self.__name = name  # really used for debugging only
        self.__node = node  # the parent node this plug belongs to
        self.__isComputed = isComputed  # make this an output-plug
        self.__isDirty = isComputed  # upstream dependencies changed?
        self.__cache = None  # last computed value for outputs, user set value for inputs
        self.__sources = []  # plugs we depend on
        self.__targets = []  # plugs we feed into

    def clean(self):
        # Mark this plug up to date.
        self.__isDirty = False

    def value(self):
        # Only computed plugs track dirty state & cache a computed result.
        if self.__isDirty and self.__isComputed:
            # compute dirty output
            if self.__isComputed:  # NOTE(review): redundant, already guaranteed above
                self.__node.compute()
            # plug is clean now
            self.clean()

        # override cache for input attributes to intervene with connections?
        if self.__cache is not None:
            return self.__cache

        # fetch input connection
        if self.__sources:
            return [source.value() for source in self.__sources]

        return self.__cache

    def setValue(self, value):
        # Dirty first: _dirty() clears __cache on connected inputs, so the
        # assignment below must happen afterwards for the override to stick.
        self._dirty()
        self.__cache = value

    def addSource(self, sourcePlug):
        # Connect sourcePlug as an incoming dependency (no-op when already connected).
        if sourcePlug not in self.__sources:
            self.__sources.append(sourcePlug)
            sourcePlug.__targets.append(self)  # mangled access works: same class
            self._dirty()

    def removeSource(self, sourcePlug):
        # Disconnect sourcePlug (no-op when not connected).
        if sourcePlug in self.__sources:
            self.__sources.remove(sourcePlug)
            sourcePlug.__targets.remove(self)
            self._dirty()

    def _isComputed(self):
        return self.__isComputed

    def _dirty(self):
        # Mark dirty & propagate downstream; the early-out on an already-dirty
        # computed plug stops redundant walks (and recursion) through the graph.
        if self.__isComputed:
            # don't dirty again
            if self.__isDirty:
                return
            self.__isDirty = True
        if self.__sources:
            # connected input: drop any user override so sources win again
            self.__cache = None

        # dirty plugs that are computed based on this plug
        if not self.__isComputed:
            for plug in self.__node.iterPlugs():
                if plug._isComputed():
                    plug._dirty()

        # dirty plugs that use this plug as source
        for plug in self.__targets:
            plug._dirty()


class DependNode(object):
    # Dependency-graph node: an ordered set of named plugs plus an optional
    # compute callable. Attribute reads/writes are forwarded to the plugs.
    def __init__(self, computeFunc=None):
        self.__plugs = OrderedDict()  # list of node plugs, dict key should match plug name
        self.__computeFunc = computeFunc  # logic implementation, should be a callable that accepts this DependNode instance as only argument

    def iterPlugs(self):
        # Python 2 dict API (itervalues); use values() on Python 3.
        return self.__plugs.itervalues()

    def compute(self):
        # call compute function if node has logic
        if self.__computeFunc is not None:
            self.__computeFunc(self)

        # clean all plugs
        for plug in self.__plugs.itervalues():
            plug.clean()

    def addInput(self, name):
        # Register a plug holding a user-set value and/or connections.
        self.__plugs[name] = Plug(name, self, isComputed=False)

    def addOutput(self, name):
        # Register a plug filled in by computeFunc.
        self.__plugs[name] = Plug(name, self, isComputed=True)

    def plug(self, name):
        # Access the Plug object itself (e.g. for making connections).
        return self.__plugs[name]

    def __getattr__(self, name):
        # node.attr reads the plug's (possibly computed) value.
        return self.__plugs[name].value()

    def __setattr__(self, name, value):
        # Route attribute writes to plugs, except for the two private members
        # assigned in __init__ (mangled names required for private attributes).
        if name not in ('_DependNode__plugs', '_DependNode__computeFunc'):
            return self.__plugs[name].setValue(value)
        return super(DependNode, self).__setattr__(name, value)


def pointOnCurve(node):
    print 'compute'  # debug: shows when lazy evaluation actually recomputes
    # interpolate a 2D bezier segment with t=node.parameter
    # cvs2 / parameter are connection-value lists, hence the [0] indexing
    cvs = node.cvs2[0]
    t = node.parameter[0]
    r = [0, 0]
    for i in xrange(2):  # x and y handled independently (Python 2 xrange)
        a, b, c = cvs[1][i] - cvs[0][i], cvs[2][i] - cvs[1][i], cvs[3][i] - cvs[2][i]
        d = b - a
        a, b, c, d = c - b - d, d + d + d, a + a + a, cvs[0][i]
        r[i] = (t * (t * (t * a + b) + c) + d)  # Horner-form cubic evaluation
    node.point = tuple(r)

Now I've also been working on sort of an entity-component based system on top of this graph, based on the PyOpenGL mesh renderer from a few posts back.

It allows me to create a scene graph based on Transforms and exported logic (using DependNodes like above) but also logical relationships which are not necessarily customizable, e.g. when rendering a camera I can query its entity, whose transform will provide the view matrix, without connecting these things. This creates a more classical (and less customizable) framework for render logic, but gives full control over the scene logic behind it through the dependency graph.

Using the point on curve example above and feeding it to a cube's transform - while also implementing a timeline UI & 3D gizmo drawing - this fun thing came out:

An image showing a rectangle moving along a spline, illustrating how a dependency graph can animate a point on a curve and constrain a rectangle to said point

Now the next steps would probably involve adding shader support, file watchers and creating a more elaborate Maya exporter that supports various Maya nodes (Maya is also dependency graph based so it fits very well with this!) but I'm not sure if I'm going to keep working on this.

Attribute editor in PyQt

I've been working on a particle editor, though that isn't entirely done yet, I did create something interesting in the process. An attribute editor for arbitrary python objects.

This is where I'm at right now, I hope to get to work on this more and share details about the particles themselves once it is more complete.

Image showing an attribute editor for an unfinished particle system

On the right you see an editor for the following object:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
class ParticleSettings(OrderedClass):
    # Data model behind the particle attribute editor. OrderedClass keeps
    # member (and therefore generated widget) order stable.
    # Enum, Vec3, RandomFloat and RandomChannel* are project types (not shown here).
    def __init__(self):
        super(ParticleSettings, self).__init__()
        # emitter
        self.emitterType = Enum(('Sphere', 'Cone', 'Box'), 0)
        self.emitterSettings = Vec3([0.5, 0.5, 0.5])
        self.emitterIsVolume = True
        self.randomDirection = False
        # not curve based
        self.startSize = RandomFloat()
        self.startSpeed = RandomFloat()
        self.startRotation = RandomFloat()
        self.lifeTime = RandomFloat()
        # curve based on particle alive time / life time
        self.sizeOverTime = RandomChannelFloat()
        self.angularVelocity = RandomChannelFloat()
        self.velocityOverTime = RandomChannelVec3()

I've added some data types so I can visualize them better, but the attribute editing framework I wrote works off the bat on python's basic types. I'd like to break down how I got here, as I wrote a heap of code which still needs a heap of refactoring for it to be presentable, I'll demonstrate creating a more basic example, which should be more useful because it isn't cluttered with my edge cases.

Preparing edit widgets

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
class AEComponent(QObject):
    # Attribute editor widget editing a single value type. Note that UI interactions from the user should emit valueChanged.
    # This documents the interface the factory relies on; concrete editors
    # subclass Qt widgets and implement value/setValue/editValue themselves.
    valueChanged = pyqtSignal(object)

    def __init__(self):
        # The constructor may accept additional arguments, e.g. default value or enum options
        # bug fix: QObject.__init__ must run, otherwise bound signals and
        # blockSignals() (used in setValue) do not work on this instance.
        super(AEComponent, self).__init__()
        self._value = None

    def value(self):
        # Return the internal value
        return self._value

    def setValue(self, value):
        # Set value should programmatically adjust the internal value, without emitting a signal; used in case multiple set values may trigger or when a parent widget is already going to send a change event.
        self.blockSignals(True)
        self._value = value
        self.blockSignals(False)

    def editValue(self, value):
        # Set the value and emit a change event
        self._value = value
        self.valueChanged.emit(value)

Note that this is an example of the interface, not a base class. I will not actually use the code above, I'll just subclass Qt widgets and make them behave the same.

The core data types I want to support are:

Type Widget
int QSpinBox
float QDoubleSpinBox
bool checkable QPushButton
str QLineEdit
object recurse into its properties
dict recurse into its items
list recurse into its items

Because tuples & sets are not mutable it'd be hard to construct a widget that sets the entire tuple at once. I do not intend to adjust the composition of lists, dicts and objects - so no element insertion / removal.

Int & double
QSpinBox already has a value and setValue, but the setValue emits a signal. Instead I'm adding an editValue that forwards to the super setValue and make setValue block the signals. I've also made it so I can construct versions that only support e.g. ctypes.c_char by adding a number of bits parameter that is used to infer limits. It'd be trivial to extend this to unsigned and size-limited variants. The LineEditSelected used is listed below at the QLineEdit, it just simply selects all text at the focus event.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
class SpinBox(QSpinBox):
    """
    QSpinBox with right limits & that follows the AEComponent interface.

    `bits` sets signed two's-complement limits, e.g. bits=8 gives [-128, 127].
    """
    def __init__(self, value=0, bits=32):
        super(SpinBox, self).__init__()
        self.setMinimum(-2 ** (bits - 1))
        self.setMaximum(2 ** (bits - 1) - 1)
        self.setValue(value)
        self.setLineEdit(LineEditSelected())  # select-all-on-focus behaviour

    def setValue(self, value):
        # Programmatic set: must not emit valueChanged.
        self.blockSignals(True)
        super(SpinBox, self).setValue(value)
        self.blockSignals(False)

    def editValue(self, value):
        # Set via the regular Qt path so valueChanged is emitted, as if the user edited it.
        super(SpinBox, self).setValue(value)

Doubles are almost identical.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
class DoubleSpinBox(QDoubleSpinBox):
    """
    QDoubleSpinBox with right limits & that follows the AEComponent interface.
    """
    def __init__(self, value=0.0):
        super(DoubleSpinBox, self).__init__()
        self.setMinimum(-float('inf'))
        self.setMaximum(float('inf'))
        self.setValue(value)
        self.setSingleStep(0.01)  # Depending on use case this can be very coarse.
        self.setLineEdit(LineEditSelected())  # select-all-on-focus behaviour

    def setValue(self, value):
        # Programmatic set: must not emit valueChanged.
        self.blockSignals(True)
        super(DoubleSpinBox, self).setValue(value)
        self.blockSignals(False)

    def editValue(self, value):
        # Set via the regular Qt path so valueChanged is emitted, as if the user edited it.
        super(DoubleSpinBox, self).setValue(value)

Booleans with icons
A few more interesting things to make this work based on a checkable QPushButton. Manual value changed signal handling & keeping track of the icon to use.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
class IconBoolEdit(QPushButton):
    """
    QPushButton with icons to act as a boolean (not tri-state) toggle.

    Follows the AEComponent interface (value / setValue / editValue).
    """
    valueChanged = pyqtSignal(bool)

    def __init__(self, *args):
        super(IconBoolEdit, self).__init__(*args)
        self.__icons = icons.get('Unchecked'), icons.get('Checked')  # Implement your own way to get icons!
        self.setIcon(self.__icons[0])
        self.setCheckable(True)
        # clicked(checked) drives both the icon swap and the change signal
        self.clicked.connect(self.__updateIcons)
        self.clicked.connect(self.__emitValueChanged)

    def setIcons(self, off, on):
        # Replace the two state icons & refresh the currently shown one.
        self.__icons = off, on
        self.__updateIcons(self.isChecked())

    def __updateIcons(self, state):
        self.setIcon(self.__icons[int(state)] or QIcon())  # fall back to an empty icon

    def __emitValueChanged(self, state):
        self.valueChanged.emit(state)

    def value(self):
        return self.isChecked()

    def setValue(self, state):
        # Programmatic set: updates the UI without emitting valueChanged.
        self.setChecked(state)
        self.__updateIcons(state)

    def editValue(self, state):
        # Set and emit, as if the user toggled the button.
        self.setChecked(state)
        self.__updateIcons(state)
        self.__emitValueChanged(state)

Strings
This is very similar to the spinbox. One addition I added is to make sure clicking the line edit selects all its text so a user can start typing a new word immediately.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
class LineEdit(QLineEdit):
    # QLineEdit following the AEComponent interface (str values).
    valueChanged = pyqtSignal(str)

    def __init__(self, *args):
        super(LineEdit, self).__init__(*args)
        # forward Qt's textChanged as the interface's valueChanged
        self.textChanged.connect(self.valueChanged.emit)

    def value(self):
        return self.text()

    def setValue(self, text):
        # Programmatic set: must not emit valueChanged.
        self.blockSignals(True)
        self.setText(text)
        self.blockSignals(False)

    def editValue(self, text):
        # Set and emit (textChanged fires normally).
        self.setText(text)


class LineEditSelected(LineEdit):
    # LineEdit that selects all of its text when it receives focus, so the
    # user can immediately type a replacement value.
    def __init__(self):
        super(LineEditSelected, self).__init__()
        self.__state = False  # True between focus-in and the first mouse press

    def focusInEvent(self, event):
        super(LineEditSelected, self).focusInEvent(event)
        self.selectAll()
        self.__state = True

    def mousePressEvent(self, event):
        # Re-select on the click that gave us focus; the default press
        # handling would otherwise clear the selection made in focusInEvent.
        super(LineEditSelected, self).mousePressEvent(event)
        if self.__state:
            self.selectAll()
            self.__state = False

Reflecting python objects

Reflection in python is very easy, and our use case simple. Every python object has a __dict__ attribute that contains all the current members of an object (but not methods). In python we tend to denote protected (internal) data by prefixing variable names with an underscore. So to find all attributes that we want to inspect we can simply do:

1
2
3
for name in instance.__dict__:
    if name[0] == '_':
        continue

Now to control such an attribute with a widget we need to construct the right widget and connect the change event to a setter. In python we can use the functools module to bind the global getattr and setattr methods and get a way to connect a callback to a property assignment.

1
2
3
4
5
    value = getattr(instance, name)  # get the current value by name, like the dot operator but using a string to get to the right property
    cls = factory.findEditorForType(type(value))  # factory to get the right widget for our data type, more on this later
    widget = cls()  # construct the widget
    widget.setValue(getattr(instance, name))  # set the editor's initial value to match with our data
    widget.valueChanged.connect(functools.partial(setattr, instance, name))  # make the editor update our data

Widget factory

The last piece of the puzzle is a way to generate widgets based on data types. I wanted to keep this abstract, so I made a class out of it. We can register data type & widget type relations and it understands to create a widget if we have one registered for a base class of the type we're querying.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
class AEFactory(object):
    """
    Maps data types to editor-widget constructors. Lookup prefers an exact
    type match, then falls back to the closest registered base class.
    """
    def __init__(self):
        self.__typeWidgets = {}  # {dataType: widget constructor}

    def registerType(self, dataType, widgetConstructor):
        self.__typeWidgets[dataType] = widgetConstructor

    @staticmethod
    def _allBaseTypes(cls):
        """
        Recurse all base classes and return a list of all bases with most close relatives first.
        https://stackoverflow.com/questions/1401661/list-all-base-classes-in-a-hierarchy-of-given-class
        """
        result = list(cls.__bases__)
        for base in result:
            result.extend(AEFactory._allBaseTypes(base))
        return result

    def _findEditorForType(self, dataType):
        # Exact match first, then walk the inheritance chain; None when
        # nothing suitable is registered.
        if dataType in self.__typeWidgets:
            return self.__typeWidgets[dataType]

        for baseType in AEFactory._allBaseTypes(dataType):
            # bug fix: test the base type, not the original type, so the
            # base-class fallback can actually trigger
            if baseType in self.__typeWidgets:
                return self.__typeWidgets[baseType]

Complex data

Now this will work fine for simple objects with simple data types. But the real fun begins when we have instances whose properties are lists of other instances. Our findEditorForType will return None in this case and we get an error. Instead, we should split this up in several steps. First we determine the type of data we're dealing with, to defer the widget creation to any type of recursive function until we reach simple data types for which we can generate widgets.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
from collections import OrderedDict

class AEFactor(object):

... the above code still goes here ...

    def generate(self, data, parent=None, name=None):
        """
        This recursively generates widgets & returns an iterator of every resulting item.

        Dispatches on the kind of data: mappings, indexable sequences and
        object instances recurse; anything else becomes a single field editor.
        """
        if isinstance(data, (dict, OrderedDict)):
            generator = self._generateMap(data)
        elif hasattr(data, '__getitem__') and hasattr(data, '__setitem__'):
            generator = self._generateList(data)
        elif hasattr(data, '__dict__'):
            generator = self._generateInstance(data)
        else:
            generator = self._generateField(data, parent, name)

        # bug fix: the chosen generator was never consumed, so generate()
        # silently returned None; forward its widgets to the caller.
        for widget in generator:
            yield widget

    def _generateField(self, data, parent, name):
        cls = self._findEditorForType(type(data))
        assert cls, 'Error: could not inspect object "%s" (parent: %s, name: %s). No wrapper registered or non-supported compound type.' % (data, parent, name)
        widget = cls()
        widget.setValue(data)
        widget.valueChanged.connect(functools.partial(setattr, parent, name))
        yield widget

    def _generateInstance(self, data):
        for name in data.__dict__:
            if name[0] == '_':
                continue
            yield QLabel(name)
            for widget in self.generate(getattr(data, name), data, name):
                yield widget

    def _generateList(self, data):
        for i in xrange(len(data)):
            yield QLabel(str(i))
            for widget in self.generate(data[i], data, str(i)):
                yield widget

    def _generateMap(self, data):
        for key in data:
            yield QLabel(str(key))
            for widget in self.generate(data[key], data, key):
                yield widget

Formatting

If we have a class that needs a special widget or layout, like my particle editor, we may wish to grab the widgets generated for that class and manipulate them. One case I have is that I have a random channel, which has a minimum, maximum and isRandom flag. If isRandom is turned off then I just want to show the minimum field because the maximum is unused. In order to do this I extended the factory with the ability to inject functions that take groups of widgets for a certain data type. See registerWrapper, findWrapperForType and the modifications at the end of generate.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
class AEFactory(object):
    """
    Widget factory for the attribute editor. Maps data types to editor-widget
    constructors (registerType) and optional widget-group post-processors
    (registerWrapper); lookups fall back to the closest registered base class.
    """
    def __init__(self):
        self.__typeWidgets = {}   # {dataType: widget constructor}
        self.__typeWrappers = {}  # {dataType: widget-group wrapper function}

    def registerType(self, dataType, widgetConstructor):
        self.__typeWidgets[dataType] = widgetConstructor

    def registerWrapper(self, dataType, wrapperFunction):
        """
        The wrapperFunction must accept a generator of widgets & return a generator of widgets.
        """
        self.__typeWrappers[dataType] = wrapperFunction

    @staticmethod
    def _allBaseTypes(cls):
        """
        Recurse all base classes and return a list of all bases with most close relatives first.
        https://stackoverflow.com/questions/1401661/list-all-base-classes-in-a-hierarchy-of-given-class
        """
        result = list(cls.__bases__)
        for base in result:
            result.extend(AEFactory._allBaseTypes(base))
        return result

    def _findEditorForType(self, dataType):
        # Exact match first, then nearest registered base class; None when absent.
        if dataType in self.__typeWidgets:
            return self.__typeWidgets[dataType]

        for baseType in AEFactory._allBaseTypes(dataType):
            # bug fix: test the base type, not the original type
            if baseType in self.__typeWidgets:
                return self.__typeWidgets[baseType]

    def _findWrapperForType(self, dataType):
        # Same lookup strategy as _findEditorForType, for wrapper functions.
        if dataType in self.__typeWrappers:
            return self.__typeWrappers[dataType]

        for baseType in AEFactory._allBaseTypes(dataType):
            # bug fix: test the base type, not the original type
            if baseType in self.__typeWrappers:
                return self.__typeWrappers[baseType]

    def generate(self, data, parent=None, name=None):
        """
        This recursively generates widgets & returns an iterator of every resulting item.
        """
        if isinstance(data, (dict, OrderedDict)):
            generator = self._generateMap(data)
        elif hasattr(data, '__getitem__') and hasattr(data, '__setitem__'):
            generator = self._generateList(data)
        elif hasattr(data, '__dict__'):
            generator = self._generateInstance(data)
        else:
            generator = self._generateField(data, parent, name)

        # let a registered wrapper regroup/restyle the widgets for this type
        wrapper = self._findWrapperForType(type(data))
        if wrapper:
            generator = wrapper(generator)
        for widget in generator:
            yield widget

    def _generateField(self, data, parent, name):
        # Leaf case: one editor widget whose change signal writes back via
        # setattr(parent, name, newValue).
        # NOTE(review): for list/dict parents setattr will not write back into
        # the container — confirm whether that path is ever exercised.
        cls = self._findEditorForType(type(data))
        assert cls, 'Error: could not inspect object "%s" (parent: %s, name: %s). No wrapper registered or non-supported compound type.' % (data, parent, name)
        widget = cls()
        widget.setValue(data)
        widget.valueChanged.connect(functools.partial(setattr, parent, name))
        yield widget

    def _generateInstance(self, data):
        # Recurse into an object's public attributes (skip _underscored names).
        for name in data.__dict__:
            if name[0] == '_':
                continue
            yield QLabel(name)
            for widget in self.generate(getattr(data, name), data, name):
                yield widget

    def _generateList(self, data):
        # Recurse into an indexable sequence (Python 2 xrange).
        for i in xrange(len(data)):
            yield QLabel(str(i))
            for widget in self.generate(data[i], data, str(i)):
                yield widget

    def _generateMap(self, data):
        # Recurse into a mapping's items.
        for key in data:
            yield QLabel(str(key))
            for widget in self.generate(data[key], data, key):
                yield widget
Note: I currently allow it to work on sub classes, with the risk of that subclass having extra attributes - or a different attribute order - resulting in the widgets being jumbled & my function breaking the layout completely. I'm not sure yet how to validate that a sub class matches the base class' member layout, so maybe I should just allow explicit overrides for a single type without inheritance support.

Constraining class member order

One thing that annoys me, and maybe you noticed already, is that python does not guarantee that dictionaries are ordered. For this the collections.OrderedDict type exists, but when dealing with class members and the __dict__ attribute we have no control over this.

Now my solution to this is pretty shaky, and I'm definitely not proud of what I came up with, but let me share it anyway! First I created a class that overrides __setattr__ to keep track of the order in which data is set. Then I override __getattribute__ so that when the __dict__ attribute is requested we return a wrapper around it that behaves like the real dict, but implements all iterators to use the ordered keys list instead.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
class FakeOrderedDict(object):
    """
    Read/write view over a real dict that iterates its keys in an explicitly
    supplied order. Only the operations needed by the attribute editor are
    implemented (item access and the py2-style iteration methods).
    """

    def __init__(self, realDict, order):
        self.realDict = realDict
        self.order = order

    def __getitem__(self, key):
        return self.realDict[key]

    def __setitem__(self, key, value):
        self.realDict[key] = value

    def __iter__(self):
        return iter(self.order)

    def iterkeys(self):
        """Iterate keys in the recorded order."""
        return iter(self.order)

    def itervalues(self):
        """Iterate values in the recorded key order."""
        return (self.realDict[key] for key in self.order)

    def iteritems(self):
        """Iterate (key, value) pairs in the recorded key order."""
        return ((key, self.realDict[key]) for key in self.order)


class OrderedClass(object):
    """
    Base class that records the order in which attributes are first assigned
    and exposes __dict__ as a FakeOrderedDict iterating in that order.
    """

    def __init__(self):
        # Write through the raw dict so the bookkeeping list itself does not
        # pass through (and get registered by) our __setattr__.
        self.__dict__['_OrderedClass__attrs'] = []

    def __getattribute__(self, key):
        result = super(OrderedClass, self).__getattribute__(key)
        if key == '__dict__':
            # Only wrap once the bookkeeping list exists (i.e. __init__ ran);
            # `result` here is the raw instance dict.
            if '_OrderedClass__attrs' in result:
                return FakeOrderedDict(result, result['_OrderedClass__attrs'])
        return result

    def __setattr__(self, key, value):
        # NOTE: this self.__dict__ access goes through __getattribute__ above,
        # so `order` is fetched via the FakeOrderedDict wrapper's __getitem__.
        order = self.__dict__['_OrderedClass__attrs']
        if key not in order:
            order.append(key)
        return super(OrderedClass, self).__setattr__(key, value)

That's all folks

Example usage:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
# create test objects
class Vector(list):
    """List subclass; registered with a wrapper so its editors share one horizontal row."""
    pass


class Compound(OrderedClass):  # inheriting from OrderedClass to ensure widget order
    """Nested test value with two float members edited via the registered float widget."""
    def __init__(self):
        super(Compound, self).__init__()
        self.x = 2.0  # note how explicit floats are important now
        self.y = 5.0


class Data(OrderedClass):
    """Root test object: a string, a sequence-like Vector and a dict of Compounds."""
    def __init__(self):
        super(Data, self).__init__()
        self.name = 'List test'
        self.value = Vector([1.0, 5, True])
        self.dict = {'A': Compound(), 'B': Compound()}


def groupHLayout(widgets):
    """Wrapper: consume a generator of widgets and yield one QWidget laying them out horizontally."""
    h = QHBoxLayout()
    m = QWidget()
    for w in widgets:
        h.addWidget(w)
    m.setLayout(h)
    yield m


# create test data
data = Data()

# create Qt application (must exist before any widget is constructed)
app = QApplication([])
window = QWidget()
main = QVBoxLayout()
window.setLayout(main)

# initialize inspector: map leaf python types to their editor widgets
factory = AEFactory()
factory.registerType(bool, IconBoolEdit)
factory.registerType(int, SpinBox)
factory.registerType(float, DoubleSpinBox)
factory.registerType(str, LineEdit)
# Vector contents are grouped onto a single horizontal row
factory.registerWrapper(Vector, groupHLayout)

# inspect the data
for widget in factory.generate(data):
    main.addWidget(widget)

window.show()
app.exec_()  # blocks until the window is closed

# print the data after closing the editor to show we indeed propagated the changes to the data as they happened
print data.name, data.value, data.dict['A'].x, data.dict['A'].y, data.dict, data.dict['B'].x, data.dict['B'].y

Image of an example attribute editor

Full code dump:

  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
from collections import OrderedDict
import functools
from PyQt4.QtCore import *
from PyQt4.QtGui import *


class SpinBox(QSpinBox):
    """
    QSpinBox with right limits & that follows the AEComponent interface.
    """

    def __init__(self, value=0, bits=32):
        super(SpinBox, self).__init__()
        # Clamp to the full range of a signed `bits`-bit integer.
        self.setMinimum(-2 ** (bits - 1))
        self.setMaximum(2 ** (bits - 1) - 1)
        self.setValue(value)
        # Line edit that selects all text on focus, for quick overtyping.
        self.setLineEdit(LineEditSelected())

    def setValue(self, value):
        # Programmatic set: suppress signals so data -> UI sync does not echo
        # back through valueChanged into the data model.
        self.blockSignals(True)
        super(SpinBox, self).setValue(value)
        self.blockSignals(False)

    def editValue(self, value):
        # Set the value as if the user edited it (signals do fire).
        super(SpinBox, self).setValue(value)


class DoubleSpinBox(QDoubleSpinBox):
    """
    QDoubleSpinBox with right limits & that follows the AEComponent interface.
    """

    def __init__(self, value=0.0):
        super(DoubleSpinBox, self).__init__()
        # Effectively unbounded range.
        self.setMinimum(-float('inf'))
        self.setMaximum(float('inf'))
        self.setValue(value)
        self.setSingleStep(0.01)  # Depending on use case this can be very coarse.
        self.setLineEdit(LineEditSelected())

    def setValue(self, value):
        # Programmatic set: suppress signals to avoid a write-back echo.
        self.blockSignals(True)
        super(DoubleSpinBox, self).setValue(value)
        self.blockSignals(False)

    def editValue(self, value):
        # Set the value as if the user edited it (signals do fire).
        super(DoubleSpinBox, self).setValue(value)


class IconBoolEdit(QPushButton):
    """
    QPushButton with icons to act as a boolean (not tri-state) toggle.
    """
    valueChanged = pyqtSignal(bool)

    def __init__(self, *args):
        super(IconBoolEdit, self).__init__(*args)
        # (off icon, on icon) pair; None entries fall back to an empty QIcon.
        self.__icons = None, None  # icons.get('Unchecked'), icons.get('Checked')  # Implement your own way to get icons!
        self.setIcon(self.__icons[0] or QIcon())
        self.setCheckable(True)
        # clicked carries the checked state; update the icon then notify.
        self.clicked.connect(self.__updateIcons)
        self.clicked.connect(self.__emitValueChanged)

    def setIcons(self, off, on):
        """Install the unchecked/checked icons and refresh the current one."""
        self.__icons = off, on
        self.__updateIcons(self.isChecked())

    def __updateIcons(self, state):
        # Pick the icon matching the boolean state (False -> 0, True -> 1).
        self.setIcon(self.__icons[int(state)] or QIcon())

    def __emitValueChanged(self, state):
        self.valueChanged.emit(state)

    def value(self):
        return self.isChecked()

    def setValue(self, state):
        # Programmatic set: no valueChanged emission.
        self.setChecked(state)
        self.__updateIcons(state)

    def editValue(self, state):
        # Set as if clicked by the user: also emits valueChanged.
        self.setChecked(state)
        self.__updateIcons(state)
        self.__emitValueChanged(state)


class LineEdit(QLineEdit):
    """QLineEdit following the AEComponent interface (value/setValue/editValue + valueChanged)."""
    valueChanged = pyqtSignal(str)

    def __init__(self, *args):
        super(LineEdit, self).__init__(*args)
        # Re-broadcast Qt's textChanged as the AEComponent valueChanged signal.
        self.textChanged.connect(self.valueChanged.emit)

    def value(self):
        return self.text()

    def setValue(self, text):
        # Programmatic set: suppress signals to avoid a write-back echo.
        self.blockSignals(True)
        self.setText(text)
        self.blockSignals(False)

    def editValue(self, text):
        # Set as if typed by the user: valueChanged fires.
        self.setText(text)


class LineEditSelected(LineEdit):
    """
    LineEdit that selects all of its text when focused; the flag keeps the
    click that gave it focus from immediately clearing that selection.
    """
    def __init__(self):
        super(LineEditSelected, self).__init__()
        # True between gaining focus and the first mouse press afterwards.
        self.__state = False

    def focusInEvent(self, event):
        super(LineEditSelected, self).focusInEvent(event)
        self.selectAll()
        self.__state = True

    def mousePressEvent(self, event):
        super(LineEditSelected, self).mousePressEvent(event)
        if self.__state:
            # Restore the select-all that the press just destroyed, once.
            self.selectAll()
            self.__state = False


class AEFactory(object):
    """
    Attribute-editor factory.

    Register an editor widget per leaf data type (registerType) and optionally
    a widget-group wrapper per data type (registerWrapper), then call
    generate() to recursively build label + editor widgets for any piece of
    data. Edits are written back into the inspected data as they happen.
    """

    def __init__(self):
        self.__typeWidgets = {}   # data type -> editor widget constructor
        self.__typeWrappers = {}  # data type -> widget-group wrapper function

    def registerType(self, dataType, widgetConstructor):
        """Register the widget constructor used to edit values of dataType (or subclasses)."""
        self.__typeWidgets[dataType] = widgetConstructor

    def registerWrapper(self, dataType, wrapperFunction):
        """
        The wrapperFunction must accept a generator of widgets & return a generator of widgets.
        """
        self.__typeWrappers[dataType] = wrapperFunction

    @staticmethod
    def _allBaseTypes(cls):
        """
        Recurse all base classes and return a list of all bases with most close relatives first.
        https://stackoverflow.com/questions/1401661/list-all-base-classes-in-a-hierarchy-of-given-class
        """
        result = list(cls.__bases__)
        for base in result:
            result.extend(AEFactory._allBaseTypes(base))
        return result

    def _findEditorForType(self, dataType):
        """Return the editor for dataType, falling back to its closest registered base class."""
        if dataType in self.__typeWidgets:
            return self.__typeWidgets[dataType]

        # Bug fix: this loop used to test `dataType` again instead of
        # `baseType`, so the base-class fallback could never trigger.
        for baseType in AEFactory._allBaseTypes(dataType):
            if baseType in self.__typeWidgets:
                return self.__typeWidgets[baseType]

    def _findWrapperForType(self, dataType):
        """Return the wrapper for dataType, falling back to its closest registered base class."""
        if dataType in self.__typeWrappers:
            return self.__typeWrappers[dataType]

        # Bug fix: same base-class fallback fix as in _findEditorForType.
        for baseType in AEFactory._allBaseTypes(dataType):
            if baseType in self.__typeWrappers:
                return self.__typeWrappers[baseType]

    def generate(self, data, parent=None, name=None):
        """
        This recursively generates widgets & returns an iterator of every resulting item.

        parent/name describe where `data` lives, so leaf editors can write
        changes back into the owning object or container.
        """
        if isinstance(data, (dict, OrderedDict)):
            generator = self._generateMap(data)
        elif hasattr(data, '__getitem__') and hasattr(data, '__setitem__'):
            # Sequence-like: readable and writable by index.
            generator = self._generateList(data)
        elif hasattr(data, '__dict__'):
            generator = self._generateInstance(data)
        else:
            generator = self._generateField(data, parent, name)

        wrapper = self._findWrapperForType(type(data))
        if wrapper:
            generator = wrapper(generator)
        for widget in generator:
            yield widget

    def _assign(self, parent, key, value):
        """
        Write an edited value back into its container: item assignment for
        dict/sequence parents, attribute assignment otherwise. The dispatch
        mirrors generate() so write-back matches how the value was found.

        Bug fix: previously setattr was used unconditionally, so edits to
        list/dict elements created stray attributes instead of updating items.
        """
        if parent is None:
            return
        if isinstance(parent, (dict, OrderedDict)) or (
                hasattr(parent, '__getitem__') and hasattr(parent, '__setitem__')):
            parent[key] = value
        else:
            setattr(parent, key, value)

    def _generateField(self, data, parent, name):
        """Yield one editor widget for a leaf value, wired to write edits back into parent."""
        cls = self._findEditorForType(type(data))
        assert cls, 'Error: could not inspect object "%s" (parent: %s, name: %s). No wrapper registered or non-supported compound type.' % (data, parent, name)
        widget = cls()
        widget.setValue(data)
        widget.valueChanged.connect(functools.partial(self._assign, parent, name))
        yield widget

    def _generateInstance(self, data):
        """Yield a QLabel plus editor widgets for every public attribute of an object."""
        for name in data.__dict__:
            # Names starting with an underscore are treated as private.
            if name[0] == '_':
                continue
            yield QLabel(name)
            for widget in self.generate(getattr(data, name), data, name):
                yield widget

    def _generateList(self, data):
        """Yield an index QLabel plus editor widgets for every element of a sequence."""
        for i in xrange(len(data)):
            yield QLabel(str(i))
            # Pass the integer index so _assign can write the item back.
            for widget in self.generate(data[i], data, i):
                yield widget

    def _generateMap(self, data):
        """Yield a key QLabel plus editor widgets for every entry of a mapping."""
        for key in data:
            yield QLabel(str(key))
            for widget in self.generate(data[key], data, key):
                yield widget


class FakeOrderedDict(object):
    """
    Wraps a real dict but iterates in an explicitly supplied key order.
    Reads and writes pass straight through to the wrapped dict.
    """

    def __init__(self, realDict, order):
        self.realDict = realDict
        self.order = order

    def __getitem__(self, key):
        return self.realDict[key]

    def __setitem__(self, key, value):
        self.realDict[key] = value

    def __iter__(self):
        return iter(self.order)

    def iterkeys(self):
        # Identical to plain iteration over this object.
        return self.__iter__()

    def itervalues(self):
        for k in self.order:
            yield self.realDict[k]

    def iteritems(self):
        for k in self.order:
            yield k, self.realDict[k]


class OrderedClass(object):
    """
    Base class recording the order in which attributes are first assigned,
    exposing __dict__ as a FakeOrderedDict iterating in that order.
    """

    def __init__(self):
        # Write through the raw dict so the bookkeeping list itself is not
        # registered by our own __setattr__.
        self.__dict__['_OrderedClass__attrs'] = []

    def __getattribute__(self, key):
        result = super(OrderedClass, self).__getattribute__(key)
        if key == '__dict__':
            # Only wrap once the bookkeeping list exists (after __init__).
            if '_OrderedClass__attrs' in result:
                return FakeOrderedDict(result, result['_OrderedClass__attrs'])
        return result

    def __setattr__(self, key, value):
        # This self.__dict__ access goes through __getattribute__ above, so
        # `order` is retrieved via the FakeOrderedDict wrapper.
        order = self.__dict__['_OrderedClass__attrs']
        if key not in order:
            order.append(key)
        return super(OrderedClass, self).__setattr__(key, value)


# create test objects
class Vector(list):
    """List subclass; registered with a wrapper so its editors share one horizontal row."""
    pass


class Compound(OrderedClass):  # inheriting from OrderedClass to ensure widget order
    """Nested test value with two float members edited via the registered float widget."""
    def __init__(self):
        super(Compound, self).__init__()
        self.x = 2.0  # note how explicit floats are important now
        self.y = 5.0


class Data(OrderedClass):
    """Root test object: a string, a sequence-like Vector and a dict of Compounds."""
    def __init__(self):
        super(Data, self).__init__()
        self.name = 'List test'
        self.value = Vector([1.0, 5, True])
        self.dict = {'A': Compound(), 'B': Compound()}


def groupHLayout(widgets):
    """Wrapper: consume a generator of widgets and yield one QWidget laying them out horizontally."""
    h = QHBoxLayout()
    m = QWidget()
    for w in widgets:
        h.addWidget(w)
    m.setLayout(h)
    yield m


# create test data
data = Data()

# create Qt application (must exist before any widget is constructed)
app = QApplication([])
window = QWidget()
main = QVBoxLayout()
window.setLayout(main)

# initialize inspector: map leaf python types to their editor widgets
factory = AEFactory()
factory.registerType(bool, IconBoolEdit)
factory.registerType(int, SpinBox)
factory.registerType(float, DoubleSpinBox)
factory.registerType(str, LineEdit)
# Vector contents are grouped onto a single horizontal row
factory.registerWrapper(Vector, groupHLayout)

# inspect the data
for widget in factory.generate(data):
    main.addWidget(widget)

window.show()
app.exec_()  # blocks until the window is closed

# print the data after closing the editor to show we indeed propagated the changes to the data as they happened
print data.name, data.value, data.dict['A'].x, data.dict['A'].y, data.dict, data.dict['B'].x, data.dict['B'].y

Viewing Python profiling results with QCacheGrind

This utility outputs cProfile data as a "callgrind" cache file.

Requires pyprof2calltree:

pip install pyprof2calltree

The resulting files can be viewed using QCacheGrind for Windows:

[qcachegrindwin on SourceForge](http://sourceforge.net/projects/qcachegrindwin/)

Example usage:

runctx(pythonCodeStr, globals(), locals(), executable=QCACHEGRIND)

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import os
import cProfile
import tempfile
import pyprof2calltree
import pstats
import subprocess


QCACHEGRIND = r'YOUR CACHEGRIND EXECUTABLE PATH'


def runctx(cmdstr, globals=None, locals=None, outpath=None, executable=None):
    """
    Profile `cmdstr` with cProfile and convert the result to a callgrind file.

    globals/locals: namespaces the code executes in (default: fresh empty dicts).
    outpath: optional target path; its extension is replaced with '.callgrind'.
             When omitted, the callgrind data is left in a temp file.
    executable: optional viewer (e.g. QCACHEGRIND) launched on the result.
    Returns the path of the written callgrind file.
    """
    # Bug fix: the defaults used to be mutable ({}), which exec() inside
    # cProfile.runctx mutates, leaking state between calls.
    if globals is None:
        globals = {}
    if locals is None:
        locals = {}

    # mkstemp instead of the race-prone, deprecated mktemp; only the path is needed.
    handle, tmp = tempfile.mkstemp()
    os.close(handle)

    cProfile.runctx(cmdstr, globals, locals, filename=tmp)

    if outpath is not None:
        path = os.path.splitext(outpath)[0] + '.callgrind'
        dirpath = os.path.dirname(path)
        # Guard against dirpath == '' (outpath without a directory component).
        if dirpath and not os.path.exists(dirpath):
            os.makedirs(dirpath)
        pyprof2calltree.convert(pstats.Stats(tmp), path)
        os.unlink(tmp)
    else:
        # No explicit target: convert in place and hand the temp file back.
        path = tmp
        pyprof2calltree.convert(pstats.Stats(tmp), path)

    if executable is not None:
        subprocess.Popen([executable, path])
    return path

Searchable combo box

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
class SearchableComboBox(QComboBox):
    """
    Editable combo box whose popup filters its items as you type.

    From
    https://stackoverflow.com/questions/4827207/how-do-i-filter-the-pyqt-qcombobox-items-based-on-the-text-input
    """
    def __init__(self, parent=None):
        super(SearchableComboBox, self).__init__(parent)
        self.setFocusPolicy(Qt.StrongFocus)
        self.setEditable(True)
        # Typed text must never be inserted as a new item.
        self.setInsertPolicy(QComboBox.NoInsert)
        self.completer = QCompleter(self)
        # always show all completions
        self.completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)
        # Proxy model that does the actual case-insensitive filtering.
        self.__filterModel = QSortFilterProxyModel(self)
        self.__filterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
        self.completer.setPopup(self.view())
        self.setCompleter(self.completer)
        # Each edit re-filters; py2 `unicode` overload of the signal.
        self.lineEdit().textEdited[unicode].connect(self.__filterModel.setFilterFixedString)
        self.completer.activated.connect(self.setTextIfCompleterIsClicked)

        # completer operates on the filtered view of our item model
        self.__filterModel.setSourceModel(self.model())
        self.completer.setModel(self.__filterModel)

    def clear(self):
        # NOTE(review): assumes the model has clear()/appendRow, i.e. a
        # QStandardItemModel-like model — confirm for custom models.
        self.model().clear()

    def addItem(self, item):
        self.model().appendRow(QStandardItem(item))

    def addItems(self, items):
        for item in items:
            self.model().appendRow(QStandardItem(item))

    def setModel(self, model):
        # Keep the filter proxy and completer pointing at the new model.
        super(SearchableComboBox, self).setModel(model)
        self.__filterModel.setSourceModel(model)
        self.completer.setModel(self.__filterModel)

    def setModelColumn(self, column):
        # Completion and filtering must follow the visible column.
        self.completer.setCompletionColumn(column)
        self.__filterModel.setFilterKeyColumn(column)
        super(SearchableComboBox, self).setModelColumn(column)

    def view(self):
        return self.completer.popup()

    def index(self):
        return self.currentIndex()

    def setTextIfCompleterIsClicked(self, text):
        # Sync the combo's current index with the completion the user picked.
        if text:
            index = self.findText(text)
            self.setCurrentIndex(index)

Smarter delegates

I recently dove into QItemDelegates and realized how easy it can be to make delegates without resorting to subclassing QItemDelegate itself.

I wrapped the functionality in a neat little function, just pass an editor that has a value() getter and a setValue() setter and you should be good to go

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
_typedDelegates = {}

def makeDelegate(cls):
    if cls in _typedDelegates:
        return _typedDelegates[cls]
    d = QStyledItemDelegate()
    _typedDelegates[cls] = d
    f = QItemEditorFactory()
    d.setItemEditorFactory(f)

    class EditorWrapper(QItemEditorCreatorBase):
        def __init__(self, cls):
            super(EditorWrapper, self).__init__()
            self.cls = cls

        def createWidget(self, parent):
            return self.cls(parent)

        def valuePropertyName(self):
            return 'value'

    f.registerEditor(0, EditorWrapper(cls))
    return d

Improving a renderer

This feeds into my previous write-up on the tools developed for our 64kb endeavours.

After creating Eidolon [Video] we were left with the feeling that the rendering can be a lot better. We had this single pass bloom and simple lambert & phong shading, no anti aliasing and very poor performing depth of field. Last the performance hit for reflections was through the roof as well.

I started almost immediately with a bunch of improvements, most of this work was done within a month after Revision. Which shows in our newest demo Yermom [Video]. I'll go over the improvements in chronological order and credit any sources used (of which there were a lot), if I managed to document that right...

Something useful to mention, all my buffers are Float32 RGBA.

Low-resolution reflections

Basically the scene is raymarched, for every pixel there is a TraceAndShade call to render the pixel excluding fog and reflection.

From the result we do another TraceAndShade for the reflection. This makes the entire thing twice as slow when reflections are on.

Instead I early out at this point if:

if(reflectivity == 0 || gl_FragCoord.x % 4 != 0 || gl_FragCoord.y % 4 != 0) return;

That results in only 1 in 16 pixels being reflective. So instead of compositing the reflection directly I write it to a separate buffer.

Then in a future pass I composite the 2 buffers, where I just do a look up in the reflection buffer like so:

texelFetch(uImages[0], ivec2(gl_FragCoord.xy)) + texelFetch(uImages[1], ivec2(gl_FragCoord.xy / 4) * 4)

In my real scenario I removed that * 4 and render to a 4 times smaller buffer instead, so reading it back results in free interpolation.

I still have glitches when blurring the reflections too much & around edges in general. Definitely still room for future improvement.

Image showing spheres and floor with low-resolution reflection

Oren Nayar diffuse light response

The original paper and this image especially convinced me into liking this shading model for diffuse objects.

Image displaying Oren-Nayar diffuse lighting on a very rough vase, compared with a photo and a lambertian version

So I tried to implement that, failed a few times, got pretty close, found an accurate implementation, realized it was slow, and ended on these 2 websites:
https://blog.popekim.com/en/2011/11/16/optimized-oren-nayar-approximation.html
http://www.artisticexperiments.com/cg-shaders/cg-shaders-oren-nayar-fast

That lists a nifty trick to fake it, I took away some terms as I realized they contributed barely any visible difference, so I got something even less accurate. I already want to revisit this, but it's one of the improvements I wanted to share nonetheless.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
// Cheap Oren-Nayar-style diffuse approximation (see popekim / artisticexperiments
// links above). satNdotV/satNdotL are saturated dot(N,V) and dot(N,L).
// roughness 0 falls back to pure lambert; higher roughness blends towards a
// softened-rim response.
float orenNayarDiffuse(float satNdotV, float satNdotL, float roughness)
{
    float lambert = satNdotL;
    if(roughness == 0.0)
        return lambert;
    // rim softening term driven by the view angle
    float softRim = saturate(1.0 - satNdotV * 0.5);

    // my magic numbers
    float fakey = pow(lambert * softRim, 0.85);
    return mix(lambert, fakey * 0.85, roughness);
}

GGX Specular

There are various open source implementations of this. I found one here:
http://filmicworlds.com/blog/optimizing-ggx-shaders-with-dotlh

It talks about tricks to optimize things by precomputing a lookup texture, I didn't go that far. There's not much I can say about this, as I don't fully understand the math and how it changes from the basic phong dot(N, H).

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
// Visibility helper term used by ggxSpecular below.
float G1V(float dotNV, float k){return 1.0 / (dotNV * (1.0 - k)+k);}

// GGX specular response, based on
// http://filmicworlds.com/blog/optimizing-ggx-shaders-with-dotlh
float ggxSpecular(float NdotV, float NdotL, vec3 N, vec3 L, vec3 V, float roughness)
{
    float F0 = 0.5; // fixed reflectance at normal incidence

    vec3 H = normalize(V + L); // half vector
    float NdotH = saturate(dot(N, H));
    float LdotH = saturate(dot(L, H));
    float a2 = roughness * roughness;

    float D = a2 / (PI * sqr(sqr(NdotH) * (a2 - 1.0) + 1.0)); // GGX distribution
    float F = F0 + (1.0 - F0) * pow(1.0 - LdotH, 5.0);        // Schlick fresnel
    float vis = G1V(NdotL, a2 * 0.5) * G1V(NdotV, a2 * 0.5);  // visibility
    return NdotL * D * F * vis;
}

Image showing specular highlights on reflective spheres and floor

FXAA

FXAA3 to be precise. The whitepaper is quite clear, still why bother writing it if it's open source. I can't remember which one I used, but here's a few links:
https://gist.github.com/kosua20/0c506b81b3812ac900048059d2383126
https://github.com/urho3d/Urho3D/blob/master/bin/CoreData/Shaders/GLSL/FXAA3.glsl
https://github.com/vispy/experimental/blob/master/fsaa/fxaa.glsl

Preprocessed and minified for preset 12 made it very small in a compressed executable. Figured I'd just share it.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
#version 420
// FXAA3, preset 12, preprocessed & hand-minified for compressed-executable size.
// aa() = luma (Rec.601 weights); bb/cc/dd = (offset) fetches of uImages[0];
// ee/ff implement the edge-end search steps. Do not reformat: the minified
// form is intentional.
uniform vec3 uTimeResolution;uniform sampler2D uImages[1];out vec4 z;float aa(vec3 a){vec3 b=vec3(.299,.587,.114);return dot(a,b);}
#define bb(a)texture(uImages[0],a)
#define cc(a)aa(texture(uImages[0],a).rgb)
#define dd(a,b)aa(texture(uImages[0],a+(b*c)).rgb)
void main(){vec2 a=gl_FragCoord.xy/uTimeResolution.yz,c=1/uTimeResolution.yz;vec4 b=bb(a);b.y=aa(b.rgb);float d=dd(a,vec2(0,1)),e=dd(a,vec2(1,0)),f=dd(a,vec2(0,-1)),g=dd(a,vec2(-1,0)),h=max(max(f,g),max(e,max(d,b.y))),i=h-min(min(f,g),min(e,min(d,b.y)));if(i<max(.0833,h*.166)){z=bb(a);return;}h=dd(a,vec2(-1,-1));float j=dd(a,vec2( 1,1)),k=dd(a,vec2( 1,-1)),l=dd(a,vec2(-1,1)),m=f+d,n=g+e,o=k+j,p=h+l,q=c.x;
bool r=abs((-2*g)+p)+(abs((-2*b.y)+m)*2)+abs((-2*e)+o)>=abs((-2*d)+l+j)+(abs((-2*b.y)+n)*2)+abs((-2*f)+h+k);if(!r){f=g;d=e;}else q=c.y;h=f-b.y,e=d-b.y,f=f+b.y,d=d+b.y,g=max(abs(h),abs(e));i=clamp((abs((((m+n)*2+p+o)*(1./12))-b.y)/i),0,1);if(abs(e)<abs(h))q=-q;else f=d;vec2 s=a,t=vec2(!r?0:c.x,r?0:c.y);if(!r)s.x+=q*.5;else s.y+=q*.5;
vec2 u=vec2(s.x-t.x,s.y-t.y);s=vec2(s.x+t.x,s.y+t.y);j=((-2)*i)+3;d=cc(u);e=i*i;h=cc(s);g*=.25;i=b.y-f*.5;j=j*e;d-=f*.5;h-=f*.5;bool v,w,x,y=i<0;
#define ee(Q) v=abs(d)>=g;w=abs(h)>=g;if(!v)u.x-=t.x*Q;if(!v)u.y-=t.y*Q;x=(!v)||(!w);if(!w)s.x+=t.x*Q;if(!w)s.y+=t.y*Q;
#define ff if(!v)d=cc(u.xy);if(!w)h=cc(s.xy);if(!v)d=d-f*.5;if(!w)h=h-f*.5;
ee(1.5)if(x){ff ee(2.)if(x){ff ee(4.)if(x){ff ee(12.)}}}e=a.x-u.x;f=s.x-a.x;if(!r){e=a.y-u.y;f=s.y-a.y;}q*=max((e<f?(d<0)!=y:(h<0)!=y)?(min(e,f)*(-1/(f+e)))+.5:0,j*j*.75);if(!r)a.x+=q;else a.y+=q;z=bb(a);}

Image showing off fxaa3 (left) and no aa (right)

Multi pass bloom

The idea for this one was heavily inspired by this asset for Unity:

https://assetstore.unity.com/packages/vfx/shaders/fullscreen-camera-effects/mk-glow-90204

I'm quite sure the technique is not original, but that's where I got the idea.

The idea is to downsample and blur at many resolutions and then combine the (weighted) results to get a very high quality full screen blur.

So basically downsample to a quarter (factor 2) of the screen using this shader:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
#version 420
// 2x downsample pass: averages the four source texels straddling the
// destination pixel (offsets of half a texel in each diagonal).

uniform vec3 uTimeResolution;
#define uTime (uTimeResolution.x)
#define uResolution (uTimeResolution.yz)

uniform sampler2D uImages[1];

out vec4 outColor0;

void main()
{
    outColor0 = 0.25 * (texture(uImages[0], (gl_FragCoord.xy + vec2(-0.5)) / uResolution)
    + texture(uImages[0], (gl_FragCoord.xy + vec2(0.5, -0.5)) / uResolution)
    + texture(uImages[0], (gl_FragCoord.xy + vec2(0.5, 0.5)) / uResolution)
    + texture(uImages[0], (gl_FragCoord.xy + vec2(-0.5, 0.5)) / uResolution));
}

Then downsample that, and recurse until we have a factor 64

All the downsamples fit in the backbuffer, so in theory that together with the first blur pass can be done in 1 go using the backbuffer as sampler2D as well. But to avoid the hassle of figuring out the correct (clamped!) uv coordinates I just use a ton of passes.

Then take all these downsampled buffers and ping pong them for blur passes, so for each buffer: HBLUR taking steps of 2 pixels, into a buffer of the same size VBLUR, back into the initial downsampled buffer HBLUR taking steps of 3 pixels, reuse the HBLUR buffer VBLUR, reuse the initial downsampled buffer

The pixel steps is given to uBlurSize, the direction of blur is given to uDirection.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
#version 420
// Separable 7-tap blur pass. uDirection selects horizontal/vertical,
// uBlurSize is the step between taps in pixels.

out vec4 color;

uniform vec3 uTimeResolution;
#define uTime (uTimeResolution.x)
#define uResolution (uTimeResolution.yz)

uniform sampler2D uImages[1];
uniform vec2 uDirection;
uniform float uBlurSize;

// 7-tap kernel weights; symmetric and summing to 1.0.
const float curve[7] = { 0.0205,
    0.0855,
    0.232,
    0.324,
    0.232,
    0.0855,
    0.0205 };

void main()
{
    vec2 uv = gl_FragCoord.xy / uResolution;
    vec2 netFilterWidth = uDirection / uResolution * uBlurSize;
    // Start 3 taps before the center so the 7 taps are centered on uv.
    vec2 coords = uv - netFilterWidth * 3.0;

    color = vec4(0);
    for( int l = 0; l < 7; l++ )
    {
        vec4 tap = texture(uImages[0], coords);
        color += tap * curve[l];
        coords += netFilterWidth;
    }
}

Last we combine passes with lens dirt. uImages[0] is the original backbuffer, 1-6 is all the downsampled and blurred buffers, 7 is a lens dirt image. My lens dirt texture is pretty poor, its just a precalced texture with randomly scaled and colored circles and hexagons, sometimes filled and sometimes outlines. I don't think I actually ever used the lens dirt or bloom intensity as uniforms.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
#version 420
// Final combine: backbuffer (uImages[0]) + six blurred downsample levels
// (uImages[1..6]) + a lens dirt texture (uImages[7]), then gamma correction.

out vec4 color;

uniform vec3 uTimeResolution;
#define uTime (uTimeResolution.x)
#define uResolution (uTimeResolution.yz)

uniform sampler2D uImages[8];
uniform float uBloom = 0.04;
uniform float uLensDirtIntensity = 0.3;

void main()
{
    vec2 coord = gl_FragCoord.xy / uResolution;
    color = texture(uImages[0], coord);

    vec3 b0 = texture(uImages[1], coord).xyz;
    vec3 b1 = texture(uImages[2], coord).xyz * 0.6; // dampen to have less banding in gamma space
    vec3 b2 = texture(uImages[3], coord).xyz * 0.3; // dampen to have less banding in gamma space
    vec3 b3 = texture(uImages[4], coord).xyz;
    vec3 b4 = texture(uImages[5], coord).xyz;
    vec3 b5 = texture(uImages[6], coord).xyz;

    // Weighted sum of the blur pyramid.
    vec3 bloom = b0 * 0.5
        + b1 * 0.6
        + b2 * 0.6
        + b3 * 0.45
        + b4 * 0.35
        + b5 * 0.23;

    bloom /= 2.2;
    color.xyz = mix(color.xyz, bloom.xyz, uBloom);

    // Lens dirt uses its own (brighter) weighting of the same pyramid.
    vec3 lens = texture(uImages[7], coord).xyz;
    vec3 lensBloom = b0 + b1 * 0.8 + b2 * 0.6 + b3 * 0.45 + b4 * 0.35 + b5 * 0.23;
    lensBloom /= 3.2;
    color.xyz = mix(color.xyz, lensBloom, (clamp(lens * uLensDirtIntensity, 0.0, 1.0)));

    // Linear -> gamma space.
    color.xyz = pow(color.xyz, vec3(1.0 / 2.2));
}

White lines on a cube, brightness of 10.

Image showing off bloom and lens dirt

White lines on a cube, brightness of 300.

Image showing off extreme bloom and lens dirt

Sphere tracing algorithm

Instead of a rather naive sphere tracing loop I used in a lot of 4kb productions and can just write by heart I went for this paper:
http://erleuchtet.org/~cupe/permanent/enhanced_sphere_tracing.pdf

It is a clever technique that involves overstepping and backtracking only when necessary, as well as keeping track of pixel size in 3D to realize when there is no need to compute more detail. The paper is full of code snippets and clear infographics, I don't think I'd be capable to explain it any clearer.

Image showing repeated spheres across a terrain, with both rough and smooth (reflective) spheres and a reflective "water" plane. Showing off stability in the distance

Beauty shots

Image showing off excessive bloom on the previous landscape and spheres

Image showing off 3 colors of bloom

Depth of field

I initially only knew how to do good circular DoF, until this one came along: https://www.shadertoy.com/view/4tK3WK Which I used initially, but to get it to look good was really expensive, because it is all single pass. Then I looked into a 3-blur-pass solution, which sorta worked, but when I went looking for more optimized versions I found this 2 pass one: https://www.shadertoy.com/view/Xd3GDl. It works extremely well, the only edge cases I found were when unfocusing a regular grid of bright points.

Here's what I wrote to get it to work with a depth buffer (depth based blur):

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
const int NUM_SAMPLES = 16;

// DoF pass 1 of 2: vertical depth-aware blur. Depth comes in (and is passed
// along) via the alpha channel for the second pass.
void main()
{
    vec2 fragCoord = gl_FragCoord.xy;

    const vec2 blurdir = vec2( 0.0, 1.0 );
    vec2 blurvec = (blurdir) / uResolution;
    vec2 uv = fragCoord / uResolution.xy;

    float z = texture(uImages[0], uv).w;
    fragColor = vec4(depthDirectionalBlur(z, CoC(z), uv, blurvec, NUM_SAMPLES), z);
}

Second pass:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
const int NUM_SAMPLES = 16;

// DoF pass 2 of 2: two diagonal blurs combined with min(), per the 2-pass
// technique referenced above (https://www.shadertoy.com/view/Xd3GDl).
// 0.577350269189626 = tan(30 degrees), i.e. the diagonals sit 120 degrees
// apart from the vertical blur of pass 1.
void main()
{
    vec2 uv = gl_FragCoord.xy / uResolution;

    float z = texture(uImages[0], uv).w;

    vec2 blurdir = vec2(1.0, 0.577350269189626);
    vec2 blurvec = normalize(blurdir) / uResolution;
    vec3 color0 = depthDirectionalBlur(z, CoC(z), uv, blurvec, NUM_SAMPLES);

    blurdir = vec2(-1.0, 0.577350269189626);
    blurvec = normalize(blurdir) / uResolution;
    vec3 color1 = depthDirectionalBlur(z, CoC(z), uv, blurvec, NUM_SAMPLES);

    vec3 color = min(color0, color1);
    fragColor = vec4(color, 1.0);
}

Shared header:

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
#version 420

// default uniforms
uniform vec3 uTimeResolution;
#define uTime (uTimeResolution.x)
#define uResolution (uTimeResolution.yz)

uniform sampler2D uImages[1];

uniform float uSharpDist = 15; // distance from camera that is 100% sharp
uniform float uSharpRange = 0; // distance from the sharp center that remains sharp
uniform float uBlurFalloff = 1000; // distance from the edge of the sharp range it takes to become 100% blurry
uniform float uMaxBlur = 16; // radius of the blur in pixels at 100% blur

float CoC(float z)
{
    return uMaxBlur * min(1, max(0, abs(z - uSharpDist) - uSharpRange) / uBlurFalloff);
}

out vec4 fragColor;

//note: uniform pdf rand [0;1)
float hash1(vec2 p)
{
    p = fract(p * vec2(5.3987, 5.4421));
    p += dot(p.yx, p.xy + vec2(21.5351, 14.3137));
    return fract(p.x * p.y * 95.4307);
}

#define USE_RANDOM

// Depth-aware directional blur.
//   z:          depth at uv (stored in the input texture's alpha channel)
//   coc:        blur radius in pixels at uv
//   uv:         initial coordinate
//   blurvec:    smudge direction, pre-divided by resolution (one pixel long)
//   numSamples: blur taps
// Averages numSamples taps spread over [-0.5, 0.5] * coc along blurvec.
vec3 depthDirectionalBlur(float z, float coc, vec2 uv, vec2 blurvec, int numSamples)
{
    vec3 sumcol = vec3(0.0);

    for (int i = 0; i < numSamples; ++i)
    {
        // Tap parameter in [-0.5, 0.5]; with USE_RANDOM each tap is jittered
        // per-pixel and per-frame, trading banding for noise.
        float r =
            #ifdef USE_RANDOM
            (i + hash1(uv + float(i + uTime)) - 0.5)
            #else
            i
            #endif
            / float(max(numSamples - 1, 1)) - 0.5; // guard numSamples == 1
        vec2 p = uv + r * coc * blurvec;
        vec4 smpl = texture(uImages[0], p);
        if(smpl.w < z) // if sample is closer consider its CoC
        {
            // Clamp the re-fetch to this fragment's own coc so a sharp,
            // closer object is not smeared further than our blur radius.
            // (Fix: a leftover unclamped assignment previously overwrote
            // this min(), making the clamp dead code.)
            p = uv + r * min(coc, CoC(smpl.w)) * blurvec;
            smpl = texture(uImages[0], p);
        }
        sumcol += smpl.xyz;
    }

    sumcol /= float(numSamples);
    sumcol = max(sumcol, 0.0);

    return sumcol;
}

Image showing off depth of field on a scene with bent & bump mapped cubes, tori, checkerboarded spheres and a perlin noise textured plane as floor.

Additional sources used for a longer time

Distance function library

http://mercury.sexy/hg_sdf/ A very cool site explaining all kinds of things you can do with this code. I think many of these functions were invented already, but with some bonuses as well as a very clear code style and excellent documentation for full accessibility. For an introduction to this library:
https://www.youtube.com/watch?v=T-9R0zAwL7s

Noise functions

https://www.shadertoy.com/view/4djSRW Hashes optimized to only implement hash4() and the rest is just swizzling and redirecting, so a float based hash is just:

1
2
// Convenience wrappers: only hash4() (defined elsewhere) does real work;
// the narrower variants just broadcast the input and swizzle the result.
float hash1(float x){return hash4(vec4(x)).x;}
vec2 hash2(float x){return hash4(vec4(x)).xy;}

And so on.

Value noise
https://www.shadertoy.com/view/4sfGzS https://www.shadertoy.com/view/lsf3WH

Voronoi 2D
https://www.shadertoy.com/view/llG3zy Voronoi is great, as using the center distance we get worley noise instead, and we can track cell indices for randomization. This is fairly fast, but still too slow to do realtime. So I implemented tileable 2D & 3D versions.

Perlin
Layering the value noise for N iterations, scaling the UV by 2 and weight by 0.5 in every iteration. These could be controllable parameters for various different looks. A slower weight decrease results in a more wood-grain look for example.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
// Fractal (fBm) noise: sums `iterations` octaves of snoise(), doubling
// frequency and halving amplitude each layer, then halves the total.
float perlin(vec2 p, int iterations)
{
    float total = 0.0;
    float weight = 1.0;

    for (int octave = 0; octave < iterations; ++octave)
    {
        total += weight * snoise(p);
        weight *= 0.5;
        p *= 2.0;
    }

    return 0.5 * total;
}

Now the perlin logic can be applied to worley noise (voronoi center) to get billows. I did the same for the voronoi edges, all tileable in 2D and 3D for texture precalc. Here's an example. Basically the modulo in the snoise function is the only thing necessary to make things tileable. Perlin then just uses that and keeps track of the scale for that layer.

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
// Tileable value noise: lattice coordinates are wrapped with mod(.., scale)
// so the pattern repeats seamlessly every `scale` units.
float snoise_tiled(vec2 p, float scale)
{
    p *= scale;
    vec2 cell = floor(p);
    vec2 t = p - cell;
    t = t * t * (3.0 - 2.0 * t); // smoothstep fade curve

    // Hash the four wrapped lattice corners. The +10.0 offset shifts inputs
    // away from zero (presumably to decorrelate the hash there — unverified).
    float v00 = hash1(mod(cell + vec2(0.0, 0.0), scale) + 10.0);
    float v10 = hash1(mod(cell + vec2(1.0, 0.0), scale) + 10.0);
    float v01 = hash1(mod(cell + vec2(0.0, 1.0), scale) + 10.0);
    float v11 = hash1(mod(cell + vec2(1.0, 1.0), scale) + 10.0);

    // Bilinear interpolation with the faded fraction.
    return mix(mix(v00, v10, t.x), mix(v01, v11, t.x), t.y);
}
// Tileable fBm built on snoise_tiled(): the repeat period (`scale`) is
// doubled together with the frequency each octave, so every layer shares
// the same tile boundary and the sum stays seamless.
float perlin_tiled(vec2 p, float scale, int iterations)
{
    p = mod(p, scale);
    float total = 0.0;
    float weight = 1.0;

    for (int octave = 0; octave < iterations; ++octave)
    {
        total += weight * snoise_tiled(p, scale);
        weight *= 0.5;
        scale *= 2.0;
    }

    return 0.5 * total;
}

Creating a tool to make a 64k demo

In the process of picking up this webpage again, I can talk about something we did quite a while ago. I, together with a team, went through the process of making a 64 kilobyte demo. We happened to win at one of the biggest demoscene events in europe. Revision 2017. I still feel the afterglow of happiness from that.

If you're not sure what that is, read on, else, scroll down! You program a piece of software that is only 64 kb in size, that shows an audio-visual experience generated in realtime. To stay within such size limits you have to generate everything, we chose to go for a rendering technique called ray marching, that allowed us to put all 3D modeling, texture generation, lighting, etc. as ascii (glsl sources) in the executable. On top of that we used a very minimal (yet versatile) modular synthesizer called 64klang2. Internally it stores a kind of minimal midi data and the patches and it can render amazing audio in realtime, so it doesn't need to pre-render the song or anything. All this elementary and small size data and code compiles to something over 200kb, which is then compressed using an executable packer like UPX or kkrunchy.

It was called Eidolon. You can watch a video: Eidolon - Poo-Brain
Or stress test your GPU / leave a comment here:
http://www.pouet.net/prod.php?which=69669

The technologies used were fairly basic, it's very old school phong & lambert shading, 2 blur passes for bloom, so all in all pretty low tech and not worth discussing. What I would like to discuss is the evolution of the tool. I'll keep it high level this time though. Maybe in the future I can talk about specific implementations of things, but just seeing the UI will probably explain a lot of the features and the way things work.

Step 1: Don't make a tool from scratch

Our initial idea was to leverage existing software. One of our team members, who controlled the team besides modelling and eventually directing the whole creative result, had some experience with a real-time node based software called Touch Designer. It is a tool where you can do realtime visuals, and it supports exactly what we need: rendering into a 2D texture with a fragment shader.

We wanted to have the same rendering code for all scenes, and just fill in the modeling and material code that is unique per scene. We figured out how to concatenate separate pieces of text and draw them into a buffer. Multiple buffers even. At some point I packed all code and rendering logic of a pass into 1 grouped node and we could design our render pipeline entirely node based.

Screenshot with 5 numbers referenced below: Render pipeline made as Touch Designer node-graph

Here you see the text snippets (1) merged into some buffers (2) and then post processed for the bloom (3). On the right (4) you see the first problem we hit with Touch Designer. The compiler error log is drawn inside this node. There is basically no easy way to have that error visible in the main application somewhere. So the first iteration of the renderer (and coincidentally the main character of Eidolon) looked something like this:

Screenshot of the render pipeline in action, with a time slider widget underneath
The renderer didn't really change after this.

In case I sound too negative about touch designer in the next few paragraphs, our use case was rather special, so take this with a grain of salt!

We have a timeline control, borrowed the UI design from Maya a little, so this became the main preview window. That's when we hit some problems though. The software has no concept of window focus, so it'd constantly suffer hanging keys or responding to keys while typing in the text editor.

Last issue that really killed it though: everything has to be in 1 binary file. There is no native way to reference external text files for the shader code, or merge node graphs. There is a really weird utility that expands the binary to ascii, but then literally every single node is a text file so it is just unmergeable.

Step 2: Make a tool from scratch

So then this happened:
Screenshot of the first proof-of-concept version of SqrMelon

Over a week's time in the evenings and then 1 long saturday I whipped this up using PyQt and PyOpenGL. This is the first screenshot I made, the curve editor isn't actually an editor yet and there is no concept of camera shots (we use this to get hard cuts).

It has all the same concepts however, separate text files for the shader code, with an XML file determining what render passes use what files and in what buffer they render / what buffers they reference in turn. With the added advantage of the perfect granularity all stored in ascii files.

Some files are template-level, some are scene-level, so creating a new scene actually only copies the scene-level files which can then be adjusted in a text editor, with a file watcher updating the picture. The CurveEditor feeds right back into the uniforms of the shader (by name) and the time slider at the bottom is the same idea as Maya / what you saw before.

Step 3: Make it better

Render pipeline
The concept was to set up a master render pipeline into which scenes would inject snippets of code. On disk this became a bunch of snippets, and an XML based template definition. This would be the most basic XML file:

<template>
    <pass buffer="0" outputs="1">
        <global path="header.glsl"/>
        <section path="scene.glsl"/>
        <global path="pass.glsl"/>
    </pass>
    <pass input0="0">
        <global path="present.glsl"/>
    </pass>
</template>

This will concatenate 3 files into 1 fragment shader, render into full-screen buffer "0" and then use present.glsl as another fragment shader, which in turn has the previous buffer "0" as input (forwarded to a sampler2D uniform).

This branched out into making static buffers (textures), setting buffer sizes (smaller textures), multiple target buffers (render main and reflection pass at once), setting buffer size to a portion of the screen (downsampling for bloom), and 3D texture support (volumetric noise textures for clouds).

Creating a new scene will just copy "scene.glsl" from the template to a new folder, there you can then fill out the necessary function(s) to get a unique scene. Here's an example from our latest Evoke demo. 6 scenes, under which you see the "section" files for each scene.

Screenshot of a tree-view listing scenes, each scene containing a sub-list of editable shader files

Camera control
The second important thing I wanted to tackle was camera control. Basically the demo will control the camera based on some animation data, but it is nice to fly around freely and even use the current camera position as animation keyframe. So this was just using Qt's event system to hook up the mouse and keyboard to the viewport.

Screenshot of a camera widget, showing buttons to enable/disable animation, snap to current animation, and 6 inputs for translate and rotate

I also created a little widget that displays where the camera is, has an "animation input or user input" toggle as well as a "snap to current animation frame" button.

Animation control
So now to animate the camera, without hard coding values! Or even typing numbers, preferably. I know a lot of people use a tracker-like tool called Rocket, I never used it and it looks an odd way to control animation data to me. I come from a 3D background, so I figured I'd just want a curve editor like e.g. Maya has. In Touch Designer we also had a basic curve editor, conveniently you can name a channel the same as a uniform, then just have code evaluate the curve at the current time and send the result to that uniform location.

Some trickery was necessary to pack vec3s, I just look for channels that start with the same name and then end in .x, .y, .z, and possibly .w.

Screenshot of the curve editor, showing various controls in a toolbar, a list of animateable properties on the left, and a big graph showing animation curves in the center

Here's an excerpt from a long camera shot with lots of movement, showing off our cool hermite splines. At the top right you can see we have several built in tangent modes, we never got around to building custom tangent editing. In the end this is more than enough however. With flat tangents we can create easing/acceleration, with spline tangents we can get continuous paths and with linear tangents we get continuous speed. Next to that are 2 cool buttons that allow us to feed the camera position to another uniform, so you can literally fly to a place where you want to put an object. It's not as good as actual move/rotate widgets but for the limited times we need to place 3D objects it's great.

Hard cuts
Apart from being impossible to represent in this interface, we don't support 2 keys at identical times. This means that we can't really have the camera "jump" to a new position instantly. With a tiny amount of curve inbetween the previous and the next shot position, the time cursor can actually render 1 frame of a random camera position. So we had to solve this. I think it is one of the only big features that you won't see in the initial screenshot above actually.

Screenshot of the shot manager, showing a toolbar with create/duplicate/delete buttons and a table of shot names, scenes and editable start/end times (in beats), with a context menu to show/hide and other utilities

Introducing camera shots. A shot has its own "scene it should display" and its own set of animation data. So selecting a different shot yields different curve editor content. Shots are placed on a shared timeline, so scrolling through time will automatically show the right shot and setting a keyframe will automatically figure out the "shot local time" to put the key based on the global demo time. The curve editor has its own playhead that is directly linked to the global timeline as well, so we can adjust the time in multiple places.

When working with lots of people we had issues with people touching other people's (work in progress) shots. Therefore we introduced "disabling" of shots. This way anyone could just prefix their shots and disable them before submitting, and we could mix and match shots from several people to get a final camera flow we all liked.

Screenshot of the time slider paired with the curve editor, showing shot layout on the timeline, playback & looping controls and synchronization between the curve and time slider playheads

Shots are also rendered on the timeline as colored blocks. The grey block underneath those is our "range slider". It makes the top part apply on only a subsection of the demo, so it is easy to loop a specific time range, or just zoom in far enough to use the mouse to change the time granularly enough.

The devil is in the details

Some things I overlooked in the first implementation, and some useful things I added only recently. 1. Undo/Redo of animation changes. Not unimportant, and luckily not hard to add with Qt.

  1. Ctrl click timeline to immediately start animating that shot
  2. Right click a shot to find the scene
  3. Right click a scene to create a shot for that scene in particular
  4. Current time display in minutes:seconds instead of just beats
  5. BPM stored per-project instead of globally
  6. Lots of hotkeys!

These things make the tool just that much faster to use.

Finally, here's our tool today. There's still plenty to be done, but we made 2 demos with it so far and it gets better every time!

Screenshot of current tool state, adding composition overlays and sneak peaking at the new render pipeline of our Yermom demo made in 2018

Music visuals!

My friend RoccoW, chiptune artist, released a new EP. I've had it on loop while jamming shaders and animations, and within a weeks time we now have a cool continuous mix with visuals!

RoccoW - TEK Continuous Mix (Visual by Tropical Trevor)

Click here for the separate tracks.