How to REALTIME render / get dataURI of the blender viewport IN FULL resolution 1920/1080?

Hi, with the new EEVEE realtime viewport rendering system, obviously a lot of new things are “possible”, but I really, REALLY want / need a realtime rendering feature. This is totally possible and barely / might not even need any real change to the C code (I only did a little so far). So far I’m able to actually get the 3D viewport image, save it to a file, and get the dataURI for websocket use, and the files save for me IN REAL TIME as I play the timeline. Here is my code:

import base64, io, os, bgl, gpu, bpy, threading
import numpy as np
from gpu_extras.presets import draw_texture_2d
from PIL import Image
from queue import Queue

myQ = Queue()
# initial output path; the frame_change handler below overwrites this per frame
finalPath = bpy.context.scene.render.filepath + "hithere.png"

WIDTH = bpy.context.scene.render.resolution_x
HEIGHT = bpy.context.scene.render.resolution_y

offscreen = gpu.types.GPUOffScreen(WIDTH, HEIGHT)

def draw2():
    global finalPath
    global WIDTH
    global HEIGHT

    context = bpy.context
    scene = context.scene
    scaleFactor = 1

    # render the scene from the active camera into the offscreen buffer
    view_matrix = scene.camera.matrix_world.inverted()
    projection_matrix = scene.camera.calc_matrix_camera(
        context.depsgraph,
        x=WIDTH * scaleFactor, y=HEIGHT * scaleFactor,
        scale_x=scaleFactor, scale_y=scaleFactor)

    offscreen.draw_view3d(
        scene,
        context.view_layer,
        context.space_data,
        context.region,
        view_matrix,
        projection_matrix)

    # blit the offscreen texture into the viewport, then read the pixels
    # back from the window's back buffer
    bgl.glDisable(bgl.GL_DEPTH_TEST)
    draw_texture_2d(offscreen.color_texture, (0, 0),
                    WIDTH * scaleFactor, HEIGHT * scaleFactor)

    w = WIDTH * scaleFactor
    h = HEIGHT * scaleFactor
    buffer = bgl.Buffer(bgl.GL_BYTE, w * h * 4)
    bgl.glReadBuffer(bgl.GL_BACK)
    bgl.glReadPixels(0, 0, w, h, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

    # hand the pixels off to a worker thread so playback isn't blocked by PNG encoding
    print("starting thread for pic: " + finalPath)
    needle = threading.Thread(target=saveIt, args=[buffer, finalPath, w, h])
    needle.daemon = True
    needle.start()
    print("finished starting thread for " + finalPath)
 
    
    
    
def coby(scene):
    # frame_change_pre handler: update the output path for the current frame
    frame = scene.frame_current
    folder = scene.render.filepath
    myFormat = "png"  # or scene.render.image_settings.file_format.lower()
    outputPath = os.path.join(folder, "%05d.%s" % (frame, myFormat))
    global finalPath
    finalPath = outputPath


    
# register the draw handler and the per-frame path updater
h = bpy.types.SpaceView3D.draw_handler_add(draw2, (), 'WINDOW', 'POST_PIXEL')
bpy.app.handlers.frame_change_pre.clear()
bpy.app.handlers.frame_change_pre.append(coby)


def saveIt(buffer, path, width, height):

    print("now I'm in the actual thread! SO exciting (for picture: " + path + ")")

    # bgl.Buffer -> numpy -> PIL image (this is what needs the buffer-protocol
    # patch to bgl.c described below)
    array = np.asarray(buffer, dtype=np.uint8)
    im = Image.frombytes("RGBA", (width, height), array.tobytes())

    # encode to PNG in memory and build the dataURI for websocket use
    rawBytes = io.BytesIO()
    im.save(rawBytes, "PNG")
    rawBytes.seek(0)
    base64Encoded = base64.b64encode(rawBytes.read())
    txt = "data:image/png;base64," + base64Encoded.decode()

    # queue the raw PNG bytes, then save the frame to disk
    filebytes = base64.decodebytes(base64Encoded)
    myQ.put(filebytes)

    with open(path, "wb") as f:
        while myQ.qsize():
            f.write(myQ.get())
    print("gotmeThis time for picture: " + path)

It’s not ideal for many reasons. First, I had to change some C code as described in this post (look at the bottom of the “EDIT 4” section): [Python: get “image” of 3D view for streaming / realtime EEVEE rendering](https://blender.stackexchange.com/questions/128174/python-get-image-of-3d-view-for-streaming-realtime-eevee-rendering)

Basically it is just adding this to bgl.c:

    static int itemsize_by_buffer_type(int buffer_type)
    {
        if (buffer_type == GL_BYTE) return sizeof(GLbyte);
        if (buffer_type == GL_SHORT) return sizeof(GLshort);
        if (buffer_type == GL_INT) return sizeof(GLint);
        if (buffer_type == GL_FLOAT) return sizeof(GLfloat);
        return -1;  /* should never happen */
    }

    static const char *bp_format_from_buffer_type(int type)
    {
        if (type == GL_BYTE) return "b";
        if (type == GL_SHORT) return "h";
        if (type == GL_INT) return "i";
        if (type == GL_FLOAT) return "f";
        return NULL;
    }

    static int BPy_Buffer_getbuffer(Buffer *self, Py_buffer *view, int flags)
    {
        void* buffer = self->buf.asvoid;
        int itemsize = itemsize_by_buffer_type(self->type);
        // Number of entries in the buffer
        const unsigned long n = *self->dimensions;
        unsigned long length = itemsize * n;

        if (PyBuffer_FillInfo(view, (PyObject *)self, buffer, length, false, flags) == -1) {
            return -1;
        }

        view->itemsize = itemsize;
        view->format = (char*)bp_format_from_buffer_type(self->type);

        Py_ssize_t *shape = MEM_mallocN(sizeof(Py_ssize_t), __func__);
        shape[0] = n;
        view->shape = shape;

        return 0;
    }

    static void BPy_Buffer_releasebuffer(Buffer *UNUSED(self), Py_buffer *view)
    {
        MEM_freeN(view->shape);
    }

    static PyBufferProcs BPy_Buffer_Buffer = {
        (getbufferproc)BPy_Buffer_getbuffer,
        (releasebufferproc)BPy_Buffer_releasebuffer,
    };
and then change this:

    /* Functions to access object as input/output buffer */
    NULL,         /* PyBufferProcs *tp_as_buffer; */
to this:

    /* Functions to access object as input/output buffer */
    &BPy_Buffer_Buffer,         /* PyBufferProcs *tp_as_buffer; */
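
With that patch applied, `bgl.Buffer` supports the Python buffer protocol, which is what lets the `np.asarray(buffer, ...)` call in `saveIt` work. A quick sanity check from the Python console (a sketch, assuming the patched build):

    import bgl
    import numpy as np

    buf = bgl.Buffer(bgl.GL_BYTE, 16)          # small test buffer
    view = memoryview(buf)                     # only works once tp_as_buffer is wired up
    print(view.format, view.nbytes)            # -> b 16
    arr = np.frombuffer(view, dtype=np.int8)   # zero-copy view of the bgl buffer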

but there's probably another way to convert the Blender buffer to bytes. I also had to install the Python module PIL manually for saving the image, but there's probably another way around that too. Anyway, if you do those two things, you should be able to test my code. I made a simple animation of about 600+ frames with the camera moving, and when I start from frame 0 and simply hit the play button, I can watch my /tmp folder instantly fill up with images. The problem: the image only captures what is actually SHOWN in the viewport, so if the 3D viewport is partly covered by another window (which it usually is), then the picture comes out looking like this: ![00016|690x388](upload://aqBQIkmQ2AxhHdMTawFdx5Dlww6.png)

Notice that the top part of the picture looks somewhat normal, but the entire bottom is smeared; that is because the entire bottom section is covered by another window (the Python editor)…


SO, what I need to get this to work is actually very simple: I just need a way to "resize the window" behind the scenes, preferably so the user doesn't even see a new window open up (or, if I need to open an actual window, that would be OK too), and then I need to be able to get the 3D image of that window, similarly to how the render function works.

I made a post on the Blender Stack Exchange (linked above), but I didn't really get the answer I was looking for. The main answer just said to use the **`gi`** library for screenshots, but first of all it's almost impossible to install on my computer, and I don't know how to get an EXACT resolution (like 1920x1080) from it. I also don't know if the screenshot would have an alpha background, whereas this solution, so far, does preserve alpha in the picture.


The bulk of this approach is basically from the Blender docs (linked in the above Stack Exchange post, at the beginning of "_**EDIT 3**_").

I really need someone who understands offscreen rendering properly. Is there a way to do it so that the camera view is actually captured at 1920x1080, and not just by scaling / pixelating the OpenGL output? Besides the pixelation, there is still the problem of other windows overlaying the viewport.
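
One direction I have been poking at (an untested sketch): read the offscreen's color texture back directly with `glGetTexImage`, instead of `glReadPixels` on the window's back buffer. The texture itself is always the full offscreen resolution, so whatever is covering the viewport shouldn't matter:

    import bgl, gpu, bpy

    WIDTH = bpy.context.scene.render.resolution_x
    HEIGHT = bpy.context.scene.render.resolution_y
    offscreen = gpu.types.GPUOffScreen(WIDTH, HEIGHT)

    def readOffscreen():
        # ... offscreen.draw_view3d(...) as in the script above ...
        # then read the offscreen color texture, not the window back buffer
        buffer = bgl.Buffer(bgl.GL_BYTE, WIDTH * HEIGHT * 4)
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, offscreen.color_texture)
        bgl.glGetTexImage(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGBA,
                          bgl.GL_UNSIGNED_BYTE, buffer)
        return buffer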


SO: How can I (either by editing the C code more or by expanding this Python script) get the 3D camera-view image of the viewport at 1920x1080, without any windows blocking it? Preferably I'd like to use the method made so far, and not some other 3rd-party screenshot addon, since I also want to get the dataURI for use with a websocket… but the main thing is:

**_How can I get a 1920/1080 resolution image of the camera-view of the rendered scene?_**

You can get a 3dview into borderless full screen by first putting Blender into true full screen mode with Window->Toggle Window Fullscreen, and then using CTRL+ALT+SPACE to fill the Blender window with the 3dview, which should leave you without any borders, etc.

I wonder if from there you could use a video capture tool (Fraps or any of the tools people use for streaming these days) to capture the screen frames at whatever rate you want and feed them into whatever the rest of your pipeline is.

You ought to be able to do this on a second monitor while retaining control over Blender on the first monitor (Window->New Window or New Main Window, depending on how you want it to sync the two).
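
For reference, those menu entries are exposed to Python as operators, so a minimal sketch of scripting them (2.80 operator names, as far as I know) would be:

    import bpy

    # Window -> Toggle Window Fullscreen
    bpy.ops.wm.window_fullscreen_toggle()

    # Window -> New Window (2.80 also has wm.window_new_main)
    bpy.ops.wm.window_new()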

Hi, thanks for the reply, I didn’t know you could fullscreen Blender that way… although I only have / want to use one monitor. Is there any way to open the window in the “background” and set the camera view to 1920x1080? What if my monitor is smaller than 1920x1080, how can I still get the image in that resolution, similarly to how the render function does it, where it opens a new window and you (the user) see it rendering, and the output is any resolution you want, but the actual window stays the same size? Do I have to modify any C++ to be able to duplicate the render function but, instead of fully rendering the whole scene, just do a quick

    buffer = bgl.Buffer(bgl.GL_BYTE, WIDTH * HEIGHT * 4)
    bgl.glReadBuffer(bgl.GL_BACK)
    bgl.glReadPixels(0, 0, w, h, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)

in Python? This is DEFINITELY possible to do, at least the way the render function does it, but I just don’t know what part of the C++ to edit…

Also, I don’t plan on using external software to capture, since there might be lag and then the framerate would be off… I want to capture an image sequence in real time, which already works in a way, although with my current code the output is smeared at the bottom and I don’t know how to set it to a specific resolution.

Also, trying CTRL+ALT+SPACE, just an options toolbox shows up, but nothing happens.

With a recent 2.80, and with your mouse over the 3dview when you do the CTRL+ALT+SPACE?

You’re out of my depth when it comes to scripting this stuff in Python, so I can’t be of too much help there yet. Another way of getting workspace renders is of course the ability to render animations from the 3dview, and this uses the current frame size settings etc. so you can get any size images that you want. It might be worth looking at how this is implemented and whether you can get the same thing from Python, or you could potentially use it as is and pick up the generated frames from the output directory.

Hi, thanks, I’m aware of the viewport render animation, although it is a LOT slower than simply playing the timeline. I have been looking a little at the C++ source for it, and I was trying to implement the new window-open, but I guess I can just try some more…
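
For reference, the viewport render animation is exposed to Python as the render.opengl operator, so the scripted version of that suggestion is roughly:

    import bpy

    # OpenGL-render the animation from the current 3D viewport, using the
    # scene's resolution and output path settings
    bpy.ops.render.opengl(animation=True, view_context=True)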

Good to know someone is trying to implement this.
With streaming software like OBS you can capture a window even if it is in the background.
With other software like Vision Mappio (projection mapping software, free on Linux) you can only capture a screen.
If you’re using Linux you can make a virtual screen and place Blender’s window there for capture.
Still, as you said, having a window makes things slower… It would be wonderful to implement an NDI output in Blender,
or for you to manage to stream out the video GL buffer.
Please keep it up, looking forward to a more efficient solution.


This is a solution:

Use Spout:
https://leadedge.github.io/

and the Spout addon for Blender.

This can give some of the functionality that you are after.

Daniel


And I forgot to mention NDI:

You could also send to v4l2loopback (a fake webcam) or shmdata, which is easily convertible to NDI.
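
As a sketch of the v4l2loopback route (assuming an ffmpeg build with v4l2 output and a loopback device at /dev/video2, both assumptions):

    import subprocess

    WIDTH, HEIGHT, FPS = 1920, 1080, 24

    # pipe raw RGBA frames into ffmpeg, which forwards them to the fake webcam
    ffmpeg = subprocess.Popen([
        "ffmpeg", "-f", "rawvideo", "-pix_fmt", "rgba",
        "-s", "%dx%d" % (WIDTH, HEIGHT), "-r", str(FPS), "-i", "-",
        "-f", "v4l2", "/dev/video2",
    ], stdin=subprocess.PIPE)

    def sendFrame(rgba_bytes):
        # one WIDTH * HEIGHT * 4 byte frame per call
        ffmpeg.stdin.write(rgba_bytes)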

This reads the current buffer and records it as a PPM:

import bgl, bpy

WIDTH = bpy.context.scene.render.resolution_x
HEIGHT = bpy.context.scene.render.resolution_y

# 3 components per pixel to match the GL_RGB read below
buffer = bgl.Buffer(bgl.GL_BYTE, WIDTH * HEIGHT * 3)
bgl.glReadPixels(0, 0, WIDTH, HEIGHT, bgl.GL_RGB, bgl.GL_UNSIGNED_BYTE, buffer)

# write a plain-text (P3) PPM; note glReadPixels returns rows bottom-up,
# so the image comes out vertically flipped
f = open("demofile.ppm", "w")
f.write("P3\n")
f.write(str(WIDTH) + " " + str(HEIGHT) + "\n")
f.write("255\n")
for x in buffer.to_list():
    f.write(str(x & 0xFF))  # GL_BYTE values are signed; mask to 0..255 for PPM
    f.write(" ")
f.close()
bgl.glDisable(bgl.GL_BLEND)
bgl.glDisable(bgl.GL_LINE_SMOOTH)
bgl.glDisable(bgl.GL_DEPTH_TEST)

draw_texture_2d(offscreen.color_texture, (0, 0), dWIDTH, dHEIGHT)
buffer = bgl.Buffer(bgl.GL_BYTE, dWIDTH * dHEIGHT * 4)  # GL_BYTE to match the GL_UNSIGNED_BYTE read
bgl.glReadPixels(0, 0, dWIDTH, dHEIGHT, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, buffer)
 ...

I got it working, but I get the viewport with the pseudo-render. I’d like to know how I can do this without showing it on screen.

Blender to Shmdata