I've been messing around with OpenGL/ModernGL in Python/Pygame, and after getting my images rendered to the screen, I have been having issues rotating them.
I'm aware of the order that I need to translate, rotate and then scale, which works fine, however my problem stems from the scaling of images being a percentage of the screen resolution. My approach has been to create a shader that spans the screen and then when I need my image to fit, I scale it down to lets say 0.2 (aspect ratio) on the X and 0.1 on the Y.
When the image is rotated by 0 degrees (i.e. horizontal) that 0.1 on the y-axis makes the image 0.1 of the height of the screen, however when it is rotated by 90 degrees (vertical) the same value of 0.1 now scales the image by 0.1 of the width of the screen instead. Due to this, when I rotate a square by 90 degrees it becomes stretched horizontally.
Some attached images showing the problems below. The image used is just a start button from another old project of mine. The button is scaled to (0.5, 0.5) and initially rotated 0, then 90 degrees.
I'm not quite sure if there is a common way to fix this or if my approach to rendering images is wrong.
Below is the relevant code:
Inside of vbo.py:
class SurfaceVBO(BaseVBO):
def __init__(self, app, context):
super().__init__(app, context)
self.format = '2f 2f'
self.attribs = ['in_uv', 'in_position']
def getVertexData(self):
#aspect_ratio = self.app.height / self.app.width
aspect_ratio = 1
vertices = [((-1 * aspect_ratio),-1),((1 * aspect_ratio),-1),((1 * aspect_ratio),1),((-1 * aspect_ratio),1)]
indices = [(0,2,3),(0,1,2)]
tex_coord = [(0,0), (1,0), (1,1), (0,1)]
tex_coord_indices = [(0,2,3),(0,1,2)]
vertex_data = self.getData(vertices, indices)
tex_coord_data = self.getData(tex_coord, tex_coord_indices)
vertex_data = np.hstack([tex_coord_data, vertex_data])
return vertex_data
Inside of surface.vert:
#version 330 core
in vec2 in_uv;
in vec2 in_position;
out vec2 uv;
// m_ortho is just a orthographic projection matrix spanning (-1, -1), (1, 1) with near 0, and far 10,000
uniform mat4 m_model;
uniform mat4 m_ortho;
void main()
{
uv = in_uv;
gl_Position = m_ortho * m_model * vec4(in_position.xy, 0, 1.0);
}
Inside of button.py:
import pygame
import math as m
import glm
class BaseButton:
def __init__(self, game, vao_name='surface', tex_id='game_menu', pos=(0,0), rot=0, scale=(1,1)):
self.game = game
self.pos = pos
self.rot = glm.radians(rot)
self.aspect_ratio = self.game.height / self.game.width
#self.aspect_ratio = 1
self.original_scale = (scale[0], scale[1])
self.scale = (self.aspect_ratio * (self.original_scale[0]), (self.original_scale[1]))
self.m_model = self.getModelMatrix()
self.vao_name = vao_name
self.tex_id = tex_id
self.vao = game.mesh.vao.vaos[vao_name]
self.program = self.vao.program
self.image = self.getPygameImage(path=f'images/{tex_id}.png')
self.rect = self.image.get_rect(center = (((self.pos[0] + 1) / 2) * self.game.width, ((-self.pos[1] + 1) / 2) * self.game.height))
self.texture = self.get2DTexture(image=self.image)
self.tint = 1
self.rend = False
def update(self): ...
def getModelMatrix(self):
# identity matrix
m_model = glm.mat4()
# position
m_model = glm.translate(m_model, (*self.pos, 0))
#print(1, "\n", m_model)
# rotation
m_model = glm.rotate(m_model, self.rot, glm.vec3(0,0,1))
#print(2, "\n", m_model)
# scaling
m_model = glm.scale(m_model, (*self.scale, 1))
#print(3, "\n", m_model)
return m_model
def getPygameImage(self, path):
image = pygame.image.load(path).convert_alpha()
image = pygame.transform.flip(image, flip_x=False, flip_y=True)
image = pygame.transform.scale(image, (self.scale[0] * self.game.width, self.scale[1] * self.game.height))
return image
def get2DTexture(self, image):
texture = self.game.context.texture(size=image.get_size(), components=4,data=pygame.image.tostring(image, 'RGBA'))
return texture
def render(self):
self.update()
if self.rend:
self.vao.render()
# print(f'DEBUG - rendering {self}')
The code runs at 60 fps, and for every frame it updates each image, and renders each image to the screen.

