0

Years ago I implemented a CPU raytracing software and things work properly. I then implemented a DirectX 12 viewer. I just found out there is some discrepancies between my 2 implementations when i t comes to texture mapping. Negative texture coordinates are not properly handled.

The test scene i am using is the Sponza model from the McGuire Computer Graphics Archive.
The test mesh is named "sponza_34".

Here what i get with DirectX 12:

enter image description here

Here what i get by CPU raytracing:

enter image description here

Here the issue:

enter image description here

The issue is where there is negative texture coordinates. DirectX and CPU gives different results.
Here the diff.

CPU
enter image description here

DirectX 12

enter image description here

Investigations

  1. I first checked if the texture is loaded properly on the GPU. PIX says yes
    enter image description here

  2. I validated that the vertex and index buffer GPU side are the same than the CPU.
    To do so i checked the first and last vertices, values matches. The mesh has 612 vertices so i cannot validate them one by one.
    Unfortunately that test passes too.

So i am doing something wrong on one of these platforms. My bet is the issue comes from the CPU raytracer.

DirectX12 Code
I use a deferred rendering system. Here how the Gbuffer is rendered.
Vertex shader:

#include "MeshGroup.hlsli"

struct VS_INPUT
{
    float3 position : POSITION;
    float3 normal : NORMAL;
    float3 tangent : TANGENT;
    float3 bitangent : BITANGENT;
    float2 texCoord: TEXCOORD;
};

struct VS_OUTPUT
{
    float4 position: SV_POSITION;
    float3 worldPosition : POSITION;
    float3 tangent : Tangent;
    float3 bitangent : Bitangent;
    float3 normal : NORMAL;
    float2 texCoord: TEXCOORD;
};

cbuffer VertexShaderSharedCB : register(b0)
{
    float4x4 vpMat;
};

VS_OUTPUT main(VS_INPUT input, uint instanceID : SV_InstanceID)
{
    VS_OUTPUT output;

    const float4x4 modelMat = meshGroupDatas[instanceID].transform;
    const float4 worldPosition = mul(float4(input.position, 1.0f), modelMat);
    output.worldPosition = worldPosition.xyz;
    output.position = mul(worldPosition, vpMat);
    output.texCoord = input.texCoord;
    output.normal = normalize(mul(float4(input.normal, 0.0f), modelMat));
    output.tangent = normalize(mul(float4(input.tangent, 0.0f), modelMat));
    output.bitangent = normalize(mul(float4(input.bitangent, 0.0f), modelMat));

    return output;
}

Pixel shader:

#include "SharedLightning_PS.hlsli"

struct GBufferPSOut
{
    float4 positionWsOccluded : SV_TARGET0;
    float4 normalWs : SV_TARGET1;
    float4 tangentWs : SV_TARGET2;
    float4 bitangentWs : SV_TARGET3;
    float4 albedoShininess : SV_TARGET4;
    float4 specularAnisotropy : SV_TARGET5;
    float4 emissiveMaterialType : SV_TARGET6;
};

GBufferPSOut main(VS_OUTPUT input)
{
    const float3 P = input.worldPosition;
    const float3 N = computeNormal(Material, normalTex, tSampler, input.normal, input.tangent, input.bitangent, input.texCoord);

    float4 matBaseColor = Material.baseColor;
    if (Material.hasBaseColorTex)
    {
        matBaseColor *= baseColorTex.Sample(tSampler, input.texCoord);
    }

    float matShininess = Material.shininess;
    if (Material.hasGlossTex)
    {
        float roughness = _GLOSS(matShininess);
        roughness *= glossTex.Sample(tSampler, input.texCoord).r;
        matShininess = _SHININESS(roughness);
    }

    GBufferPSOut psOut;
    psOut.positionWsOccluded = float4(P, 1.0f);
    psOut.normalWs = float4(N, 1.0f);
    psOut.tangentWs = float4(input.tangent, 1.0f);
    psOut.bitangentWs = float4(input.bitangent, 1.0f);
    psOut.albedoShininess = float4(matBaseColor.xyz, matShininess);
    psOut.specularAnisotropy = Material.specular;

    if (Material.type == EMITTER_IDX)
        psOut.emissiveMaterialType = float4(matBaseColor.xyz, Material.type);
    else
        psOut.emissiveMaterialType = float4(Material.emissive.xyz, Material.type);

    return psOut;
}

CPU side code:
Texture coordinates computation


inline Math::Vec2 BaseMaterial::interpolateTexCoordinates(const Math::Vec2& t1, const Math::Vec2& t2, const Math::Vec2& t3, const Math::Vec3& coefs) const
{
    Math::Vec2 texCoord = (t1 * coefs.x) + (t2 * coefs.y) + (t3 * coefs.z);

    texCoord.s = std::abs(texCoord.s);
    texCoord.t = std::abs(texCoord.t);

    double dummy;
    if (texCoord.s > 1.0f)
        texCoord.s = (float)std::modf(texCoord.s, &dummy);

    if (texCoord.t > 1.0f)
        texCoord.t = (float)std::modf(texCoord.t, &dummy);

    return texCoord;
}

The calling function:

    IntersectionProperties buildIntersectionProperties(const Math::Ray& ray, const Intersector::IntersectionInfo& info, const Scene::BaseScene* scene)
    {
        const auto mesh = info.object;
        const auto P = ray.getPoint(info.meshIntersectData.t);

        const uint32_t triStartIdx = info.meshIntersectData.primId * _PRIMITIVE_NB_VTX;

        _ASSERT(_PRIMITIVE_NB_VTX == 3u);
        const auto v1 = mesh->buildTransformedVertexFromIndex(triStartIdx);
        const auto v2 = mesh->buildTransformedVertexFromIndex(triStartIdx + 1);
        const auto v3 = mesh->buildTransformedVertexFromIndex(triStartIdx + 2);

        float area = 0.0f;
        {
            const Math::Vec3 e2 = v2.position - v1.position;
            const Math::Vec3 e3 = v3.position - v1.position;

            area = 0.5f * glm::length(glm::cross(e2, e3));
            area = glm::max(area, 1e-10f);
        }

        const Math::Vec3 coefs = Math::interpolate(v1.position, v2.position, v3.position, P, area);

        // Read material
        const Model::ModelPtr& model = scene->getModel();
        const Material::BaseMaterial* material = model->fastGetMaterialRawPtr_FromEntityOrDefault(mesh->getMaterialId());

        // Texture coordinates
        Math::Vec2 texCoord = material->interpolateTexCoordinates(v1.texCoord, v2.texCoord, v3.texCoord,  coefs);

        // Eye vector
        const Math::Vec3 V = -ray.getDirection();

        // Compute normal
        Math::Vec3 N = (v1.normal * coefs.x) + (v2.normal * coefs.y) + (v3.normal * coefs.z);

        // Tangent and bitangent
        Math::Vec3 T = (v1.tangent * coefs.x) + (v2.tangent * coefs.y) + (v3.tangent * coefs.z);
        Math::Vec3 B = (v1.bitangent * coefs.x) + (v2.bitangent * coefs.y) + (v3.bitangent * coefs.z);

        // Apply normal mapping
        if (material->isFresnelMaterial())
        {
            const auto* fresnelMat = static_cast<const Material::FresnelMaterial*>(material);
            const EntityIdentifier normalMapId = fresnelMat->getNormalImageId();
            
            if (normalMapId)
            {
                // Read bump map
                const auto image = Texture::fastGetRGBAImageRawPtr_FromEntity(normalMapId);
                if (image)
                {
                    const RGBAFColor bumpMapNormal = image->getNormalizedPixelFromRatio(texCoord) * 2.0f - 1.0f;
                    const Math::Mat3 tbn = Math::Mat3(T, B, N);

                    // Bump mapped normal
                    N = tbn * glm::swizzle<glm::X, glm::Y, glm::Z>(bumpMapNormal);
                }
            }
        }

        // Finalize normal
        N = glm::normalize(N);

        if (glm::dot(N, V) < 0.0f)
            N *= -1;

        IntersectionProperties props;
        props.P = P;
        props.deltaP = getOffsetedPositionInDirection(P, N, scene->getCurrentRenderSettings().m_rayEpsilon);
        props.inDeltaP = getOffsetedPositionInDirection(P, -N, scene->getCurrentRenderSettings().m_rayEpsilon);
        props.V = V;
        props.texCoord = texCoord;
        props.BsdfProps.N = N;
        props.BsdfProps.T = T;
        props.BsdfProps.B = B;

        return props;
    }

What am I doing wrong?

THANKS!

0

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.