Commit c9878cb1 by Austin Kinross Committed by Geoff Lang

Fix normalized GL_BYTE vertex attributes on D3D11 9_3

Feature Level 9_3 doesn't support as many formats for Input Layouts as 10_0+. On 9_3, we have to make sure that GL vertex attributes are converted into formats that 9_3 supports.

Change-Id: I27b9a85a6eb21a37bd36e60bf011b83fce743fd0
Reviewed-on: https://chromium-review.googlesource.com/234523
Tested-by: Austin Kinross <aukinros@microsoft.com>
Reviewed-by: Jamie Madill <jmadill@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
parent 44b422c1
......@@ -20,6 +20,9 @@ template <typename T, size_t componentCount, uint32_t widenDefaultValueBits>
inline void CopyNativeVertexData(const uint8_t *input, size_t stride, size_t count, uint8_t *output);
template <size_t componentCount>
inline void Copy8SnormTo16SnormVertexData(const uint8_t *input, size_t stride, size_t count, uint8_t *output);
template <size_t componentCount>
inline void Copy32FixedTo32FVertexData(const uint8_t *input, size_t stride, size_t count, uint8_t *output);
template <typename T, size_t componentCount, bool normalized>
......
......@@ -40,6 +40,42 @@ inline void CopyNativeVertexData(const uint8_t *input, size_t stride, size_t cou
}
}
template <size_t inputComponentCount, size_t outputComponentCount>
inline void Copy8SnormTo16SnormVertexData(const uint8_t *input, size_t stride, size_t count, uint8_t *output)
{
    // Expand 8-bit signed-normalized components to 16-bit signed-normalized.
    // Needed where the hardware has no 8-bit SNORM vertex format (e.g. D3D11 FL9_3).
    GLshort *outBase = reinterpret_cast<GLshort*>(output);

    for (size_t elem = 0; elem < count; elem++)
    {
        const GLbyte *src = reinterpret_cast<const GLbyte*>(input + elem * stride);
        GLshort *dst = outBase + elem * outputComponentCount;

        size_t comp = 0;
        for (; comp < inputComponentCount; comp++)
        {
            const GLbyte value = src[comp];
            if (value > 0)
            {
                // Positive range: map [1, 127] onto [?, 32767] by replicating the
                // 7 magnitude bits into the low bits (127 -> 0x7FFF exactly).
                dst[comp] = (value << 8) | (value << 1) | ((value & 0x40) >> 6);
            }
            else
            {
                // Zero and negative range: a plain shift maps [-128, 0] onto [-32768, 0].
                dst[comp] = value << 8;
            }
        }

        // Zero-fill any unused G/B channels of the wider output format.
        for (; comp < std::min<size_t>(outputComponentCount, 3); comp++)
        {
            dst[comp] = 0;
        }

        // An unused alpha channel of a normalized format must read as 1.0,
        // i.e. the maximum positive 16-bit SNORM value.
        if (inputComponentCount < outputComponentCount && outputComponentCount == 4)
        {
            dst[3] = INT16_MAX;
        }
    }
}
template <size_t componentCount>
inline void Copy32FixedTo32FVertexData(const uint8_t *input, size_t stride, size_t count, uint8_t *output)
{
......
......@@ -63,6 +63,7 @@ void InputLayoutCache::initialize(ID3D11Device *device, ID3D11DeviceContext *con
clear();
mDevice = device;
mDeviceContext = context;
mFeatureLevel = device->GetFeatureLevel();
}
void InputLayoutCache::clear()
......@@ -110,7 +111,7 @@ gl::Error InputLayoutCache::applyVertexBuffers(TranslatedAttribute attributes[gl
D3D11_INPUT_CLASSIFICATION inputClass = attributes[i].divisor > 0 ? D3D11_INPUT_PER_INSTANCE_DATA : D3D11_INPUT_PER_VERTEX_DATA;
gl::VertexFormat vertexFormat(*attributes[i].attribute, attributes[i].currentValueType);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat, mFeatureLevel);
// Record the type of the associated vertex shader vector in our key
// This will prevent mismatched vertex shaders from using the same input layout
......
......@@ -94,6 +94,7 @@ class InputLayoutCache
ID3D11Device *mDevice;
ID3D11DeviceContext *mDeviceContext;
D3D_FEATURE_LEVEL mFeatureLevel;
};
}
......
......@@ -3430,12 +3430,12 @@ bool Renderer11::getLUID(LUID *adapterLuid) const
VertexConversionType Renderer11::getVertexConversionType(const gl::VertexFormat &vertexFormat) const
{
return d3d11::GetVertexFormatInfo(vertexFormat).conversionType;
return d3d11::GetVertexFormatInfo(vertexFormat, mFeatureLevel).conversionType;
}
GLenum Renderer11::getVertexComponentType(const gl::VertexFormat &vertexFormat) const
{
return d3d11::GetDXGIFormatInfo(d3d11::GetVertexFormatInfo(vertexFormat).nativeFormat).componentType;
return d3d11::GetDXGIFormatInfo(d3d11::GetVertexFormatInfo(vertexFormat, mFeatureLevel).nativeFormat).componentType;
}
void Renderer11::generateCaps(gl::Caps *outCaps, gl::TextureCapsMap *outTextureCaps, gl::Extensions *outExtensions) const
......
......@@ -115,7 +115,7 @@ gl::Error VertexBuffer11::storeVertexAttributes(const gl::VertexAttribute &attri
}
gl::VertexFormat vertexFormat(attrib, currentValue.Type);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat, mRenderer->getFeatureLevel());
ASSERT(vertexFormatInfo.copyFunction != NULL);
vertexFormatInfo.copyFunction(input, inputStride, count, output);
......@@ -141,7 +141,7 @@ gl::Error VertexBuffer11::getSpaceRequired(const gl::VertexAttribute &attrib, GL
}
gl::VertexFormat vertexFormat(attrib);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat);
const d3d11::VertexFormat &vertexFormatInfo = d3d11::GetVertexFormatInfo(vertexFormat, mRenderer->getFeatureLevel());
const d3d11::DXGIFormat &dxgiFormatInfo = d3d11::GetDXGIFormatInfo(vertexFormatInfo.nativeFormat);
unsigned int elementSize = dxgiFormatInfo.pixelBytes;
if (elementSize <= std::numeric_limits<unsigned int>::max() / elementCount)
......
......@@ -897,6 +897,24 @@ static void AddIntegerVertexFormatInfo(D3D11VertexFormatInfoMap *map, GLenum inp
map->insert(D3D11VertexFormatPair(inputFormat, info));
}
static D3D11VertexFormatInfoMap BuildD3D11_FL9_3VertexFormatInfoOverrideMap()
{
    // Feature Level 9_3 supports a reduced set of vertex buffer formats compared to 10_0+:
    // http://msdn.microsoft.com/en-us/library/windows/desktop/ff471324(v=vs.85).aspx
    // Notably it lacks every 8-bit _SNORM format, so normalized GL_BYTE attributes
    // must be expanded to 16-bit SNORM on the CPU before upload.
    D3D11VertexFormatInfoMap overrides;

    // GL_BYTE -- normalized, 1 to 4 components.
    AddVertexFormatInfo(&overrides, GL_BYTE, GL_TRUE, 1, VERTEX_CONVERT_CPU, DXGI_FORMAT_R16G16_SNORM,       &Copy8SnormTo16SnormVertexData<1, 2>);
    AddVertexFormatInfo(&overrides, GL_BYTE, GL_TRUE, 2, VERTEX_CONVERT_CPU, DXGI_FORMAT_R16G16_SNORM,       &Copy8SnormTo16SnormVertexData<2, 2>);
    AddVertexFormatInfo(&overrides, GL_BYTE, GL_TRUE, 3, VERTEX_CONVERT_CPU, DXGI_FORMAT_R16G16B16A16_SNORM, &Copy8SnormTo16SnormVertexData<3, 4>);
    AddVertexFormatInfo(&overrides, GL_BYTE, GL_TRUE, 4, VERTEX_CONVERT_CPU, DXGI_FORMAT_R16G16B16A16_SNORM, &Copy8SnormTo16SnormVertexData<4, 4>);

    return overrides;
}
static D3D11VertexFormatInfoMap BuildD3D11VertexFormatInfoMap()
{
D3D11VertexFormatInfoMap map;
......@@ -1054,9 +1072,20 @@ static D3D11VertexFormatInfoMap BuildD3D11VertexFormatInfoMap()
return map;
}
const VertexFormat &GetVertexFormatInfo(const gl::VertexFormat &vertexFormat)
const VertexFormat &GetVertexFormatInfo(const gl::VertexFormat &vertexFormat, D3D_FEATURE_LEVEL featureLevel)
{
static const D3D11VertexFormatInfoMap vertexFormatMap = BuildD3D11VertexFormatInfoMap();
static const D3D11VertexFormatInfoMap vertexFormatMapFL9_3Override = BuildD3D11_FL9_3VertexFormatInfoOverrideMap();
if (featureLevel == D3D_FEATURE_LEVEL_9_3)
{
// First see if the format has a special mapping for FL9_3
D3D11VertexFormatInfoMap::const_iterator iter = vertexFormatMapFL9_3Override.find(vertexFormat);
if (iter != vertexFormatMapFL9_3Override.end())
{
return iter->second;
}
}
D3D11VertexFormatInfoMap::const_iterator iter = vertexFormatMap.find(vertexFormat);
if (iter != vertexFormatMap.end())
......
......@@ -75,7 +75,7 @@ struct VertexFormat
DXGI_FORMAT nativeFormat;
VertexCopyFunction copyFunction;
};
const VertexFormat &GetVertexFormatInfo(const gl::VertexFormat &vertexFormat);
const VertexFormat &GetVertexFormatInfo(const gl::VertexFormat &vertexFormat, D3D_FEATURE_LEVEL featureLevel);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment