On 07/15/2013 08:06 AM, Tomasz Lis wrote:
> From: Tomasz Lis <[email protected]>
>
> Replaces old use of floatMode attribute with new, extended range of
> values in __DRI_ATTRIB_RENDER_TYPE. Also adds new conditions, where
> the float modes support requires it. Enables support for not only
> float configs, but packed float configs as well. Requires MESA
> to be patched with "Float fbconfigs frontend patch [3/3]".

There should be a configure dependency on Mesa 9.2. That will ensure
that the new __DRI_ATTRIB_RENDER_TYPE values are available.
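
Failing that, even a compile-time guard in glxdricommon.c would turn a
too-old dri_interface.h into an obvious build failure instead of a
server that silently misses configs. Untested sketch, and it assumes
the new bits are plain #defines in Mesa's dri_interface.h (which I
believe they are as of 9.2):

    #include <GL/internal/dri_interface.h>

    /* Sketch only: refuse to build against headers that predate the
     * float render-type bits. */
    #if !defined(__DRI_ATTRIB_FLOAT_BIT) || \
        !defined(__DRI_ATTRIB_UNSIGNED_FLOAT_BIT)
    #error "dri_interface.h too old: float fbconfigs need Mesa 9.2 or newer"
    #endif

A pkg-config version check at configure time is still the better place
for this, since it catches the problem before compilation starts.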
> Signed-off-by: Tomasz Lis <[email protected]>
> ---
>  glx/glxcmds.c          |   12 +++++++++++-
>  glx/glxdricommon.c     |   27 ++++++++++++++++++++++++---
>  hw/xwin/glx/indirect.c |    4 +++-
>  3 files changed, 38 insertions(+), 5 deletions(-)
>
> diff --git a/glx/glxcmds.c b/glx/glxcmds.c
> index 5b7a628..716e60c 100644
> --- a/glx/glxcmds.c
> +++ b/glx/glxcmds.c
> @@ -1400,6 +1400,7 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID
> fbconfigId,
>      __GLXconfig *config;
>      __GLXscreen *pGlxScreen;
>      PixmapPtr pPixmap;
> +    int depth;
>      int err;
>
>      LEGAL_NEW_RESOURCE(glxDrawableId, client);
> @@ -1409,10 +1410,19 @@ DoCreatePbuffer(ClientPtr client, int screenNum, XID
> fbconfigId,
>      if (!validGlxFBConfig(client, pGlxScreen, fbconfigId, &config, &err))
>          return err;
>
> +    if (config->renderType & (GLX_RGBA_FLOAT_BIT_ARB |
> +        GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT)) {
> +        /* There are no float X pixmaps - create a dummy one, with 1 BPP. */
> +        depth = 1;
> +    } else {
> +        depth = config->rgbBits;
> +    }

I still think this is no good. With the changes that are going to be
made on the client side, is this hack even necessary?

> +
> +
>      __glXenterServer(GL_FALSE);
>      pPixmap = (*pGlxScreen->pScreen->CreatePixmap) (pGlxScreen->pScreen,
>                                                      width, height,
> -                                                    config->rgbBits, 0);
> +                                                    depth, 0);
>      __glXleaveServer(GL_FALSE);
>
>      /* Assign the pixmap the same id as the pbuffer and add it as a
> diff --git a/glx/glxdricommon.c b/glx/glxdricommon.c
> index b027f24..ab9a0bf 100644
> --- a/glx/glxdricommon.c
> +++ b/glx/glxdricommon.c
> @@ -36,6 +36,7 @@
>  #include <GL/internal/dri_interface.h>
>  #include <os.h>
>  #include "glxserver.h"
> +#include "glxext.h"
>  #include "glxcontext.h"
>  #include "glxscreens.h"
>  #include "glxdricommon.h"
> @@ -127,6 +128,7 @@ createModeFromConfig(const __DRIcoreExtension * core,
>                       unsigned int visualType, unsigned int drawableType)
>  {
>      __GLXDRIconfig *config;
> +    GLint renderType = 0;
>      unsigned int attrib, value;
>      int i;
>
> @@ -138,11 +140,14 @@ createModeFromConfig(const __DRIcoreExtension * core,
>      while (core->indexConfigAttrib(driConfig, i++, &attrib, &value)) {
>          switch (attrib) {
>          case __DRI_ATTRIB_RENDER_TYPE:
> -            config->config.renderType = 0;
>              if (value & __DRI_ATTRIB_RGBA_BIT)
> -                config->config.renderType |= GLX_RGBA_BIT;
> +                renderType |= GLX_RGBA_BIT;
>              if (value & __DRI_ATTRIB_COLOR_INDEX_BIT)
> -                config->config.renderType |= GLX_COLOR_INDEX_BIT;
> +                renderType |= GLX_COLOR_INDEX_BIT;
> +            if (value & __DRI_ATTRIB_FLOAT_BIT)
> +                renderType |= GLX_RGBA_FLOAT_BIT_ARB;
> +            if (value & __DRI_ATTRIB_UNSIGNED_FLOAT_BIT)
> +                renderType |= GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT;
>              break;
>          case __DRI_ATTRIB_CONFIG_CAVEAT:
>              if (value & __DRI_ATTRIB_NON_CONFORMANT_CONFIG)
> @@ -168,6 +173,9 @@ createModeFromConfig(const __DRIcoreExtension * core,
>          }
>      }
>
> +    if (renderType) /* fbconfig render type are bit fields */
> +      config->config.renderType = renderType;
> +

Indentation error, but... I don't think this needs to be conditionalized
at all. Setting config->config.renderType seems better than potentially
leaving it uninitialized. Right?
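
To be concrete, something along these lines is all I have in mind
(untested, written against the quoted hunk):

    /* Accumulate the GLX render type bits in the local variable... */
    GLint renderType = 0;

    while (core->indexConfigAttrib(driConfig, i++, &attrib, &value)) {
        switch (attrib) {
        case __DRI_ATTRIB_RENDER_TYPE:
            if (value & __DRI_ATTRIB_RGBA_BIT)
                renderType |= GLX_RGBA_BIT;
            if (value & __DRI_ATTRIB_COLOR_INDEX_BIT)
                renderType |= GLX_COLOR_INDEX_BIT;
            if (value & __DRI_ATTRIB_FLOAT_BIT)
                renderType |= GLX_RGBA_FLOAT_BIT_ARB;
            if (value & __DRI_ATTRIB_UNSIGNED_FLOAT_BIT)
                renderType |= GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT;
            break;
        /* ... other cases unchanged ... */
        }
    }

    /* ...and store it unconditionally, so renderType is never left
     * unset.  A config that reports no recognized bits just ends up
     * with 0. */
    config->config.renderType = renderType;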
>      config->config.next = NULL;
>      config->config.xRenderable = GL_TRUE;
>      config->config.visualType = visualType;
> @@ -187,6 +195,12 @@ glxConvertConfigs(const __DRIcoreExtension * core,
>      head.next = NULL;
>
>      for (i = 0; configs[i]; i++) {
> +        /* Add only integer RGB modes as GLX_TRUE_COLOR modes */
> +        int renderType = 0;
> +        if
> (core->getConfigAttrib(configs[i],__DRI_ATTRIB_RENDER_TYPE,&renderType)) {
> +            if ((renderType & __DRI_ATTRIB_RGBA_BIT) == 0) continue;

"continue" should be on its own line, and the commas should have a space
after each.

> +        }
> +        /* Add all the others */
>          tail->next = createModeFromConfig(core,
>                                            configs[i], GLX_TRUE_COLOR,
>                                            drawableType);
> @@ -197,6 +211,13 @@ glxConvertConfigs(const __DRIcoreExtension * core,
>      }
>
>      for (i = 0; configs[i]; i++) {
> +        /* Don't add float modes at all if we don't want pbuffer
> drawableTypes */
> +        int renderType = 0;
> +        if
> (core->getConfigAttrib(configs[i],__DRI_ATTRIB_RENDER_TYPE,&renderType)) {
> +            if ((renderType & (__DRI_ATTRIB_UNSIGNED_FLOAT_BIT |
> +                __DRI_ATTRIB_FLOAT_BIT)) && !(drawableType &
> GLX_PBUFFER_BIT)) continue;

Same style comments here as above. What's really missing here is some
rationale for the changes. Why is this filtering necessary? Why is the
filtering different for true color and direct color? Without that, I
can't evaluate whether or not it's correct.

> +        }
> +        /* Add all the others */
>          tail->next = createModeFromConfig(core,
>                                            configs[i], GLX_DIRECT_COLOR,
>                                            drawableType);
> diff --git a/hw/xwin/glx/indirect.c b/hw/xwin/glx/indirect.c
> index 14a4711..e389499 100644
> --- a/hw/xwin/glx/indirect.c
> +++ b/hw/xwin/glx/indirect.c
> @@ -386,7 +386,9 @@ fbConfigsDump(unsigned int n, __GLXconfig * c)
>              c->accumAlphaBits, c->sampleBuffers, c->samples,
>              (c->drawableType & GLX_WINDOW_BIT) ? "y" : ".",
>              (c->drawableType & GLX_PIXMAP_BIT) ? "y" : ".",
> -            (c->drawableType & GLX_PBUFFER_BIT) ? "y" : ".", ".",
> +            (c->drawableType & GLX_PBUFFER_BIT) ? "y" : ".",
> +            (c->renderType & (GLX_RGBA_FLOAT_BIT_ARB |
> +                GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT)) ? "y" : ".",
>              (c->transparentPixel != GLX_NONE_EXT) ? "y" : ".",
>              c->visualSelectGroup,
>              (c->visualRating == GLX_SLOW_VISUAL_EXT) ? "*" : " ");
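
Coming back to the filtering in glxConvertConfigs: if it really is
needed, I'd at least pull the attribute query out into a small helper
so the two loops don't duplicate it. Untested sketch, and the helper
name is made up:

    /* Sketch only: fetch __DRI_ATTRIB_RENDER_TYPE once, returning 0 if
     * the driver doesn't report it. */
    static unsigned int
    driRenderType(const __DRIcoreExtension *core,
                  const __DRIconfig *driConfig)
    {
        unsigned int renderType = 0;

        if (!core->getConfigAttrib(driConfig, __DRI_ATTRIB_RENDER_TYPE,
                                   &renderType))
            return 0;

        return renderType;
    }

Each loop then reduces to something like

    if ((driRenderType(core, configs[i]) & __DRI_ATTRIB_RGBA_BIT) == 0)
        continue;

which also takes care of the style nits above. But the rationale
question still needs an answer first.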
