VirtualBox

source: vbox/trunk/src/VBox/Devices/Graphics/shaderlib/directx.c@ 91487

Last change on this file since 91487 was 65381, checked in by vboxsync, 8 years ago

bugref:8282: Additions/linux: submit DRM driver to the Linux kernel: move all graphics device-related header files to a separate sub-directory and add that to the include path where they are needed. The intention is to be able to remove the VBox/ include folder in the DRM driver package.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 117.3 KB
/*
 * IWineD3D implementation
 *
 * Copyright 2002-2004 Jason Edmeades
 * Copyright 2003-2004 Raphael Junqueira
 * Copyright 2004 Christian Costa
 * Copyright 2005 Oliver Stieber
 * Copyright 2007-2008 Stefan Dösinger for CodeWeavers
 * Copyright 2009 Henri Verbeet for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include <stdio.h>
#include "wined3d_private.h"

#ifdef VBOX_WITH_WDDM
# include <VBoxCrHgsmi.h>
#endif

#ifdef VBOX_WITH_VMSVGA
# ifdef RT_OS_WINDOWS
DECLIMPORT(void) APIENTRY glFinish(void);
# else
void glFinish(void);
# endif
#endif

WINE_DEFAULT_DEBUG_CHANNEL(d3d);
WINE_DECLARE_DEBUG_CHANNEL(d3d_caps);

#define GLINFO_LOCATION (*gl_info)
#define WINE_DEFAULT_VIDMEM (64 * 1024 * 1024)

/* The d3d device ID */
#if 0 /* VBox: unused */
static const GUID IID_D3DDEVICE_D3DUID = { 0xaeb2cdd4, 0x6e41, 0x43ea, { 0x94,0x1c,0x83,0x61,0xcc,0x76,0x07,0x81 } };
#endif


/* Extension detection */
static const struct {
    const char *extension_string;
    GL_SupportedExt extension;
    DWORD version;
} EXTENSION_MAP[] = {
    /* APPLE */
    {"GL_APPLE_client_storage", APPLE_CLIENT_STORAGE, 0 },
    {"GL_APPLE_fence", APPLE_FENCE, 0 },
    {"GL_APPLE_float_pixels", APPLE_FLOAT_PIXELS, 0 },
    {"GL_APPLE_flush_buffer_range", APPLE_FLUSH_BUFFER_RANGE, 0 },
    {"GL_APPLE_flush_render", APPLE_FLUSH_RENDER, 0 },
    {"GL_APPLE_ycbcr_422", APPLE_YCBCR_422, 0 },

    /* ARB */
    {"GL_ARB_color_buffer_float", ARB_COLOR_BUFFER_FLOAT, 0 },
    {"GL_ARB_depth_buffer_float", ARB_DEPTH_BUFFER_FLOAT, 0 },
    {"GL_ARB_depth_clamp", ARB_DEPTH_CLAMP, 0 },
    {"GL_ARB_depth_texture", ARB_DEPTH_TEXTURE, 0 },
    {"GL_ARB_draw_buffers", ARB_DRAW_BUFFERS, 0 },
    {"GL_ARB_fragment_program", ARB_FRAGMENT_PROGRAM, 0 },
    {"GL_ARB_fragment_shader", ARB_FRAGMENT_SHADER, 0 },
    {"GL_ARB_framebuffer_object", ARB_FRAMEBUFFER_OBJECT, 0 },
    {"GL_ARB_geometry_shader4", ARB_GEOMETRY_SHADER4, 0 },
    {"GL_ARB_half_float_pixel", ARB_HALF_FLOAT_PIXEL, 0 },
    {"GL_ARB_half_float_vertex", ARB_HALF_FLOAT_VERTEX, 0 },
    {"GL_ARB_imaging", ARB_IMAGING, 0 },
    {"GL_ARB_map_buffer_range", ARB_MAP_BUFFER_RANGE, 0 },
    {"GL_ARB_multisample", ARB_MULTISAMPLE, 0 }, /* needs GLX_ARB_MULTISAMPLE as well */
    {"GL_ARB_multitexture", ARB_MULTITEXTURE, 0 },
    {"GL_ARB_occlusion_query", ARB_OCCLUSION_QUERY, 0 },
    {"GL_ARB_pixel_buffer_object", ARB_PIXEL_BUFFER_OBJECT, 0 },
    {"GL_ARB_point_parameters", ARB_POINT_PARAMETERS, 0 },
    {"GL_ARB_point_sprite", ARB_POINT_SPRITE, 0 },
    {"GL_ARB_provoking_vertex", ARB_PROVOKING_VERTEX, 0 },
    {"GL_ARB_shader_objects", ARB_SHADER_OBJECTS, 0 },
    {"GL_ARB_shader_texture_lod", ARB_SHADER_TEXTURE_LOD, 0 },
    {"GL_ARB_shading_language_100", ARB_SHADING_LANGUAGE_100, 0 },
    {"GL_ARB_sync", ARB_SYNC, 0 },
    {"GL_ARB_texture_border_clamp", ARB_TEXTURE_BORDER_CLAMP, 0 },
    {"GL_ARB_texture_compression", ARB_TEXTURE_COMPRESSION, 0 },
    {"GL_ARB_texture_cube_map", ARB_TEXTURE_CUBE_MAP, 0 },
    {"GL_ARB_texture_env_add", ARB_TEXTURE_ENV_ADD, 0 },
    {"GL_ARB_texture_env_combine", ARB_TEXTURE_ENV_COMBINE, 0 },
    {"GL_ARB_texture_env_dot3", ARB_TEXTURE_ENV_DOT3, 0 },
    {"GL_ARB_texture_float", ARB_TEXTURE_FLOAT, 0 },
    {"GL_ARB_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
    {"GL_IBM_texture_mirrored_repeat", ARB_TEXTURE_MIRRORED_REPEAT, 0 },
    {"GL_ARB_texture_non_power_of_two", ARB_TEXTURE_NON_POWER_OF_TWO, MAKEDWORD_VERSION(2, 0) },
    {"GL_ARB_texture_rectangle", ARB_TEXTURE_RECTANGLE, 0 },
    {"GL_ARB_texture_rg", ARB_TEXTURE_RG, 0 },
    {"GL_ARB_vertex_array_bgra", ARB_VERTEX_ARRAY_BGRA, 0 },
    {"GL_ARB_vertex_blend", ARB_VERTEX_BLEND, 0 },
    {"GL_ARB_vertex_buffer_object", ARB_VERTEX_BUFFER_OBJECT, 0 },
    {"GL_ARB_vertex_program", ARB_VERTEX_PROGRAM, 0 },
    {"GL_ARB_vertex_shader", ARB_VERTEX_SHADER, 0 },

    /* ATI */
    {"GL_ATI_fragment_shader", ATI_FRAGMENT_SHADER, 0 },
    {"GL_ATI_separate_stencil", ATI_SEPARATE_STENCIL, 0 },
    {"GL_ATI_texture_compression_3dc", ATI_TEXTURE_COMPRESSION_3DC, 0 },
    {"GL_ATI_texture_env_combine3", ATI_TEXTURE_ENV_COMBINE3, 0 },
    {"GL_ATI_texture_mirror_once", ATI_TEXTURE_MIRROR_ONCE, 0 },

    /* EXT */
    {"GL_EXT_blend_color", EXT_BLEND_COLOR, 0 },
    {"GL_EXT_blend_equation_separate", EXT_BLEND_EQUATION_SEPARATE, 0 },
    {"GL_EXT_blend_func_separate", EXT_BLEND_FUNC_SEPARATE, 0 },
    {"GL_EXT_blend_minmax", EXT_BLEND_MINMAX, 0 },
    {"GL_EXT_draw_buffers2", EXT_DRAW_BUFFERS2, 0 },
    {"GL_EXT_fog_coord", EXT_FOG_COORD, 0 },
    {"GL_EXT_framebuffer_blit", EXT_FRAMEBUFFER_BLIT, 0 },
    {"GL_EXT_framebuffer_multisample", EXT_FRAMEBUFFER_MULTISAMPLE, 0 },
    {"GL_EXT_framebuffer_object", EXT_FRAMEBUFFER_OBJECT, 0 },
    {"GL_EXT_gpu_program_parameters", EXT_GPU_PROGRAM_PARAMETERS, 0 },
    {"GL_EXT_gpu_shader4", EXT_GPU_SHADER4, 0 },
    {"GL_EXT_packed_depth_stencil", EXT_PACKED_DEPTH_STENCIL, 0 },
    {"GL_EXT_paletted_texture", EXT_PALETTED_TEXTURE, 0 },
    {"GL_EXT_point_parameters", EXT_POINT_PARAMETERS, 0 },
    {"GL_EXT_provoking_vertex", EXT_PROVOKING_VERTEX, 0 },
    {"GL_EXT_secondary_color", EXT_SECONDARY_COLOR, 0 },
    {"GL_EXT_stencil_two_side", EXT_STENCIL_TWO_SIDE, 0 },
    {"GL_EXT_stencil_wrap", EXT_STENCIL_WRAP, 0 },
    {"GL_EXT_texture3D", EXT_TEXTURE3D, MAKEDWORD_VERSION(1, 2) },
    {"GL_EXT_texture_compression_rgtc", EXT_TEXTURE_COMPRESSION_RGTC, 0 },
    {"GL_EXT_texture_compression_s3tc", EXT_TEXTURE_COMPRESSION_S3TC, 0 },
    {"GL_EXT_texture_env_add", EXT_TEXTURE_ENV_ADD, 0 },
    {"GL_EXT_texture_env_combine", EXT_TEXTURE_ENV_COMBINE, 0 },
    {"GL_EXT_texture_env_dot3", EXT_TEXTURE_ENV_DOT3, 0 },
    {"GL_EXT_texture_filter_anisotropic", EXT_TEXTURE_FILTER_ANISOTROPIC, 0 },
    {"GL_EXT_texture_lod_bias", EXT_TEXTURE_LOD_BIAS, 0 },
    {"GL_EXT_texture_sRGB", EXT_TEXTURE_SRGB, 0 },
    {"GL_EXT_vertex_array_bgra", EXT_VERTEX_ARRAY_BGRA, 0 },

    /* NV */
    {"GL_NV_depth_clamp", NV_DEPTH_CLAMP, 0 },
    {"GL_NV_fence", NV_FENCE, 0 },
    {"GL_NV_fog_distance", NV_FOG_DISTANCE, 0 },
    {"GL_NV_fragment_program", NV_FRAGMENT_PROGRAM, 0 },
    {"GL_NV_fragment_program2", NV_FRAGMENT_PROGRAM2, 0 },
    {"GL_NV_fragment_program_option", NV_FRAGMENT_PROGRAM_OPTION, 0 },
    {"GL_NV_half_float", NV_HALF_FLOAT, 0 },
    {"GL_NV_light_max_exponent", NV_LIGHT_MAX_EXPONENT, 0 },
    {"GL_NV_register_combiners", NV_REGISTER_COMBINERS, 0 },
    {"GL_NV_register_combiners2", NV_REGISTER_COMBINERS2, 0 },
    {"GL_NV_texgen_reflection", NV_TEXGEN_REFLECTION, 0 },
    {"GL_NV_texture_env_combine4", NV_TEXTURE_ENV_COMBINE4, 0 },
    {"GL_NV_texture_shader", NV_TEXTURE_SHADER, 0 },
    {"GL_NV_texture_shader2", NV_TEXTURE_SHADER2, 0 },
    {"GL_NV_vertex_program", NV_VERTEX_PROGRAM, 0 },
    {"GL_NV_vertex_program1_1", NV_VERTEX_PROGRAM1_1, 0 },
    {"GL_NV_vertex_program2", NV_VERTEX_PROGRAM2, 0 },
    {"GL_NV_vertex_program2_option", NV_VERTEX_PROGRAM2_OPTION, 0 },
    {"GL_NV_vertex_program3", NV_VERTEX_PROGRAM3, 0 },

    /* SGI */
    {"GL_SGIS_generate_mipmap", SGIS_GENERATE_MIPMAP, 0 },
};
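
/* Illustrative sketch (added; not part of the original file): the "version"
 * field above records the GL core version at which an extension was promoted
 * to core. Caps detection can therefore mark an extension as supported even
 * when its string is absent, provided the reported GL version (packed with
 * MAKEDWORD_VERSION, as in wined3d_parse_gl_version() below) is new enough:
 *
 *     DWORD gl_version = MAKEDWORD_VERSION(major, minor);
 *     if (!gl_info->supported[EXTENSION_MAP[i].extension]
 *             && EXTENSION_MAP[i].version
 *             && gl_version >= EXTENSION_MAP[i].version)
 *         gl_info->supported[EXTENSION_MAP[i].extension] = TRUE;
 */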

/**********************************************************
 * Utility functions follow
 **********************************************************/

const struct min_lookup minMipLookup[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST_MIPMAP_NEAREST, GL_NEAREST_MIPMAP_LINEAR}}, /* POINT */
    {{GL_LINEAR, GL_LINEAR_MIPMAP_NEAREST, GL_LINEAR_MIPMAP_LINEAR}}, /* LINEAR */
};

const struct min_lookup minMipLookup_noFilter[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* LINEAR */
};

const struct min_lookup minMipLookup_noMip[] =
{
    /* NONE POINT LINEAR */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* NONE */
    {{GL_NEAREST, GL_NEAREST, GL_NEAREST}}, /* POINT */
    {{GL_LINEAR, GL_LINEAR, GL_LINEAR}}, /* LINEAR */
};

const GLenum magLookup[] =
{
    /* NONE POINT LINEAR */
    GL_NEAREST, GL_NEAREST, GL_LINEAR,
};

const GLenum magLookup_noFilter[] =
{
    /* NONE POINT LINEAR */
    GL_NEAREST, GL_NEAREST, GL_NEAREST,
};
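
/* Example (added for clarity): the tables above are indexed with the D3D
 * filter enums WINED3DTEXF_NONE/POINT/LINEAR. Assuming the min_lookup member
 * is the usual "mip" array, a LINEAR min filter combined with a POINT mip
 * filter resolves as
 *
 *     GLenum f = minMipLookup[WINED3DTEXF_LINEAR].mip[WINED3DTEXF_POINT];
 *
 * which yields GL_LINEAR_MIPMAP_NEAREST from minMipLookup, and plain
 * GL_LINEAR from minMipLookup_noMip when mipmapping is unavailable. */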

/* drawStridedSlow attributes */
glAttribFunc position_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc diffuse_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc specular_func_3ubv;
glAttribFunc specular_funcs[WINED3D_FFP_EMIT_COUNT];
glAttribFunc normal_funcs[WINED3D_FFP_EMIT_COUNT];
glMultiTexCoordFunc multi_texcoord_funcs[WINED3D_FFP_EMIT_COUNT];


/**********************************************************
 * IWineD3D parts follow
 **********************************************************/

/* GL locking is done by the caller */
static inline BOOL test_arb_vs_offset_limit(const struct wined3d_gl_info *gl_info)
{
    GLuint prog;
    BOOL ret = FALSE;
    const char *testcode =
        "!!ARBvp1.0\n"
        "PARAM C[66] = { program.env[0..65] };\n"
        "ADDRESS A0;"
        "PARAM zero = {0.0, 0.0, 0.0, 0.0};\n"
        "ARL A0.x, zero.x;\n"
        "MOV result.position, C[A0.x + 65];\n"
        "END\n";

    while(glGetError());
    GL_EXTCALL(glGenProgramsARB(1, &prog));
    if(!prog) {
        ERR("Failed to create an ARB offset limit test program\n");
    }
    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
    GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
            (GLsizei)strlen(testcode), testcode));
    if(glGetError() != 0) {
        TRACE("OpenGL implementation does not allow indirect addressing offsets > 63\n");
        TRACE("error: %s\n", debugstr_a((const char *)glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
        ret = TRUE;
    } else TRACE("OpenGL implementation allows offsets > 63\n");

    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
    GL_EXTCALL(glDeleteProgramsARB(1, &prog));
    checkGLcall("ARB vp offset limit test cleanup");

    return ret;
}
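
/* Note (added for clarity): wined3d typically consumes a TRUE result from
 * this probe during caps fixup to flag the offset limitation (e.g. via a
 * quirk bit such as WINED3D_QUIRK_ARB_VS_OFFSET_LIMIT), so that generated
 * ARB vertex programs avoid relative-addressing offsets above 63. */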

static DWORD ver_for_ext(GL_SupportedExt ext)
{
    unsigned int i;
    for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i) {
        if(EXTENSION_MAP[i].extension == ext) {
            return EXTENSION_MAP[i].version;
        }
    }
    return 0;
}

static BOOL match_ati_r300_to_500(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_9500) return TRUE;
    if (device == CARD_ATI_RADEON_X700) return TRUE;
    if (device == CARD_ATI_RADEON_X1600) return TRUE;
    return FALSE;
}

static BOOL match_geforce5(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor == HW_VENDOR_NVIDIA)
    {
        if (device == CARD_NVIDIA_GEFORCEFX_5800 || device == CARD_NVIDIA_GEFORCEFX_5600)
        {
            return TRUE;
        }
    }
    return FALSE;
}

static BOOL match_apple(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    /* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
     * the OpenGL 1.2+ core, while other extensions are advertised, but software emulated. So try to
     * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
     *
     * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
     * aren't sufficient either because a Linux binary may display on a Mac OS X server via remote X11.
     * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
     * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
     * is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
     * the chance that other implementations support them is rather small since Win32 QuickTime uses
     * DirectDraw, not OpenGL.
     *
     * This test has been moved into wined3d_guess_gl_vendor(). */
    if (gl_vendor == GL_VENDOR_APPLE)
    {
        return TRUE;
    }
    return FALSE;
}

/* Context activation is done by the caller. */
static void test_pbo_functionality(struct wined3d_gl_info *gl_info)
{
    /* Some OpenGL implementations, namely Apple's Geforce 8 driver, advertise PBOs,
     * but glTexSubImage from a PBO fails miserably, with the first line repeated over
     * the whole texture. This function detects this bug by its symptom and disables PBOs
     * if the test fails.
     *
     * The test uploads a 4x4 texture via the PBO in the "native" format GL_BGRA,
     * GL_UNSIGNED_INT_8_8_8_8_REV. This format triggers the bug, and it is what we use
     * for D3DFMT_A8R8G8B8. Then the texture is read back without any PBO and the data
     * read back is compared to the original. If they are equal, PBOs are assumed to work,
     * otherwise the PBO extension is disabled. */
    GLuint texture, pbo;
    static const unsigned int pattern[] =
    {
        0x00000000, 0x000000ff, 0x0000ff00, 0x40ff0000,
        0x80ffffff, 0x40ffff00, 0x00ff00ff, 0x0000ffff,
        0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x000000ff,
        0x80ff00ff, 0x0000ffff, 0x00ff00ff, 0x40ff00ff
    };
    unsigned int check[sizeof(pattern) / sizeof(pattern[0])];

    /* No PBO -> No point in testing them. */
    if (!gl_info->supported[ARB_PIXEL_BUFFER_OBJECT]) return;

    ENTER_GL();

    while (glGetError());
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, 0);
    checkGLcall("Specifying the PBO test texture");

    GL_EXTCALL(glGenBuffersARB(1, &pbo));
    GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo));
    GL_EXTCALL(glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, sizeof(pattern), pattern, GL_STREAM_DRAW_ARB));
    checkGLcall("Specifying the PBO test pbo");

    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    checkGLcall("Loading the PBO test texture");

    GL_EXTCALL(glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0));
#ifdef VBOX_WITH_VMSVGA
    glFinish();
#else
    wglFinish(); /* just to be sure */
#endif
    memset(check, 0, sizeof(check));
    glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, check);
    checkGLcall("Reading back the PBO test texture");

    glDeleteTextures(1, &texture);
    GL_EXTCALL(glDeleteBuffersARB(1, &pbo));
    checkGLcall("PBO test cleanup");

    LEAVE_GL();

    if (memcmp(check, pattern, sizeof(check)))
    {
        WARN_(d3d_caps)("PBO test failed, read back data doesn't match original.\n");
        WARN_(d3d_caps)("Disabling PBOs. This may result in slower performance.\n");
        gl_info->supported[ARB_PIXEL_BUFFER_OBJECT] = FALSE;
    }
    else
    {
        TRACE_(d3d_caps)("PBO test successful.\n");
    }
}

static BOOL match_apple_intel(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    return (card_vendor == HW_VENDOR_INTEL) && (gl_vendor == GL_VENDOR_APPLE);
}

static BOOL match_apple_nonr500ati(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (gl_vendor != GL_VENDOR_APPLE) return FALSE;
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_X1600) return FALSE;
    return TRUE;
}

static BOOL match_fglrx(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    return gl_vendor == GL_VENDOR_FGLRX;
}

static BOOL match_dx10_capable(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    /* DX9 cards support 40 single-float varyings in hardware; most drivers report 32. ATI misreports
     * 44 varyings. So assume that if we have more than 44 varyings we have a dx10 card.
     * This detection is for the gl_ClipPos varying quirk. If a d3d9 card really supports more than 44
     * varyings and we subtract one in dx9 shaders, it's not going to hurt us because the dx9 limit is
     * hardcoded.
     *
     * dx10 cards usually have 64 varyings. */
    return gl_info->limits.glsl_varyings > 44;
}

/* A GL context is provided by the caller */
static BOOL match_allows_spec_alpha(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    GLenum error;
    DWORD data[16];

    if (!gl_info->supported[EXT_SECONDARY_COLOR]) return FALSE;

    ENTER_GL();
    while(glGetError());
    GL_EXTCALL(glSecondaryColorPointerEXT)(4, GL_UNSIGNED_BYTE, 4, data);
    error = glGetError();
    LEAVE_GL();

    if(error == GL_NO_ERROR)
    {
        TRACE("GL Implementation accepts 4 component specular color pointers\n");
        return TRUE;
    }
    else
    {
        TRACE("GL implementation does not accept 4 component specular colors, error %s\n",
                debug_glerror(error));
        return FALSE;
    }
}

static BOOL match_apple_nvts(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (!match_apple(gl_info, gl_renderer, gl_vendor, card_vendor, device)) return FALSE;
    return gl_info->supported[NV_TEXTURE_SHADER];
}

/* A GL context is provided by the caller */
static BOOL match_broken_nv_clip(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    GLuint prog;
    BOOL ret = FALSE;
    GLint pos;
    const char *testcode =
        "!!ARBvp1.0\n"
        "OPTION NV_vertex_program2;\n"
        "MOV result.clip[0], 0.0;\n"
        "MOV result.position, 0.0;\n"
        "END\n";

    if (!gl_info->supported[NV_VERTEX_PROGRAM2_OPTION]) return FALSE;

    ENTER_GL();
    while(glGetError());

    GL_EXTCALL(glGenProgramsARB(1, &prog));
    if(!prog)
    {
        ERR("Failed to create the NVvp clip test program\n");
        LEAVE_GL();
        return FALSE;
    }
    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, prog));
    GL_EXTCALL(glProgramStringARB(GL_VERTEX_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
            (GLsizei)strlen(testcode), testcode));
    glGetIntegerv(GL_PROGRAM_ERROR_POSITION_ARB, &pos);
    if(pos != -1)
    {
        WARN("GL_NV_vertex_program2_option result.clip[] test failed\n");
        TRACE("error: %s\n", debugstr_a((const char *)glGetString(GL_PROGRAM_ERROR_STRING_ARB)));
        ret = TRUE;
        while(glGetError());
    }
    else TRACE("GL_NV_vertex_program2_option result.clip[] test passed\n");

    GL_EXTCALL(glBindProgramARB(GL_VERTEX_PROGRAM_ARB, 0));
    GL_EXTCALL(glDeleteProgramsARB(1, &prog));
    checkGLcall("GL_NV_vertex_program2_option result.clip[] test cleanup");

    LEAVE_GL();
    return ret;
}

/* Context activation is done by the caller. */
static BOOL match_fbo_tex_update(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    char data[4 * 4 * 4];
    GLuint tex, fbo;
    GLenum status;

#ifndef VBOX_WITH_VMSVGA
    if (wined3d_settings.offscreen_rendering_mode != ORM_FBO) return FALSE;
#endif
    memset(data, 0xcc, sizeof(data));

    ENTER_GL();

    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 4, 4, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, NULL);
    checkGLcall("glTexImage2D");

    gl_info->fbo_ops.glGenFramebuffers(1, &fbo);
    gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0);
    checkGLcall("glFramebufferTexture2D");

    status = gl_info->fbo_ops.glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) ERR("FBO status %#x\n", status);
    checkGLcall("glCheckFramebufferStatus");

    memset(data, 0x11, sizeof(data));
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 4, 4, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
    checkGLcall("glTexSubImage2D");

    glClearColor(0.996, 0.729, 0.745, 0.792);
    glClear(GL_COLOR_BUFFER_BIT);
    checkGLcall("glClear");

    glGetTexImage(GL_TEXTURE_2D, 0, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, data);
    checkGLcall("glGetTexImage");

    gl_info->fbo_ops.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0);
    gl_info->fbo_ops.glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
    checkGLcall("glBindTexture");

    gl_info->fbo_ops.glDeleteFramebuffers(1, &fbo);
    glDeleteTextures(1, &tex);
    checkGLcall("glDeleteTextures");

    LEAVE_GL();

    return *(DWORD *)data == 0x11111111;
}

static void quirk_arb_constants(struct wined3d_gl_info *gl_info)
{
    TRACE_(d3d_caps)("Using ARB vs constant limit(=%u) for GLSL.\n", gl_info->limits.arb_vs_native_constants);
    gl_info->limits.glsl_vs_float_constants = gl_info->limits.arb_vs_native_constants;
    TRACE_(d3d_caps)("Using ARB ps constant limit(=%u) for GLSL.\n", gl_info->limits.arb_ps_native_constants);
    gl_info->limits.glsl_ps_float_constants = gl_info->limits.arb_ps_native_constants;
}

static void quirk_apple_glsl_constants(struct wined3d_gl_info *gl_info)
{
    quirk_arb_constants(gl_info);
    /* MacOS needs uniforms for relative addressing offsets. This can accumulate to quite a few uniforms.
     * Beyond that, the general uniform handling isn't optimal, so reserve a number of uniforms. 12 vec4's
     * should allow 48 different offsets or other helper immediate values. */
    TRACE_(d3d_caps)("Reserving 12 GLSL constants for compiler private use.\n");
    gl_info->reserved_glsl_constants = max(gl_info->reserved_glsl_constants, 12);
}

/* fglrx crashes with a very bad kernel panic if GL_POINT_SPRITE_ARB is set to GL_COORD_REPLACE_ARB
 * on more than one texture unit. This means that the d3d9 visual point size test will cause a
 * kernel panic on any machine running fglrx 9.3 (the latest driver that supports r300 to r500 cards).
 * This quirk only enables point sprites on the first texture unit. This keeps point sprites working
 * in most games, but avoids the crash.
 *
 * A more sophisticated way would be to find all units that need texture coordinates and enable
 * point sprites for one if only one is found, and software emulate point sprites in drawStridedSlow
 * if more than one unit needs texture coordinates. (This requires software ffp and vertex shaders though.)
 *
 * Note that disabling the extension entirely does not gain predictability, because there is no point
 * sprite capability flag in d3d, so the potential rendering bugs are the same if we disable the extension. */
static void quirk_one_point_sprite(struct wined3d_gl_info *gl_info)
{
    if (gl_info->supported[ARB_POINT_SPRITE])
    {
        TRACE("Limiting point sprites to one texture unit.\n");
        gl_info->limits.point_sprite_units = 1;
    }
}

static void quirk_ati_dx9(struct wined3d_gl_info *gl_info)
{
    quirk_arb_constants(gl_info);

    /* MacOS advertises GL_ARB_texture_non_power_of_two on ATI r500 and earlier cards, although
     * these cards only support GL_ARB_texture_rectangle (D3DPTEXTURECAPS_NONPOW2CONDITIONAL).
     * If real NP2 textures are used, the driver falls back to software. We could just remove the
     * extension and use GL_ARB_texture_rectangle instead, but texture_rectangle is inconvenient
     * due to the non-normalized texture coordinates. Thus set an internal extension flag,
     * GL_WINE_normalized_texrect, which signals the code that it can use non power of two textures
     * as per GL_ARB_texture_non_power_of_two, but has to stick to the texture_rectangle limits.
     *
     * fglrx doesn't advertise GL_ARB_texture_non_power_of_two, but it advertises OpenGL 2.0, which
     * has this extension promoted to core. The extension loading code sets this extension supported
     * due to that, so this code works on fglrx as well. */
    if(gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
    {
        TRACE("GL_ARB_texture_non_power_of_two advertised on R500 or earlier card, removing.\n");
        gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
        gl_info->supported[WINE_NORMALIZED_TEXRECT] = TRUE;
    }

    /* fglrx has the same structural issues as the one described in quirk_apple_glsl_constants, although
     * it is generally more efficient. Reserve just 8 constants. */
    TRACE_(d3d_caps)("Reserving 8 GLSL constants for compiler private use.\n");
    gl_info->reserved_glsl_constants = max(gl_info->reserved_glsl_constants, 8);
}

static void quirk_no_np2(struct wined3d_gl_info *gl_info)
{
    /* The nVidia GeForceFX series reports OpenGL 2.0 capabilities with the latest driver versions, but
     * doesn't explicitly advertise the ARB_tex_npot extension in the GL extension string.
     * This usually means that ARB_tex_npot is supported in hardware as long as the application is staying
     * within the limits enforced by the ARB_texture_rectangle extension. This however is not true for the
     * FX series, which instantly falls back to a slower software path as soon as ARB_tex_npot is used.
     * We therefore completely remove ARB_tex_npot from the list of supported extensions.
     *
     * Note that wine_normalized_texrect can't be used in this case because internally it uses ARB_tex_npot,
     * triggering the software fallback. There is not much we can do here apart from disabling the
     * software-emulated extension and re-enabling ARB_tex_rect (which was previously disabled
     * in IWineD3DImpl_FillGLCaps).
     * This fixup removes performance problems on both the FX 5900 and FX 5700 (e.g. for framebuffer
     * post-processing effects in the game "Max Payne 2").
     * The behaviour can be verified through a simple test app attached in bugreport #14724. */
    TRACE("GL_ARB_texture_non_power_of_two advertised through OpenGL 2.0 on NV FX card, removing.\n");
    gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO] = FALSE;
    gl_info->supported[ARB_TEXTURE_RECTANGLE] = TRUE;
}

static void quirk_texcoord_w(struct wined3d_gl_info *gl_info)
{
    /* The Intel GPUs on MacOS set the .w register of texcoords to 0.0 by default, which causes problems
     * with fixed function fragment processing. Ideally this flag should be detected with a test shader
     * and OpenGL feedback mode, but some GL implementations (MacOS ATI at least, probably all MacOS ones)
     * do not like vertex shaders in feedback mode and return an error, even though it should be valid
     * according to the spec.
     *
     * We don't want to enable this on all cards, as it adds an extra instruction per texcoord used. This
     * makes the shader slower and eats instruction slots which should be available to the d3d app.
     *
     * ATI Radeon HD 2xxx cards on MacOS have the issue. Instead of checking for the buggy cards, blacklist
     * all radeon cards on Macs and whitelist the good ones. That way we're prepared for the future. If
     * this workaround is activated on cards that do not need it, it won't break things, just affect
     * performance negatively. */
    TRACE("Enabling vertex texture coord fixes in vertex shaders.\n");
    gl_info->quirks |= WINED3D_QUIRK_SET_TEXCOORD_W;
}

static void quirk_clip_varying(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_GLSL_CLIP_VARYING;
}

static void quirk_allows_specular_alpha(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_ALLOWS_SPECULAR_ALPHA;
}

static void quirk_apple_nvts(struct wined3d_gl_info *gl_info)
{
    gl_info->supported[NV_TEXTURE_SHADER] = FALSE;
    gl_info->supported[NV_TEXTURE_SHADER2] = FALSE;
}

static void quirk_disable_nvvp_clip(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_NV_CLIP_BROKEN;
}

static void quirk_fbo_tex_update(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FBO_TEX_UPDATE;
}

static BOOL match_ati_hd4800(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_ATI) return FALSE;
    if (device == CARD_ATI_RADEON_HD4800) return TRUE;
    return FALSE;
}

static void quirk_fullsize_blit(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FULLSIZE_BLIT;
}

#ifdef VBOX_WITH_WDDM
static BOOL match_mesa_nvidia(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor != HW_VENDOR_NVIDIA) return FALSE;
    if (gl_vendor != GL_VENDOR_MESA) return FALSE;
    return TRUE;
}

static void quirk_no_shader_3(struct wined3d_gl_info *gl_info)
{
    int vs_selected_mode, ps_selected_mode;
    select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
    if (vs_selected_mode != SHADER_GLSL && ps_selected_mode != SHADER_GLSL)
        return;

    gl_info->limits.arb_ps_instructions = 512;
}
#endif

static BOOL match_intel(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    if (card_vendor == HW_VENDOR_INTEL) return TRUE;
    if (gl_vendor == GL_VENDOR_INTEL) return TRUE;
    return FALSE;
}

static void quirk_force_blit(struct wined3d_gl_info *gl_info)
{
    gl_info->quirks |= WINED3D_QUIRK_FORCE_BLIT;
}

struct driver_quirk
{
    BOOL (*match)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
            enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device);
    void (*apply)(struct wined3d_gl_info *gl_info);
    const char *description;
};
static const struct driver_quirk quirk_table[] =
{
    {
        match_ati_r300_to_500,
        quirk_ati_dx9,
        "ATI GLSL constant and normalized texrect quirk"
    },
    /* MacOS advertises more GLSL vertex shader uniforms than supported by the hardware, and if more are
     * used it falls back to software. While the compiler can detect if the shader uses all declared
     * uniforms, the optimization fails if the shader uses relative addressing. So any GLSL shader
     * using relative addressing falls back to software.
     *
     * ARB vp gives the correct amount of uniforms, so use it instead of GLSL. */
    {
        match_apple,
        quirk_apple_glsl_constants,
        "Apple GLSL uniform override"
    },
    {
        match_geforce5,
        quirk_no_np2,
        "Geforce 5 NP2 disable"
    },
    {
        match_apple_intel,
        quirk_texcoord_w,
        "Init texcoord .w for Apple Intel GPU driver"
    },
    {
        match_apple_nonr500ati,
        quirk_texcoord_w,
        "Init texcoord .w for Apple ATI >= r600 GPU driver"
    },
    {
        match_fglrx,
        quirk_one_point_sprite,
        "Fglrx point sprite crash workaround"
    },
    {
        match_dx10_capable,
        quirk_clip_varying,
        "Reserved varying for gl_ClipPos"
    },
    {
        /* GL_EXT_secondary_color does not allow 4 component secondary colors, but most
         * GL implementations accept it. The Mac GL is the only implementation known to
         * reject it.
         *
         * If we can pass 4 component specular colors, do it, because (a) we don't have
         * to screw around with the data, and (b) the D3D fixed function vertex pipeline
         * passes specular alpha to the pixel shader if any is used. Otherwise the
         * specular alpha is used to pass the fog coordinate, which we pass to opengl
         * via GL_EXT_fog_coord.
         */
        match_allows_spec_alpha,
        quirk_allows_specular_alpha,
        "Allow specular alpha quirk"
    },
    {
        /* The pixel formats provided by GL_NV_texture_shader are broken on OSX
         * (rdar://5682521).
         */
        match_apple_nvts,
        quirk_apple_nvts,
        "Apple NV_texture_shader disable"
    },
#ifndef VBOX_WITH_VMSVGA
    {
        match_broken_nv_clip,
        quirk_disable_nvvp_clip,
        "Apple NV_vertex_program clip bug quirk"
    },
#endif
    {
        match_fbo_tex_update,
        quirk_fbo_tex_update,
        "FBO rebind for attachment updates"
    },
    {
        match_ati_hd4800,
        quirk_fullsize_blit,
        "Fullsize blit"
    },
#ifdef VBOX_WITH_WDDM
    {
        match_mesa_nvidia,
        quirk_no_shader_3,
        "disable shader 3 support"
    },
#endif
    {
        match_intel,
        quirk_force_blit,
        "force framebuffer blit when possible"
    }
};

/* Context activation is done by the caller. */
static void fixup_extensions(struct wined3d_gl_info *gl_info, const char *gl_renderer,
        enum wined3d_gl_vendor gl_vendor, enum wined3d_pci_vendor card_vendor, enum wined3d_pci_device device)
{
    unsigned int i;

    for (i = 0; i < (sizeof(quirk_table) / sizeof(*quirk_table)); ++i)
    {
        if (!quirk_table[i].match(gl_info, gl_renderer, gl_vendor, card_vendor, device)) continue;
        TRACE_(d3d_caps)("Applying driver quirk \"%s\".\n", quirk_table[i].description);
        quirk_table[i].apply(gl_info);
    }

    /* Find out if PBOs work as they are supposed to. */
    test_pbo_functionality(gl_info);
}


/* Certain applications (Steam) complain if we report an outdated driver version. In general,
 * reporting a driver version is moot because we are not the Windows driver, and we have different
 * bugs, features, etc.
 *
 * The driver version has the form "x.y.z.w".
 *
 * "x" is the Windows version the driver is meant for:
 * 4 -> 95/98/NT4
 * 5 -> 2000
 * 6 -> 2000/XP
 * 7 -> Vista
 * 8 -> Win 7
 *
 * "y" is the Direct3D level the driver supports:
 * 11 -> d3d6
 * 12 -> d3d7
 * 13 -> d3d8
 * 14 -> d3d9
 * 15 -> d3d10
 *
 * "z" is unknown, possibly vendor specific.
 *
 * "w" is the vendor specific driver version.
 */
struct driver_version_information
{
    WORD vendor;               /* reported PCI card vendor ID */
    WORD card;                 /* reported PCI card device ID */
    const char *description;   /* Description of the card e.g. NVIDIA RIVA TNT */
    WORD d3d_level;            /* driver hiword to report */
    WORD lopart_hi, lopart_lo; /* driver loword to report */
};
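
/* Illustrative sketch (added; not part of the original file): one plausible
 * way a table entry maps onto the two halves of the 64-bit "x.y.z.w" driver
 * version described above, with the OS level "x" supplied by the caller.
 * MAKELONG(low, high) packs two WORDs into a DWORD. Kept in #if 0 like the
 * unused table below. */
#if 0
static ULONGLONG pack_driver_version_example(WORD os_level, const struct driver_version_information *info)
{
    DWORD high = MAKELONG(info->d3d_level, os_level);        /* "x.y" */
    DWORD low  = MAKELONG(info->lopart_lo, info->lopart_hi); /* "z.w" */
    return ((ULONGLONG)high << 32) | low;
}
#endif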

#if 0 /* VBox: unused */
static const struct driver_version_information driver_version_table[] =
{
    /* Nvidia drivers. Geforce6 and newer cards are supported by the current driver (180.x).
     * GeforceFX support is up to 173.x - the driver uses numbering x.y.11.7341 for 173.41, where x is the windows revision (6=2000/xp, 7=vista) and y is unknown.
     * Geforce2MX/3/4 up to 96.x - the driver uses numbering 9.6.8.9 for 96.89.
     * TNT/Geforce1/2 up to 71.x - the driver uses numbering 7.1.8.6 for 71.86.
     *
     * All version numbers used below are from the Linux nvidia drivers. */
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT, "NVIDIA RIVA TNT", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_RIVA_TNT2, "NVIDIA RIVA TNT2/TNT2 Pro", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE, "NVIDIA GeForce 256", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2_MX, "NVIDIA GeForce2 MX/MX 400", 6, 4, 3 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE2, "NVIDIA GeForce2 GTS/GeForce2 Pro", 1, 8, 6 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE3, "NVIDIA GeForce3", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_MX, "NVIDIA GeForce4 MX 460", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE4_TI4200, "NVIDIA GeForce4 Ti 4200", 6, 10, 9371 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5200, "NVIDIA GeForce FX 5200", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5600, "NVIDIA GeForce FX 5600", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCEFX_5800, "NVIDIA GeForce FX 5800", 15, 11, 7516 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6200, "NVIDIA GeForce 6200", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6600GT, "NVIDIA GeForce 6600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_6800, "NVIDIA GeForce 6800", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7300, "NVIDIA GeForce Go 7300", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7400, "NVIDIA GeForce Go 7400", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7600, "NVIDIA GeForce 7600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_7800GT, "NVIDIA GeForce 7800 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8300GS, "NVIDIA GeForce 8300 GS", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600GT, "NVIDIA GeForce 8600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8600MGT, "NVIDIA GeForce 8600M GT", 15, 11, 8585 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_8800GTS, "NVIDIA GeForce 8800 GTS", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9200, "NVIDIA GeForce 9200", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9400GT, "NVIDIA GeForce 9400 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9500GT, "NVIDIA GeForce 9500 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9600GT, "NVIDIA GeForce 9600 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_9800GT, "NVIDIA GeForce 9800 GT", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX260, "NVIDIA GeForce GTX 260", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX275, "NVIDIA GeForce GTX 275", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GTX280, "NVIDIA GeForce GTX 280", 15, 11, 8618 },
    {HW_VENDOR_NVIDIA, CARD_NVIDIA_GEFORCE_GT240, "NVIDIA GeForce GT 240", 15, 11, 8618 },

    /* ATI cards. The driver versions are somewhat similar, but not quite the same. Let's hardcode. */
    {HW_VENDOR_ATI, CARD_ATI_RADEON_9500, "ATI Radeon 9500", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_X700, "ATI Radeon X700 SE", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_X1600, "ATI Radeon X1600 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2350, "ATI Mobility Radeon HD 2350", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2600, "ATI Mobility Radeon HD 2600", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD2900, "ATI Radeon HD 2900 XT", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4350, "ATI Radeon HD 4350", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4600, "ATI Radeon HD 4600 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4700, "ATI Radeon HD 4700 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD4800, "ATI Radeon HD 4800 Series", 14, 10, 6764 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD5700, "ATI Radeon HD 5700 Series", 14, 10, 8681 },
    {HW_VENDOR_ATI, CARD_ATI_RADEON_HD5800, "ATI Radeon HD 5800 Series", 14, 10, 8681 },

    /* TODO: Add information about legacy ATI hardware, Intel and other cards. */
};
#endif /* VBox: unused */


static DWORD wined3d_parse_gl_version(const char *gl_version)
{
    const char *ptr = gl_version;
    int major, minor;

    major = atoi(ptr);
    if (major <= 0) ERR_(d3d_caps)("Invalid opengl major version: %d.\n", major);

    while (isdigit(*ptr)) ++ptr;
    if (*ptr++ != '.') ERR_(d3d_caps)("Invalid opengl version string: %s.\n", debugstr_a(gl_version));

    minor = atoi(ptr);

    TRACE_(d3d_caps)("Found OpenGL version: %d.%d.\n", major, minor);

    return MAKEDWORD_VERSION(major, minor);
}
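
/* Example (added for clarity): a GL_VERSION string such as
 * "2.1.2 NVIDIA 195.36.24" parses to MAKEDWORD_VERSION(2, 1); the
 * atoi()-based scan above stops after the minor number, so any trailing
 * release/vendor text is ignored. */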

static enum wined3d_gl_vendor wined3d_guess_gl_vendor(struct wined3d_gl_info *gl_info, const char *gl_vendor_string, const char *gl_renderer)
{
    /* MacOS has various specialities in the extensions it advertises. Some have to be loaded from
     * the OpenGL 1.2+ core, while other extensions are advertised, but software emulated. So try to
     * detect the Apple OpenGL implementation to apply some extension fixups afterwards.
     *
     * Detecting this isn't really easy. The vendor string doesn't mention Apple. Compile-time checks
     * aren't sufficient either because a Linux binary may display on a Mac OS X server via remote X11.
     * So try to detect the GL implementation by looking at certain Apple extensions. Some extensions
     * like client storage might be supported on other implementations too, but GL_APPLE_flush_render
     * is specific to the Mac OS X window management, and GL_APPLE_ycbcr_422 is QuickTime specific. So
     * the chance that other implementations support them is rather small since Win32 QuickTime uses
     * DirectDraw, not OpenGL. */
    if (gl_info->supported[APPLE_FENCE]
            && gl_info->supported[APPLE_CLIENT_STORAGE]
            && gl_info->supported[APPLE_FLUSH_RENDER]
            && gl_info->supported[APPLE_YCBCR_422])
        return GL_VENDOR_APPLE;

    if (strstr(gl_vendor_string, "NVIDIA"))
        return GL_VENDOR_NVIDIA;

    if (strstr(gl_vendor_string, "ATI"))
        return GL_VENDOR_FGLRX;

    if (strstr(gl_vendor_string, "Intel(R)")
            || strstr(gl_renderer, "Intel(R)")
            || strstr(gl_vendor_string, "Intel Inc."))
    {
        if (strstr(gl_renderer, "Mesa"))
            return GL_VENDOR_MESA;
        return GL_VENDOR_INTEL;
    }

    if (strstr(gl_vendor_string, "Mesa")
            || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
            || strstr(gl_vendor_string, "DRI R300 Project")
            || strstr(gl_vendor_string, "X.Org R300 Project")
            || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
            || strstr(gl_vendor_string, "VMware, Inc.")
            || strstr(gl_renderer, "Mesa")
            || strstr(gl_renderer, "Gallium"))
        return GL_VENDOR_MESA;

    FIXME_(d3d_caps)("Received unrecognized GL_VENDOR %s. Returning GL_VENDOR_UNKNOWN.\n",
            debugstr_a(gl_vendor_string));

    return GL_VENDOR_UNKNOWN;
}

static enum wined3d_pci_vendor wined3d_guess_card_vendor(const char *gl_vendor_string, const char *gl_renderer)
{
    if (strstr(gl_vendor_string, "NVIDIA"))
        return HW_VENDOR_NVIDIA;

    if (strstr(gl_vendor_string, "ATI")
            || strstr(gl_vendor_string, "Advanced Micro Devices, Inc.")
            || strstr(gl_vendor_string, "X.Org R300 Project")
            || strstr(gl_vendor_string, "DRI R300 Project"))
        return HW_VENDOR_ATI;

    if (strstr(gl_vendor_string, "Intel(R)")
            || strstr(gl_renderer, "Intel(R)")
            || strstr(gl_vendor_string, "Intel Inc."))
        return HW_VENDOR_INTEL;

    if (strstr(gl_vendor_string, "Mesa")
            || strstr(gl_vendor_string, "Tungsten Graphics, Inc")
            || strstr(gl_vendor_string, "VMware, Inc."))
        return HW_VENDOR_SOFTWARE;

    FIXME_(d3d_caps)("Received unrecognized GL_VENDOR %s. Returning HW_VENDOR_NVIDIA.\n", debugstr_a(gl_vendor_string));

    return HW_VENDOR_NVIDIA;
}


static enum wined3d_pci_device select_card_nvidia_binary(const struct wined3d_gl_info *gl_info,
        const char *gl_renderer, unsigned int *vidmem)
{
#ifndef VBOX_WITH_WDDM
    if (WINE_D3D10_CAPABLE(gl_info))
#endif
    {
        /* Geforce 200 - highend */
        if (strstr(gl_renderer, "GTX 280")
                || strstr(gl_renderer, "GTX 285")
                || strstr(gl_renderer, "GTX 295"))
        {
            *vidmem = 1024;
            return CARD_NVIDIA_GEFORCE_GTX280;
        }

        /* Geforce 200 - midend high */
        if (strstr(gl_renderer, "GTX 275"))
        {
            *vidmem = 896;
            return CARD_NVIDIA_GEFORCE_GTX275;
        }

        /* Geforce 200 - midend */
        if (strstr(gl_renderer, "GTX 260"))
        {
            *vidmem = 1024;
            return CARD_NVIDIA_GEFORCE_GTX260;
        }
        /* Geforce 200 - midend */
        if (strstr(gl_renderer, "GT 240"))
        {
            *vidmem = 512;
            return CARD_NVIDIA_GEFORCE_GT240;
        }

        /* Geforce9 - highend / Geforce 200 - midend (GTS 150/250 are based on the same core) */
        if (strstr(gl_renderer, "9800")
                || strstr(gl_renderer, "GTS 150")
                || strstr(gl_renderer, "GTS 250"))
        {
            *vidmem = 512;
            return CARD_NVIDIA_GEFORCE_9800GT;
        }

        /* Geforce9 - midend */
        if (strstr(gl_renderer, "9600"))
        {
            *vidmem = 384; /* The 9600GSO has 384MB, the 9600GT has 512-1024MB */
            return CARD_NVIDIA_GEFORCE_9600GT;
        }

        /* Geforce9 - midend low / Geforce 200 - low */
        if (strstr(gl_renderer, "9500")
                || strstr(gl_renderer, "GT 120")
                || strstr(gl_renderer, "GT 130"))
        {
            *vidmem = 256; /* The 9500GT has 256-1024MB */
            return CARD_NVIDIA_GEFORCE_9500GT;
        }

        /* Geforce9 - lowend */
        if (strstr(gl_renderer, "9400"))
        {
            *vidmem = 256; /* The 9400GT has 256-1024MB */
            return CARD_NVIDIA_GEFORCE_9400GT;
        }

        /* Geforce9 - lowend low */
        if (strstr(gl_renderer, "9100")
                || strstr(gl_renderer, "9200")
                || strstr(gl_renderer, "9300")
                || strstr(gl_renderer, "G 100"))
        {
            *vidmem = 256; /* The 9100-9300 cards have 256MB */
            return CARD_NVIDIA_GEFORCE_9200;
        }

        /* Geforce8 - highend */
        if (strstr(gl_renderer, "8800"))
        {
            *vidmem = 320; /* The 8800GTS uses 320MB, a 8800GTX can have 768MB */
            return CARD_NVIDIA_GEFORCE_8800GTS;
        }

        /* Geforce8 - midend mobile */
        if (strstr(gl_renderer, "8600 M"))
        {
            *vidmem = 512;
            return CARD_NVIDIA_GEFORCE_8600MGT;
        }

        /* Geforce8 - midend */
        if (strstr(gl_renderer, "8600")
                || strstr(gl_renderer, "8700"))
        {
            *vidmem = 256;
            return CARD_NVIDIA_GEFORCE_8600GT;
        }

        /* Geforce8 - lowend */
        if (strstr(gl_renderer, "8100")
                || strstr(gl_renderer, "8200")
                || strstr(gl_renderer, "8300")
                || strstr(gl_renderer, "8400")
                || strstr(gl_renderer, "8500"))
        {
            *vidmem = 128; /* 128-256MB for a 8300, 256-512MB for a 8400 */
            return CARD_NVIDIA_GEFORCE_8300GS;
        }

        /* Geforce8-compatible fall back if the GPU is not in the list yet */
        *vidmem = 128;
        return CARD_NVIDIA_GEFORCE_8300GS;
    }

    /* Both the GeforceFX, 6xxx and 7xxx series support D3D9. The last two types have more
     * shader capabilities, so we use the shader capabilities to distinguish between FX and 6xxx/7xxx.
     */
    if (WINE_D3D9_CAPABLE(gl_info) && gl_info->supported[NV_VERTEX_PROGRAM3])
    {
        /* Geforce7 - highend */
        if (strstr(gl_renderer, "7800")
                || strstr(gl_renderer, "7900")
                || strstr(gl_renderer, "7950")
                || strstr(gl_renderer, "Quadro FX 4")
                || strstr(gl_renderer, "Quadro FX 5"))
        {
            *vidmem = 256; /* A 7800GT uses 256MB while highend 7900 cards can use 512MB */
            return CARD_NVIDIA_GEFORCE_7800GT;
        }

        /* Geforce7 midend */
        if (strstr(gl_renderer, "7600")
                || strstr(gl_renderer, "7700"))
        {
            *vidmem = 256; /* The 7600 uses 256-512MB */
            return CARD_NVIDIA_GEFORCE_7600;
        }

        /* Geforce7 lower medium */
        if (strstr(gl_renderer, "7400"))
        {
            *vidmem = 256; /* The 7400 uses 256-512MB */
            return CARD_NVIDIA_GEFORCE_7400;
        }

        /* Geforce7 lowend */
        if (strstr(gl_renderer, "7300"))
        {
            *vidmem = 256; /* Mac Pros with this card have 256 MB */
            return CARD_NVIDIA_GEFORCE_7300;
        }

        /* Geforce6 highend */
        if (strstr(gl_renderer, "6800"))
        {
            *vidmem = 128; /* The 6800 uses 128-256MB, the 7600 uses 256-512MB */
            return CARD_NVIDIA_GEFORCE_6800;
        }

        /* Geforce6 - midend */
        if (strstr(gl_renderer, "6600")
                || strstr(gl_renderer, "6610")
                || strstr(gl_renderer, "6700"))
        {
            *vidmem = 128; /* A 6600GT has 128-256MB */
            return CARD_NVIDIA_GEFORCE_6600GT;
        }

        /* Geforce6/7 lowend */
        *vidmem = 64;
        return CARD_NVIDIA_GEFORCE_6200; /* Geforce 6100/6150/6200/7300/7400/7500 */
    }

    if (WINE_D3D9_CAPABLE(gl_info))
    {
        /* GeforceFX - highend */
        if (strstr(gl_renderer, "5800")
                || strstr(gl_renderer, "5900")
                || strstr(gl_renderer, "5950")
                || strstr(gl_renderer, "Quadro FX"))
        {
            *vidmem = 256; /* 5800-5900 cards use 256MB */
            return CARD_NVIDIA_GEFORCEFX_5800;
        }

        /* GeforceFX - midend */
        if (strstr(gl_renderer, "5600")
                || strstr(gl_renderer, "5650")
                || strstr(gl_renderer, "5700")
                || strstr(gl_renderer, "5750"))
        {
            *vidmem = 128; /* A 5600 uses 128-256MB */
            return CARD_NVIDIA_GEFORCEFX_5600;
        }

        /* GeforceFX - lowend */
        *vidmem = 64; /* Normal FX5200 cards use 64-256MB; laptop (non-standard) can have less */
        return CARD_NVIDIA_GEFORCEFX_5200; /* GeforceFX 5100/5200/5250/5300/5500 */
    }

    if (WINE_D3D8_CAPABLE(gl_info))
    {
        if (strstr(gl_renderer, "GeForce4 Ti") || strstr(gl_renderer, "Quadro4"))
        {
            *vidmem = 64; /* Geforce4 Ti cards have 64-128MB */
            return CARD_NVIDIA_GEFORCE4_TI4200; /* Geforce4 Ti4200/Ti4400/Ti4600/Ti4800, Quadro4 */
        }

        *vidmem = 64; /* Geforce3 cards have 64-128MB */
        return CARD_NVIDIA_GEFORCE3; /* Geforce3 standard/Ti200/Ti500, Quadro DCC */
    }

    if (WINE_D3D7_CAPABLE(gl_info))
    {
        if (strstr(gl_renderer, "GeForce4 MX"))
        {
            /* Most Geforce4MX GPUs have at least 64MB of memory, some
             * early models had 32MB but most have 64MB or even 128MB. */
            *vidmem = 64;
            return CARD_NVIDIA_GEFORCE4_MX; /* MX420/MX440/MX460/MX4000 */
        }

        if (strstr(gl_renderer, "GeForce2 MX") || strstr(gl_renderer, "Quadro2 MXR"))
        {
            *vidmem = 32; /* Geforce2MX GPUs have 32-64MB of video memory */
            return CARD_NVIDIA_GEFORCE2_MX; /* Geforce2 standard/MX100/MX200/MX400, Quadro2 MXR */
        }

        if (strstr(gl_renderer, "GeForce2") || strstr(gl_renderer, "Quadro2"))
        {
            *vidmem = 32; /* Geforce2 GPUs have 32-64MB of video memory */
            return CARD_NVIDIA_GEFORCE2; /* Geforce2 GTS/Pro/Ti/Ultra, Quadro2 */
        }

        /* Most Geforce1 cards have 32MB, there are also some rare 16
         * and 64MB (Dell) models. */
        *vidmem = 32;
        return CARD_NVIDIA_GEFORCE; /* Geforce 256/DDR, Quadro */
    }

    if (strstr(gl_renderer, "TNT2"))
    {
        *vidmem = 32; /* Most TNT2 boards have 32MB, though there are 16MB boards too */
        return CARD_NVIDIA_RIVA_TNT2; /* Riva TNT2 standard/M64/Pro/Ultra */
    }

    *vidmem = 16; /* Most TNT boards have 16MB, some rare models have 8MB */
    return CARD_NVIDIA_RIVA_TNT; /* Riva TNT, Vanta */
}
1317
1318static enum wined3d_pci_device select_card_ati_binary(const struct wined3d_gl_info *gl_info,
1319 const char *gl_renderer, unsigned int *vidmem)
1320{
1321 /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
1322 *
1323 * Beware: renderer string do not match exact card model,
1324 * eg HD 4800 is returned for multiple cards, even for RV790 based ones. */
1325#ifndef VBOX_WITH_WDDM
1326 if (WINE_D3D10_CAPABLE(gl_info))
1327#endif
1328 {
1329 /* Radeon EG CYPRESS XT / PRO HD5800 - highend */
1330 if (strstr(gl_renderer, "HD 5800") /* Radeon EG CYPRESS HD58xx generic renderer string */
1331 || strstr(gl_renderer, "HD 5850") /* Radeon EG CYPRESS XT */
1332 || strstr(gl_renderer, "HD 5870")) /* Radeon EG CYPRESS PRO */
1333 {
1334 *vidmem = 1024; /* note: HD58xx cards use 1024MB */
1335 return CARD_ATI_RADEON_HD5800;
1336 }
1337
1338 /* Radeon EG JUNIPER XT / LE HD5700 - midend */
1339 if (strstr(gl_renderer, "HD 5700") /* Radeon EG JUNIPER HD57xx generic renderer string */
1340 || strstr(gl_renderer, "HD 5750") /* Radeon EG JUNIPER LE */
1341 || strstr(gl_renderer, "HD 5770")) /* Radeon EG JUNIPER XT */
1342 {
1343 *vidmem = 512; /* note: HD5770 cards use 1024MB and HD5750 cards use 512MB or 1024MB */
1344 return CARD_ATI_RADEON_HD5700;
1345 }
1346
1347 /* Radeon R7xx HD4800 - highend */
1348 if (strstr(gl_renderer, "HD 4800") /* Radeon RV7xx HD48xx generic renderer string */
1349 || strstr(gl_renderer, "HD 4830") /* Radeon RV770 */
1350 || strstr(gl_renderer, "HD 4850") /* Radeon RV770 */
1351 || strstr(gl_renderer, "HD 4870") /* Radeon RV770 */
1352 || strstr(gl_renderer, "HD 4890")) /* Radeon RV790 */
1353 {
1354 *vidmem = 512; /* note: HD4890 cards use 1024MB */
1355 return CARD_ATI_RADEON_HD4800;
1356 }
1357
1358 /* Radeon R740 HD4700 - midend */
1359 if (strstr(gl_renderer, "HD 4700") /* Radeon RV770 */
1360 || strstr(gl_renderer, "HD 4770")) /* Radeon RV740 */
1361 {
1362 *vidmem = 512;
1363 return CARD_ATI_RADEON_HD4700;
1364 }
1365
1366 /* Radeon R730 HD4600 - midend */
1367 if (strstr(gl_renderer, "HD 4600") /* Radeon RV730 */
1368 || strstr(gl_renderer, "HD 4650") /* Radeon RV730 */
1369 || strstr(gl_renderer, "HD 4670")) /* Radeon RV730 */
1370 {
1371 *vidmem = 512;
1372 return CARD_ATI_RADEON_HD4600;
1373 }
1374
1375 /* Radeon R710 HD4500/HD4350 - lowend */
1376 if (strstr(gl_renderer, "HD 4350") /* Radeon RV710 */
1377 || strstr(gl_renderer, "HD 4550")) /* Radeon RV710 */
1378 {
1379 *vidmem = 256;
1380 return CARD_ATI_RADEON_HD4350;
1381 }
1382
1383 /* Radeon R6xx HD2900/HD3800 - highend */
1384 if (strstr(gl_renderer, "HD 2900")
1385 || strstr(gl_renderer, "HD 3870")
1386 || strstr(gl_renderer, "HD 3850"))
1387 {
1388 *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
1389 return CARD_ATI_RADEON_HD2900;
1390 }
1391
1392 /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
1393 if (strstr(gl_renderer, "HD 2600")
1394 || strstr(gl_renderer, "HD 3830")
1395 || strstr(gl_renderer, "HD 3690")
1396 || strstr(gl_renderer, "HD 3650"))
1397 {
1398 *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
1399 return CARD_ATI_RADEON_HD2600;
1400 }
1401
1402 /* Radeon R6xx HD2350/HD2400/HD3400 - lowend
1403 * Note HD2300=DX9, HD2350=DX10 */
1404 if (strstr(gl_renderer, "HD 2350")
1405 || strstr(gl_renderer, "HD 2400")
1406 || strstr(gl_renderer, "HD 3470")
1407 || strstr(gl_renderer, "HD 3450")
1408 || strstr(gl_renderer, "HD 3430")
1409 || strstr(gl_renderer, "HD 3400"))
1410 {
1411 *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
1412 return CARD_ATI_RADEON_HD2350;
1413 }
1414
1415 /* Radeon R6xx/R7xx integrated */
1416 if (strstr(gl_renderer, "HD 3100")
1417 || strstr(gl_renderer, "HD 3200")
1418 || strstr(gl_renderer, "HD 3300"))
1419 {
1420 *vidmem = 128; /* 128MB */
1421 return CARD_ATI_RADEON_HD3200;
1422 }
1423
1424 /* Default for when no GPU has been found */
1425 *vidmem = 128; /* 128MB */
1426 return CARD_ATI_RADEON_HD3200;
1427 }
1428
1429     if (WINE_D3D9_CAPABLE(gl_info))
1430 {
1431 /* Radeon R5xx */
1432 if (strstr(gl_renderer, "X1600")
1433 || strstr(gl_renderer, "X1650")
1434 || strstr(gl_renderer, "X1800")
1435 || strstr(gl_renderer, "X1900")
1436 || strstr(gl_renderer, "X1950"))
1437 {
1438 *vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
1439 return CARD_ATI_RADEON_X1600;
1440 }
1441
1442 /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300/X2500/HD2300 (lowend R5xx)
1443      * Note X2300/X2500/HD2300 are R5xx GPUs with 2xxx names, but they are still DX9-only */
1444 if (strstr(gl_renderer, "X700")
1445 || strstr(gl_renderer, "X800")
1446 || strstr(gl_renderer, "X850")
1447 || strstr(gl_renderer, "X1300")
1448 || strstr(gl_renderer, "X1400")
1449 || strstr(gl_renderer, "X1450")
1450 || strstr(gl_renderer, "X1550")
1451 || strstr(gl_renderer, "X2300")
1452 || strstr(gl_renderer, "X2500")
1453 || strstr(gl_renderer, "HD 2300")
1454 )
1455 {
1456 *vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
1457 return CARD_ATI_RADEON_X700;
1458 }
1459
1460 /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
1461 if (strstr(gl_renderer, "Radeon Xpress"))
1462 {
1463 *vidmem = 64; /* Shared RAM, BIOS configurable, 64-256M */
1464 return CARD_ATI_RADEON_XPRESS_200M;
1465 }
1466
1467 /* Radeon R3xx */
1468 *vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
1469 return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
1470 }
1471
1472 if (WINE_D3D8_CAPABLE(gl_info))
1473 {
1474 *vidmem = 64; /* 8500/9000 cards use mostly 64MB, though there are 32MB and 128MB models */
1475 return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
1476 }
1477
1478 if (WINE_D3D7_CAPABLE(gl_info))
1479 {
1480 *vidmem = 32; /* There are models with up to 64MB */
1481 return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
1482 }
1483
1484 *vidmem = 16; /* There are 16-32MB models */
1485 return CARD_ATI_RAGE_128PRO;
1486
1487}
1488
1489static enum wined3d_pci_device select_card_intel_binary(const struct wined3d_gl_info *gl_info,
1490 const char *gl_renderer, unsigned int *vidmem)
1491{
1492 if (strstr(gl_renderer, "X3100"))
1493 {
1494         /* Mac OS X calls the card GMA X3100; other sources also suggest the name GM965 */
1495 *vidmem = 128;
1496 return CARD_INTEL_X3100;
1497 }
1498
1499 if (strstr(gl_renderer, "GMA 950") || strstr(gl_renderer, "945GM"))
1500 {
1501         /* Mac OS X calls the card GMA 950, but everywhere else the PCI ID is named 945GM */
1502 *vidmem = 64;
1503 return CARD_INTEL_I945GM;
1504 }
1505
1506 if (strstr(gl_renderer, "915GM")) return CARD_INTEL_I915GM;
1507 if (strstr(gl_renderer, "915G")) return CARD_INTEL_I915G;
1508 if (strstr(gl_renderer, "865G")) return CARD_INTEL_I865G;
1509 if (strstr(gl_renderer, "855G")) return CARD_INTEL_I855G;
1510 if (strstr(gl_renderer, "830G")) return CARD_INTEL_I830G;
1511 return CARD_INTEL_I915G;
1512
1513}
1514
1515static enum wined3d_pci_device select_card_ati_mesa(const struct wined3d_gl_info *gl_info,
1516 const char *gl_renderer, unsigned int *vidmem)
1517{
1518 /* See http://developer.amd.com/drivers/pc_vendor_id/Pages/default.aspx
1519 *
1520      * Beware: renderer strings do not match exact card models,
1521      * e.g. HD 4800 is returned for multiple cards, even for RV790-based ones. */
1522 if (strstr(gl_renderer, "Gallium"))
1523 {
1524 /* Radeon R7xx HD4800 - highend */
1525 if (strstr(gl_renderer, "R700") /* Radeon R7xx HD48xx generic renderer string */
1526 || strstr(gl_renderer, "RV770") /* Radeon RV770 */
1527 || strstr(gl_renderer, "RV790")) /* Radeon RV790 */
1528 {
1529 *vidmem = 512; /* note: HD4890 cards use 1024MB */
1530 return CARD_ATI_RADEON_HD4800;
1531 }
1532
1533 /* Radeon R740 HD4700 - midend */
1534 if (strstr(gl_renderer, "RV740")) /* Radeon RV740 */
1535 {
1536 *vidmem = 512;
1537 return CARD_ATI_RADEON_HD4700;
1538 }
1539
1540 /* Radeon R730 HD4600 - midend */
1541 if (strstr(gl_renderer, "RV730")) /* Radeon RV730 */
1542 {
1543 *vidmem = 512;
1544 return CARD_ATI_RADEON_HD4600;
1545 }
1546
1547 /* Radeon R710 HD4500/HD4350 - lowend */
1548 if (strstr(gl_renderer, "RV710")) /* Radeon RV710 */
1549 {
1550 *vidmem = 256;
1551 return CARD_ATI_RADEON_HD4350;
1552 }
1553
1554 /* Radeon R6xx HD2900/HD3800 - highend */
1555 if (strstr(gl_renderer, "R600")
1556 || strstr(gl_renderer, "RV670")
1557 || strstr(gl_renderer, "R680"))
1558 {
1559 *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
1560 return CARD_ATI_RADEON_HD2900;
1561 }
1562
1563 /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
1564 if (strstr(gl_renderer, "RV630")
1565 || strstr(gl_renderer, "RV635"))
1566 {
1567 *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
1568 return CARD_ATI_RADEON_HD2600;
1569 }
1570
1571 /* Radeon R6xx HD2350/HD2400/HD3400 - lowend */
1572 if (strstr(gl_renderer, "RV610")
1573 || strstr(gl_renderer, "RV620"))
1574 {
1575 *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
1576 return CARD_ATI_RADEON_HD2350;
1577 }
1578
1579 /* Radeon R6xx/R7xx integrated */
1580 if (strstr(gl_renderer, "RS780")
1581 || strstr(gl_renderer, "RS880"))
1582 {
1583 *vidmem = 128; /* 128MB */
1584 return CARD_ATI_RADEON_HD3200;
1585 }
1586
1587 /* Radeon R5xx */
1588 if (strstr(gl_renderer, "RV530")
1589 || strstr(gl_renderer, "RV535")
1590 || strstr(gl_renderer, "RV560")
1591 || strstr(gl_renderer, "R520")
1592 || strstr(gl_renderer, "RV570")
1593 || strstr(gl_renderer, "R580"))
1594 {
1595 *vidmem = 128; /* X1600 uses 128-256MB, >=X1800 uses 256MB */
1596 return CARD_ATI_RADEON_X1600;
1597 }
1598
1599 /* Radeon R4xx + X1300/X1400/X1450/X1550/X2300 (lowend R5xx) */
1600 if (strstr(gl_renderer, "R410")
1601 || strstr(gl_renderer, "R420")
1602 || strstr(gl_renderer, "R423")
1603 || strstr(gl_renderer, "R430")
1604 || strstr(gl_renderer, "R480")
1605 || strstr(gl_renderer, "R481")
1606 || strstr(gl_renderer, "RV410")
1607 || strstr(gl_renderer, "RV515")
1608 || strstr(gl_renderer, "RV516"))
1609 {
1610 *vidmem = 128; /* x700/x8*0 use 128-256MB, >=x1300 128-512MB */
1611 return CARD_ATI_RADEON_X700;
1612 }
1613
1614 /* Radeon Xpress Series - onboard, DX9b, Shader 2.0, 300-400MHz */
1615 if (strstr(gl_renderer, "RS400")
1616 || strstr(gl_renderer, "RS480")
1617 || strstr(gl_renderer, "RS482")
1618 || strstr(gl_renderer, "RS485")
1619 || strstr(gl_renderer, "RS600")
1620 || strstr(gl_renderer, "RS690")
1621 || strstr(gl_renderer, "RS740"))
1622 {
1623 *vidmem = 64; /* Shared RAM, BIOS configurable, 64-256M */
1624 return CARD_ATI_RADEON_XPRESS_200M;
1625 }
1626
1627 /* Radeon R3xx */
1628 if (strstr(gl_renderer, "R300")
1629 || strstr(gl_renderer, "RV350")
1630 || strstr(gl_renderer, "RV351")
1631 || strstr(gl_renderer, "RV360")
1632 || strstr(gl_renderer, "RV370")
1633 || strstr(gl_renderer, "R350")
1634 || strstr(gl_renderer, "R360"))
1635 {
1636 *vidmem = 64; /* Radeon 9500 uses 64MB, higher models use up to 256MB */
1637 return CARD_ATI_RADEON_9500; /* Radeon 9500/9550/9600/9700/9800/X300/X550/X600 */
1638 }
1639 }
1640
1641 if (WINE_D3D9_CAPABLE(gl_info))
1642 {
1643 /* Radeon R7xx HD4800 - highend */
1644 if (strstr(gl_renderer, "(R700") /* Radeon R7xx HD48xx generic renderer string */
1645 || strstr(gl_renderer, "(RV770") /* Radeon RV770 */
1646 || strstr(gl_renderer, "(RV790")) /* Radeon RV790 */
1647 {
1648 *vidmem = 512; /* note: HD4890 cards use 1024MB */
1649 return CARD_ATI_RADEON_HD4800;
1650 }
1651
1652 /* Radeon R740 HD4700 - midend */
1653 if (strstr(gl_renderer, "(RV740")) /* Radeon RV740 */
1654 {
1655 *vidmem = 512;
1656 return CARD_ATI_RADEON_HD4700;
1657 }
1658
1659 /* Radeon R730 HD4600 - midend */
1660 if (strstr(gl_renderer, "(RV730")) /* Radeon RV730 */
1661 {
1662 *vidmem = 512;
1663 return CARD_ATI_RADEON_HD4600;
1664 }
1665
1666 /* Radeon R710 HD4500/HD4350 - lowend */
1667 if (strstr(gl_renderer, "(RV710")) /* Radeon RV710 */
1668 {
1669 *vidmem = 256;
1670 return CARD_ATI_RADEON_HD4350;
1671 }
1672
1673 /* Radeon R6xx HD2900/HD3800 - highend */
1674 if (strstr(gl_renderer, "(R600")
1675 || strstr(gl_renderer, "(RV670")
1676 || strstr(gl_renderer, "(R680"))
1677 {
1678 *vidmem = 512; /* HD2900/HD3800 uses 256-1024MB */
1679 return CARD_ATI_RADEON_HD2900;
1680 }
1681
1682 /* Radeon R6xx HD2600/HD3600 - midend; HD3830 is China-only midend */
1683 if (strstr(gl_renderer, "(RV630")
1684 || strstr(gl_renderer, "(RV635"))
1685 {
1686 *vidmem = 256; /* HD2600/HD3600 uses 256-512MB */
1687 return CARD_ATI_RADEON_HD2600;
1688 }
1689
1690 /* Radeon R6xx HD2300/HD2400/HD3400 - lowend */
1691 if (strstr(gl_renderer, "(RV610")
1692 || strstr(gl_renderer, "(RV620"))
1693 {
1694 *vidmem = 256; /* HD2350/2400 use 256MB, HD34xx use 256-512MB */
1695 return CARD_ATI_RADEON_HD2350;
1696 }
1697
1698 /* Radeon R6xx/R7xx integrated */
1699 if (strstr(gl_renderer, "(RS780")
1700 || strstr(gl_renderer, "(RS880"))
1701 {
1702 *vidmem = 128; /* 128MB */
1703 return CARD_ATI_RADEON_HD3200;
1704 }
1705 }
1706
1707 if (WINE_D3D8_CAPABLE(gl_info))
1708 {
1709 *vidmem = 64; /* 8500/9000 cards use mostly 64MB, though there are 32MB and 128MB models */
1710 return CARD_ATI_RADEON_8500; /* Radeon 8500/9000/9100/9200/9300 */
1711 }
1712
1713 if (WINE_D3D7_CAPABLE(gl_info))
1714 {
1715 *vidmem = 32; /* There are models with up to 64MB */
1716 return CARD_ATI_RADEON_7200; /* Radeon 7000/7100/7200/7500 */
1717 }
1718
1719 *vidmem = 16; /* There are 16-32MB models */
1720 return CARD_ATI_RAGE_128PRO;
1721
1722}
1723
1724static enum wined3d_pci_device select_card_nvidia_mesa(const struct wined3d_gl_info *gl_info,
1725 const char *gl_renderer, unsigned int *vidmem)
1726{
1727 FIXME_(d3d_caps)("Card selection not handled for Mesa Nouveau driver\n");
1728#ifndef VBOX_WITH_WDDM
1729 if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCEFX_5600;
1730#else
1731     /* Temporary workaround to disable the quirk_no_np2 quirk for Mesa drivers. */
1732 if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE_6200;
1733#endif
1734 if (WINE_D3D8_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE3;
1735 if (WINE_D3D7_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE;
1736 if (WINE_D3D6_CAPABLE(gl_info)) return CARD_NVIDIA_RIVA_TNT;
1737 return CARD_NVIDIA_RIVA_128;
1738}
1739
1740static enum wined3d_pci_device select_card_intel_cmn(const struct wined3d_gl_info *gl_info,
1741 const char *gl_renderer, unsigned int *vidmem)
1742{
1743 if (strstr(gl_renderer, "HD Graphics")
1744 || strstr(gl_renderer, "Sandybridge"))
1745 return CARD_INTEL_SBHD;
1746 FIXME_(d3d_caps)("Card selection not handled for Windows Intel driver\n");
1747 return CARD_INTEL_I915G;
1748}
1749
1750static enum wined3d_pci_device select_card_intel_mesa(const struct wined3d_gl_info *gl_info,
1751 const char *gl_renderer, unsigned int *vidmem)
1752{
1753 return select_card_intel_cmn(gl_info, gl_renderer, vidmem);
1754}
1755
1756struct vendor_card_selection
1757{
1758 enum wined3d_gl_vendor gl_vendor;
1759 enum wined3d_pci_vendor card_vendor;
1760     const char *description;        /* Description of the card selector, e.g. Apple OS/X Intel */
1761 enum wined3d_pci_device (*select_card)(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
1762 unsigned int *vidmem );
1763};
1764
1765static const struct vendor_card_selection vendor_card_select_table[] =
1766{
1767 {GL_VENDOR_NVIDIA, HW_VENDOR_NVIDIA, "Nvidia binary driver", select_card_nvidia_binary},
1768 {GL_VENDOR_APPLE, HW_VENDOR_NVIDIA, "Apple OSX NVidia binary driver", select_card_nvidia_binary},
1769 {GL_VENDOR_APPLE, HW_VENDOR_ATI, "Apple OSX AMD/ATI binary driver", select_card_ati_binary},
1770 {GL_VENDOR_APPLE, HW_VENDOR_INTEL, "Apple OSX Intel binary driver", select_card_intel_binary},
1771 {GL_VENDOR_FGLRX, HW_VENDOR_ATI, "AMD/ATI binary driver", select_card_ati_binary},
1772 {GL_VENDOR_MESA, HW_VENDOR_ATI, "Mesa AMD/ATI driver", select_card_ati_mesa},
1773 {GL_VENDOR_MESA, HW_VENDOR_NVIDIA, "Mesa Nouveau driver", select_card_nvidia_mesa},
1774 {GL_VENDOR_MESA, HW_VENDOR_INTEL, "Mesa Intel driver", select_card_intel_mesa},
1775 {GL_VENDOR_INTEL, HW_VENDOR_INTEL, "Windows Intel binary driver", select_card_intel_cmn}
1776};
1777
1778
1779static enum wined3d_pci_device wined3d_guess_card(const struct wined3d_gl_info *gl_info, const char *gl_renderer,
1780 enum wined3d_gl_vendor *gl_vendor, enum wined3d_pci_vendor *card_vendor, unsigned int *vidmem)
1781{
1782 /* Above is a list of Nvidia and ATI GPUs. Both vendors have dozens of
1783 * different GPUs with roughly the same features. In most cases GPUs from a
1784      * certain family differ in clock speeds, the amount of video memory and the
1785 * number of shader pipelines.
1786 *
1787 * A Direct3D device object contains the PCI id (vendor + device) of the
1788 * videocard which is used for rendering. Various applications use this
1789 * information to get a rough estimation of the features of the card and
1790 * some might use it for enabling 3d effects only on certain types of
1791 * videocards. In some cases games might even use it to work around bugs
1792 * which happen on certain videocards/driver combinations. The problem is
1793 * that OpenGL only exposes a rendering string containing the name of the
1794 * videocard and not the PCI id.
1795 *
1796 * Various games depend on the PCI id, so somehow we need to provide one.
1797 * A simple option is to parse the renderer string and translate this to
1798 * the right PCI id. This is a lot of work because there are more than 200
1799 * GPUs just for Nvidia. Various cards share the same renderer string, so
1800 * the amount of code might be 'small' but there are quite a number of
1801 * exceptions which would make this a pain to maintain. Another way would
1802 * be to query the PCI id from the operating system (assuming this is the
1803 * videocard which is used for rendering which is not always the case).
1804      * This would work but it is not very portable. Second, it would not work
1805      * well in, say, a remote X situation in which the number of usable 3d
1806      * features is limited.
1807 *
1808 * As said most games only use the PCI id to get an indication of the
1809 * capabilities of the card. It doesn't really matter if the given id is
1810 * the correct one if we return the id of a card with similar 3d features.
1811 *
1812 * The code below checks the OpenGL capabilities of a videocard and matches
1813 * that to a certain level of Direct3D functionality. Once a card passes
1814 * the Direct3D9 check, we know that the card (in case of Nvidia) is at
1815 * least a GeforceFX. To give a better estimate we do a basic check on the
1816 * renderer string but if that won't pass we return a default card. This
1817 * way is better than maintaining a full card database as even without a
1818      * full database we can return a card with similar features. Second, the
1819 * size of the database can be made quite small because when you know what
1820 * type of 3d functionality a card has, you know to which GPU family the
1821 * GPU must belong. Because of this you only have to check a small part of
1822      * the renderer string to distinguish between different models from that
1823 * family.
1824 *
1825 * The code also selects a default amount of video memory which we will
1826 * use for an estimation of the amount of free texture memory. In case of
1827 * real D3D the amount of texture memory includes video memory and system
1828 * memory (to be specific AGP memory or in case of PCIE TurboCache /
1829 * HyperMemory). We don't know how much system memory can be addressed by
1830 * the system but we can make a reasonable estimation about the amount of
1831 * video memory. If the value is slightly wrong it doesn't matter as we
1832 * didn't include AGP-like memory which makes the amount of addressable
1833      * memory higher; second, OpenGL isn't that critical: it moves to system
1834 * memory behind our backs if really needed. Note that the amount of video
1835 * memory can be overruled using a registry setting. */
1836
1837#ifndef VBOX
1838 int i;
1839#else
1840 size_t i;
1841#endif
1842
1843 for (i = 0; i < (sizeof(vendor_card_select_table) / sizeof(*vendor_card_select_table)); ++i)
1844 {
1845 if ((vendor_card_select_table[i].gl_vendor != *gl_vendor)
1846 || (vendor_card_select_table[i].card_vendor != *card_vendor))
1847 continue;
1848 TRACE_(d3d_caps)("Applying card_selector \"%s\".\n", vendor_card_select_table[i].description);
1849 return vendor_card_select_table[i].select_card(gl_info, gl_renderer, vidmem);
1850 }
1851
1852 FIXME_(d3d_caps)("No card selector available for GL vendor %d and card vendor %04x.\n",
1853 *gl_vendor, *card_vendor);
1854
1855     /* Default to generic Nvidia hardware based on the supported OpenGL extensions. Nvidia was
1856      * chosen because the hardware and drivers they make are of good quality, which makes
1857      * them a good generic choice. */
1858 *card_vendor = HW_VENDOR_NVIDIA;
1859#ifndef VBOX_WITH_WDDM
1860 if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCEFX_5600;
1861#else
1862     /* Temporary workaround to disable the quirk_no_np2 quirk for unrecognized drivers. */
1863 if (WINE_D3D9_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE_6200;
1864#endif
1865
1866 if (WINE_D3D8_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE3;
1867 if (WINE_D3D7_CAPABLE(gl_info)) return CARD_NVIDIA_GEFORCE;
1868 if (WINE_D3D6_CAPABLE(gl_info)) return CARD_NVIDIA_RIVA_TNT;
1869 return CARD_NVIDIA_RIVA_128;
1870}
1871
1872#ifndef VBOX_WITH_VMSVGA
1873static const struct fragment_pipeline *select_fragment_implementation(struct wined3d_adapter *adapter)
1874{
1875 const struct wined3d_gl_info *gl_info = &adapter->gl_info;
1876 int vs_selected_mode, ps_selected_mode;
1877
1878 select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
1879 if ((ps_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_GLSL)
1880 && gl_info->supported[ARB_FRAGMENT_PROGRAM]) return &arbfp_fragment_pipeline;
1881 else if (ps_selected_mode == SHADER_ATI) return &atifs_fragment_pipeline;
1882 else if (gl_info->supported[NV_REGISTER_COMBINERS]
1883 && gl_info->supported[NV_TEXTURE_SHADER2]) return &nvts_fragment_pipeline;
1884 else if (gl_info->supported[NV_REGISTER_COMBINERS]) return &nvrc_fragment_pipeline;
1885 else return &ffp_fragment_pipeline;
1886}
1887#endif
1888
1889static const shader_backend_t *select_shader_backend(struct wined3d_adapter *adapter)
1890{
1891 int vs_selected_mode, ps_selected_mode;
1892
1893 select_shader_mode(&adapter->gl_info, &ps_selected_mode, &vs_selected_mode);
1894 if (vs_selected_mode == SHADER_GLSL || ps_selected_mode == SHADER_GLSL) return &glsl_shader_backend;
1895#ifndef VBOX_WITH_VMSVGA
1896 if (vs_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_ARB) return &arb_program_shader_backend;
1897#endif
1898 return &none_shader_backend;
1899}
1900
1901#ifndef VBOX_WITH_VMSVGA
1902static const struct blit_shader *select_blit_implementation(struct wined3d_adapter *adapter)
1903{
1904 const struct wined3d_gl_info *gl_info = &adapter->gl_info;
1905 int vs_selected_mode, ps_selected_mode;
1906
1907 select_shader_mode(gl_info, &ps_selected_mode, &vs_selected_mode);
1908 if ((ps_selected_mode == SHADER_ARB || ps_selected_mode == SHADER_GLSL)
1909 && gl_info->supported[ARB_FRAGMENT_PROGRAM]) return &arbfp_blit;
1910 else return &ffp_blit;
1911}
1912#endif
1913
1914#ifdef VBOX_WITH_VMSVGA
1915/** Checks if @a pszExtension is one of the extensions we're looking for and
1916 * updates @a pGlInfo->supported accordingly. */
1917static void check_gl_extension(struct wined3d_gl_info *pGlInfo, const char *pszExtension)
1918{
1919 size_t i;
1920 TRACE_(d3d_caps)("- %s\n", debugstr_a(pszExtension));
1921 for (i = 0; i < RT_ELEMENTS(EXTENSION_MAP); i++)
1922 if (!strcmp(pszExtension, EXTENSION_MAP[i].extension_string))
1923 {
1924 TRACE_(d3d_caps)(" FOUND: %s support.\n", EXTENSION_MAP[i].extension_string);
1925 pGlInfo->supported[EXTENSION_MAP[i].extension] = TRUE;
1926 return;
1927 }
1928}
1929#endif
1930
1931/* Context activation is done by the caller. */
1932BOOL IWineD3DImpl_FillGLCaps(struct wined3d_adapter *adapter, struct VBOXVMSVGASHADERIF *pVBoxShaderIf)
1933{
1934#ifndef VBOX_WITH_VMSVGA
1935 struct wined3d_driver_info *driver_info = &adapter->driver_info;
1936#endif
1937 struct wined3d_gl_info *gl_info = &adapter->gl_info;
1938#ifndef VBOX_WITH_VMSVGA
1939 const char *GL_Extensions = NULL;
1940 const char *WGL_Extensions = NULL;
1941#endif
1942 const char *gl_vendor_str, *gl_renderer_str, *gl_version_str;
1943#ifndef VBOX_WITH_VMSVGA
1944 struct fragment_caps fragment_caps;
1945#endif
1946 enum wined3d_gl_vendor gl_vendor;
1947 enum wined3d_pci_vendor card_vendor;
1948 enum wined3d_pci_device device;
1949 GLint gl_max;
1950 GLfloat gl_floatv[2];
1951 unsigned i;
1952#ifndef VBOX_WITH_VMSVGA
1953 HDC hdc;
1954#endif
1955 unsigned int vidmem=0;
1956 DWORD gl_version;
1957#ifndef VBOX_WITH_VMSVGA
1958 size_t len;
1959#endif
1960
1961 TRACE_(d3d_caps)("(%p)\n", gl_info);
1962
1963 ENTER_GL();
1964
1965 VBOX_CHECK_GL_CALL(gl_renderer_str = (const char *)glGetString(GL_RENDERER));
1966 TRACE_(d3d_caps)("GL_RENDERER: %s.\n", debugstr_a(gl_renderer_str));
1967 if (!gl_renderer_str)
1968 {
1969 LEAVE_GL();
1970 ERR_(d3d_caps)("Received a NULL GL_RENDERER.\n");
1971 return FALSE;
1972 }
1973
1974 VBOX_CHECK_GL_CALL(gl_vendor_str = (const char *)glGetString(GL_VENDOR));
1975 TRACE_(d3d_caps)("GL_VENDOR: %s.\n", debugstr_a(gl_vendor_str));
1976 if (!gl_vendor_str)
1977 {
1978 LEAVE_GL();
1979 ERR_(d3d_caps)("Received a NULL GL_VENDOR.\n");
1980 return FALSE;
1981 }
1982
1983 /* Parse the GL_VERSION field into major and minor information */
1984 VBOX_CHECK_GL_CALL(gl_version_str = (const char *)glGetString(GL_VERSION));
1985 TRACE_(d3d_caps)("GL_VERSION: %s.\n", debugstr_a(gl_version_str));
1986 if (!gl_version_str)
1987 {
1988 LEAVE_GL();
1989 ERR_(d3d_caps)("Received a NULL GL_VERSION.\n");
1990 return FALSE;
1991 }
1992 gl_version = wined3d_parse_gl_version(gl_version_str);
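    /* For illustration: assuming wined3d_parse_gl_version packs the version as
     * MAKEDWORD_VERSION(major, minor) like the GLSL parsing further down, a
     * GL_VERSION string such as "2.1.2 NVIDIA 169.09" yields gl_version
     * MAKEDWORD_VERSION(2, 1), which the EXTENSION_MAP version fields are
     * compared against later on. */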
1993
1994     /*
1995      * Initialize OpenGL extension-related variables
1996      * with default values.
1997      */
1998 memset(gl_info->supported, 0, sizeof(gl_info->supported));
1999 gl_info->limits.blends = 1;
2000 gl_info->limits.buffers = 1;
2001 gl_info->limits.textures = 1;
2002 gl_info->limits.fragment_samplers = 1;
2003 gl_info->limits.vertex_samplers = 0;
2004 gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers + gl_info->limits.vertex_samplers;
2005 gl_info->limits.sampler_stages = 1;
2006 gl_info->limits.glsl_vs_float_constants = 0;
2007 gl_info->limits.glsl_ps_float_constants = 0;
2008 gl_info->limits.arb_vs_float_constants = 0;
2009 gl_info->limits.arb_vs_native_constants = 0;
2010 gl_info->limits.arb_vs_instructions = 0;
2011 gl_info->limits.arb_vs_temps = 0;
2012 gl_info->limits.arb_ps_float_constants = 0;
2013 gl_info->limits.arb_ps_local_constants = 0;
2014 gl_info->limits.arb_ps_instructions = 0;
2015 gl_info->limits.arb_ps_temps = 0;
2016
2017 /* Retrieve opengl defaults */
2018 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_CLIP_PLANES, &gl_max));
2019 gl_info->limits.clipplanes = min(WINED3DMAXUSERCLIPPLANES, gl_max);
2020 TRACE_(d3d_caps)("ClipPlanes support - num Planes=%d\n", gl_max);
2021
2022#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2023 glGetIntegerv(GL_MAX_LIGHTS, &gl_max);
2024 if (glGetError() != GL_NO_ERROR)
2025 {
2026 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2027 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_LIGHTS, &gl_max));
2028 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2029 }
2030#else
2031 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_LIGHTS, &gl_max));
2032#endif
2033 gl_info->limits.lights = gl_max;
2034 TRACE_(d3d_caps)("Lights support - max lights=%d\n", gl_max);
2035
2036 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_SIZE, &gl_max));
2037 gl_info->limits.texture_size = gl_max;
2038 TRACE_(d3d_caps)("Maximum texture size support - max texture size=%d\n", gl_max);
2039
2040#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2041 glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv);
2042 if (glGetError() != GL_NO_ERROR)
2043 {
2044 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2045 VBOX_CHECK_GL_CALL(glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv));
2046 if (glGetError() != GL_NO_ERROR)
2047 gl_floatv[0] = gl_floatv[1] = 1;
2048 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2049 }
2050#else
2051 VBOX_CHECK_GL_CALL(glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, gl_floatv));
2052#endif
2053 gl_info->limits.pointsize_min = gl_floatv[0];
2054 gl_info->limits.pointsize_max = gl_floatv[1];
2055 TRACE_(d3d_caps)("Maximum point size support - max point size=%f\n", gl_floatv[1]);
2056
2057 /* Parse the gl supported features, in theory enabling parts of our code appropriately. */
2058#ifndef VBOX_WITH_VMSVGA
2059 GL_Extensions = (const char *)glGetString(GL_EXTENSIONS);
2060 if (!GL_Extensions)
2061 {
2062 LEAVE_GL();
2063 ERR_(d3d_caps)("Received a NULL GL_EXTENSIONS.\n");
2064 return FALSE;
2065 }
2066
2067 LEAVE_GL();
2068
2069 TRACE_(d3d_caps)("GL_Extensions reported:\n");
2070#endif
2071
2072 gl_info->supported[WINED3D_GL_EXT_NONE] = TRUE;
2073
2074 gl_info->supported[VBOX_SHARED_CONTEXTS] = TRUE;
2075
2076#ifdef VBOX_WITH_VMSVGA
2077 {
2078 void *pvEnumCtx = NULL;
2079 char szCurExt[256];
2080 while (pVBoxShaderIf->pfnGetNextExtension(pVBoxShaderIf, &pvEnumCtx, szCurExt, sizeof(szCurExt), false /*fOtherProfile*/))
2081 check_gl_extension(gl_info, szCurExt);
2082
2083         /* The cheap way out: also pick up the other profile's extensions. */
2084 pvEnumCtx = NULL;
2085 while (pVBoxShaderIf->pfnGetNextExtension(pVBoxShaderIf, &pvEnumCtx, szCurExt, sizeof(szCurExt), true /*fOtherProfile*/))
2086 check_gl_extension(gl_info, szCurExt);
2087 }
2088#else /* !VBOX_WITH_VMSVGA */
2089 while (*GL_Extensions)
2090 {
2091 const char *start;
2092 char current_ext[256];
2093
2094 while (isspace(*GL_Extensions)) ++GL_Extensions;
2095 start = GL_Extensions;
2096 while (!isspace(*GL_Extensions) && *GL_Extensions) ++GL_Extensions;
2097
2098 len = GL_Extensions - start;
2099 if (!len || len >= sizeof(current_ext)) continue;
2100
2101 memcpy(current_ext, start, len);
2102 current_ext[len] = '\0';
2103 TRACE_(d3d_caps)("- %s\n", debugstr_a(current_ext));
2104
2105 for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
2106 {
2107 if (!strcmp(current_ext, EXTENSION_MAP[i].extension_string))
2108 {
2109 TRACE_(d3d_caps)(" FOUND: %s support.\n", EXTENSION_MAP[i].extension_string);
2110 gl_info->supported[EXTENSION_MAP[i].extension] = TRUE;
2111 break;
2112 }
2113 }
2114 }
2115#endif /* !VBOX_WITH_VMSVGA */
2116
2117#ifdef VBOX_WITH_VMSVGA
2118# ifdef RT_OS_WINDOWS
2119# define OGLGETPROCADDRESS wglGetProcAddress
2120 # elif defined(RT_OS_DARWIN)
2121# define OGLGETPROCADDRESS(x) MyNSGLGetProcAddress(x)
2122# else
2123extern void (*glXGetProcAddress(const GLubyte *procname))( void );
2124# define OGLGETPROCADDRESS(x) glXGetProcAddress((const GLubyte *)x)
2125# endif
2126#endif
2127
2128 /* Now work out what GL support this card really has */
2129#define USE_GL_FUNC(type, pfn, ext, replace) \
2130{ \
2131 DWORD ver = ver_for_ext(ext); \
2132 if (gl_info->supported[ext]) gl_info->pfn = (type)OGLGETPROCADDRESS(#pfn); \
2133 else if (ver && ver <= gl_version) gl_info->pfn = (type)OGLGETPROCADDRESS(#replace); \
2134 else gl_info->pfn = NULL; \
2135}
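/* For illustration, a single USE_GL_FUNC expansion (hypothetical entry; the real
 * list comes from GL_EXT_FUNCS_GEN) resolves one entry point like this:
 *   if (gl_info->supported[ARB_MULTITEXTURE])
 *       gl_info->glActiveTextureARB = (...)OGLGETPROCADDRESS("glActiveTextureARB");
 *   else if (ver_for_ext(ARB_MULTITEXTURE) && ver_for_ext(ARB_MULTITEXTURE) <= gl_version)
 *       gl_info->glActiveTextureARB = (...)OGLGETPROCADDRESS("glActiveTexture");
 *   else
 *       gl_info->glActiveTextureARB = NULL;
 * i.e. prefer the extension entry point, fall back to the core name when the GL
 * version includes it, and leave the pointer NULL otherwise. */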
2136 GL_EXT_FUNCS_GEN;
2137#undef USE_GL_FUNC
2138
2139#ifndef VBOX_WITH_VMSVGA
2140#define USE_GL_FUNC(type, pfn, ext, replace) gl_info->pfn = (type)OGLGETPROCADDRESS(#pfn);
2141 WGL_EXT_FUNCS_GEN;
2142#undef USE_GL_FUNC
2143#endif
2144
2145 ENTER_GL();
2146
2147 /* Now mark all the extensions supported which are included in the opengl core version. Do this *after*
2148 * loading the functions, otherwise the code above will load the extension entry points instead of the
2149 * core functions, which may not work. */
2150 for (i = 0; i < (sizeof(EXTENSION_MAP) / sizeof(*EXTENSION_MAP)); ++i)
2151 {
2152 if (!gl_info->supported[EXTENSION_MAP[i].extension]
2153 && EXTENSION_MAP[i].version <= gl_version && EXTENSION_MAP[i].version)
2154 {
2155 TRACE_(d3d_caps)(" GL CORE: %s support.\n", EXTENSION_MAP[i].extension_string);
2156 gl_info->supported[EXTENSION_MAP[i].extension] = TRUE;
2157 }
2158 }
2159
2160 if (gl_info->supported[APPLE_FENCE])
2161 {
2162         /* GL_NV_fence and GL_APPLE_fence provide basically the same functionality.
2163          * The Apple extension interacts with some other Apple extensions. Disable the NV
2164          * extension if the Apple one is supported to prevent confusion in other parts
2165          * of the code. */
2166 gl_info->supported[NV_FENCE] = FALSE;
2167 }
2168 if (gl_info->supported[APPLE_FLOAT_PIXELS])
2169 {
2170 /* GL_APPLE_float_pixels == GL_ARB_texture_float + GL_ARB_half_float_pixel
2171 *
2172 * The enums are the same:
2173 * GL_RGBA16F_ARB = GL_RGBA_FLOAT16_APPLE = 0x881A
2174 * GL_RGB16F_ARB = GL_RGB_FLOAT16_APPLE = 0x881B
2175 * GL_RGBA32F_ARB = GL_RGBA_FLOAT32_APPLE = 0x8814
2176 * GL_RGB32F_ARB = GL_RGB_FLOAT32_APPLE = 0x8815
2177 * GL_HALF_FLOAT_ARB = GL_HALF_APPLE = 0x140B
2178 */
2179 if (!gl_info->supported[ARB_TEXTURE_FLOAT])
2180 {
2181             TRACE_(d3d_caps)(" IMPLIED: GL_ARB_texture_float support (from GL_APPLE_float_pixels).\n");
2182 gl_info->supported[ARB_TEXTURE_FLOAT] = TRUE;
2183 }
2184 if (!gl_info->supported[ARB_HALF_FLOAT_PIXEL])
2185 {
2186             TRACE_(d3d_caps)(" IMPLIED: GL_ARB_half_float_pixel support (from GL_APPLE_float_pixels).\n");
2187 gl_info->supported[ARB_HALF_FLOAT_PIXEL] = TRUE;
2188 }
2189 }
2190 if (gl_info->supported[ARB_MAP_BUFFER_RANGE])
2191 {
2192 /* GL_ARB_map_buffer_range and GL_APPLE_flush_buffer_range provide the same
2193          * functionality. Prefer the ARB extension. */
2194 gl_info->supported[APPLE_FLUSH_BUFFER_RANGE] = FALSE;
2195 }
2196 if (gl_info->supported[ARB_TEXTURE_CUBE_MAP])
2197 {
2198 TRACE_(d3d_caps)(" IMPLIED: NVIDIA (NV) Texture Gen Reflection support.\n");
2199 gl_info->supported[NV_TEXGEN_REFLECTION] = TRUE;
2200 }
2201 if (!gl_info->supported[ARB_DEPTH_CLAMP] && gl_info->supported[NV_DEPTH_CLAMP])
2202 {
2203 TRACE_(d3d_caps)(" IMPLIED: ARB_depth_clamp support (by NV_depth_clamp).\n");
2204 gl_info->supported[ARB_DEPTH_CLAMP] = TRUE;
2205 }
2206 if (!gl_info->supported[ARB_VERTEX_ARRAY_BGRA] && gl_info->supported[EXT_VERTEX_ARRAY_BGRA])
2207 {
2208 TRACE_(d3d_caps)(" IMPLIED: ARB_vertex_array_bgra support (by EXT_vertex_array_bgra).\n");
2209 gl_info->supported[ARB_VERTEX_ARRAY_BGRA] = TRUE;
2210 }
2211 if (gl_info->supported[NV_TEXTURE_SHADER2])
2212 {
2213 if (gl_info->supported[NV_REGISTER_COMBINERS])
2214 {
2215 /* Also disable ATI_FRAGMENT_SHADER if register combiners and texture_shader2
2216              * are supported. The NV extensions provide the same functionality as the
2217              * ATI one, and a bit more (signed pixel formats). */
2218 gl_info->supported[ATI_FRAGMENT_SHADER] = FALSE;
2219 }
2220 }
2221
2222 if (gl_info->supported[NV_REGISTER_COMBINERS])
2223 {
2224 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_GENERAL_COMBINERS_NV, &gl_max));
2225 gl_info->limits.general_combiners = gl_max;
2226 TRACE_(d3d_caps)("Max general combiners: %d.\n", gl_max);
2227 }
2228 if (gl_info->supported[ARB_DRAW_BUFFERS])
2229 {
2230 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_DRAW_BUFFERS_ARB, &gl_max));
2231 gl_info->limits.buffers = gl_max;
2232 TRACE_(d3d_caps)("Max draw buffers: %u.\n", gl_max);
2233 }
2234 if (gl_info->supported[ARB_MULTITEXTURE])
2235 {
2236#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2237 glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &gl_max);
2238 if (glGetError() != GL_NO_ERROR)
2239 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &gl_max));
2240#else
2241 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_UNITS_ARB, &gl_max));
2242#endif
2243 gl_info->limits.textures = min(MAX_TEXTURES, gl_max);
2244 TRACE_(d3d_caps)("Max textures: %d.\n", gl_info->limits.textures);
2245
2246 if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
2247 {
2248 GLint tmp;
2249 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS_ARB, &tmp));
2250 gl_info->limits.fragment_samplers = min(MAX_FRAGMENT_SAMPLERS, tmp);
2251 }
2252 else
2253 {
2254 gl_info->limits.fragment_samplers = max(gl_info->limits.fragment_samplers, (UINT)gl_max);
2255 }
2256 TRACE_(d3d_caps)("Max fragment samplers: %d.\n", gl_info->limits.fragment_samplers);
2257
2258 if (gl_info->supported[ARB_VERTEX_SHADER])
2259 {
2260 GLint tmp;
2261 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB, &tmp));
2262 gl_info->limits.vertex_samplers = tmp;
2263 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB, &tmp));
2264 gl_info->limits.combined_samplers = tmp;
2265
2266 /* Loading GLSL sampler uniforms is much simpler if we can assume that the sampler setup
2267 * is known at shader link time. In a vertex shader + pixel shader combination this isn't
2268 * an issue because then the sampler setup only depends on the two shaders. If a pixel
2269 * shader is used with fixed function vertex processing we're fine too because fixed function
2270 * vertex processing doesn't use any samplers. If fixed function fragment processing is
2271 * used we have to make sure that all vertex sampler setups are valid together with all
2272 * possible fixed function fragment processing setups. This is true if vsamplers + MAX_TEXTURES
2273              * <= max_samplers. This is true on all d3d9 cards that support vertex texture fetch (GeForce 6 and 7 cards).
2274              * dx9 Radeon cards do not support vertex texture fetch. DX10 cards have 128 samplers, and
2275              * dx9 is limited to 8 fixed function texture stages and 4 vertex samplers. DX10 does not have
2276 * a fixed function pipeline anymore.
2277 *
2278              * So this is just a sanity check that our assumption holds true. If not, write a warning
2279              * and reduce the number of vertex samplers, or probably disable vertex texture fetch. */
2280 if (gl_info->limits.vertex_samplers && gl_info->limits.combined_samplers < 12
2281 && MAX_TEXTURES + gl_info->limits.vertex_samplers > gl_info->limits.combined_samplers)
2282 {
2283 FIXME("OpenGL implementation supports %u vertex samplers and %u total samplers.\n",
2284 gl_info->limits.vertex_samplers, gl_info->limits.combined_samplers);
2285                 FIXME("Expected vertex samplers + MAX_TEXTURES(=8) <= combined_samplers.\n");
2286 if (gl_info->limits.combined_samplers > MAX_TEXTURES)
2287 gl_info->limits.vertex_samplers = gl_info->limits.combined_samplers - MAX_TEXTURES;
2288 else
2289 gl_info->limits.vertex_samplers = 0;
2290 }
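            /* Worked example, assuming MAX_TEXTURES is 8 as the FIXME above says:
             * a driver reporting 4 vertex samplers but only 10 combined samplers
             * trips the check (8 + 4 > 10), so the vertex sampler limit is clamped
             * to 10 - 8 = 2; with only 8 combined samplers it drops to 0. */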
2291 }
2292 else
2293 {
2294 gl_info->limits.combined_samplers = gl_info->limits.fragment_samplers;
2295 }
2296 TRACE_(d3d_caps)("Max vertex samplers: %u.\n", gl_info->limits.vertex_samplers);
2297 TRACE_(d3d_caps)("Max combined samplers: %u.\n", gl_info->limits.combined_samplers);
2298 }
2299 if (gl_info->supported[ARB_VERTEX_BLEND])
2300 {
2301#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2302 glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max);
2303 if (glGetError() != GL_NO_ERROR)
2304 {
2305 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2306 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max));
2307 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2308 }
2309#else
2310 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNITS_ARB, &gl_max));
2311#endif
2312 gl_info->limits.blends = gl_max;
2313 TRACE_(d3d_caps)("Max blends: %u.\n", gl_info->limits.blends);
2314 }
2315 if (gl_info->supported[EXT_TEXTURE3D])
2316 {
2317 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE_EXT, &gl_max));
2318 gl_info->limits.texture3d_size = gl_max;
2319 TRACE_(d3d_caps)("Max texture3D size: %d.\n", gl_info->limits.texture3d_size);
2320 }
2321 if (gl_info->supported[EXT_TEXTURE_FILTER_ANISOTROPIC])
2322 {
2323 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &gl_max));
2324 gl_info->limits.anisotropy = gl_max;
2325 TRACE_(d3d_caps)("Max anisotropy: %d.\n", gl_info->limits.anisotropy);
2326 }
2327 if (gl_info->supported[ARB_FRAGMENT_PROGRAM])
2328 {
2329#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2330 GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
2331 if (glGetError() != GL_NO_ERROR)
2332 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2333#endif
2334 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max)));
2335 gl_info->limits.arb_ps_float_constants = gl_max;
2336 TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM float constants: %d.\n", gl_info->limits.arb_ps_float_constants);
2337 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max)));
2338 gl_info->limits.arb_ps_native_constants = gl_max;
2339 TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native float constants: %d.\n",
2340 gl_info->limits.arb_ps_native_constants);
2341 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max)));
2342 gl_info->limits.arb_ps_temps = gl_max;
2343 TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_ps_temps);
2344 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max)));
2345 gl_info->limits.arb_ps_instructions = gl_max;
2346 TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM native instructions: %d.\n", gl_info->limits.arb_ps_instructions);
2347 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_FRAGMENT_PROGRAM_ARB, GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB, &gl_max)));
2348 gl_info->limits.arb_ps_local_constants = gl_max;
2349         TRACE_(d3d_caps)("Max ARB_FRAGMENT_PROGRAM local parameters: %d.\n", gl_info->limits.arb_ps_local_constants);
2350#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2351 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2352#endif
2353 }
2354 if (gl_info->supported[ARB_VERTEX_PROGRAM])
2355 {
2356#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2357 GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max));
2358 if (glGetError() != GL_NO_ERROR)
2359 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2360#endif
2361 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_ENV_PARAMETERS_ARB, &gl_max)));
2362 gl_info->limits.arb_vs_float_constants = gl_max;
2363 TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM float constants: %d.\n", gl_info->limits.arb_vs_float_constants);
2364 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB, &gl_max)));
2365 gl_info->limits.arb_vs_native_constants = gl_max;
2366 TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native float constants: %d.\n",
2367 gl_info->limits.arb_vs_native_constants);
2368 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB, &gl_max)));
2369 gl_info->limits.arb_vs_temps = gl_max;
2370 TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native temporaries: %d.\n", gl_info->limits.arb_vs_temps);
2371 VBOX_CHECK_GL_CALL(GL_EXTCALL(glGetProgramivARB(GL_VERTEX_PROGRAM_ARB, GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB, &gl_max)));
2372 gl_info->limits.arb_vs_instructions = gl_max;
2373 TRACE_(d3d_caps)("Max ARB_VERTEX_PROGRAM native instructions: %d.\n", gl_info->limits.arb_vs_instructions);
2374#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2375 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2376#endif
2377#ifndef VBOX_WITH_VMSVGA
2378 if (test_arb_vs_offset_limit(gl_info)) gl_info->quirks |= WINED3D_QUIRK_ARB_VS_OFFSET_LIMIT;
2379#endif
2380 }
2381 if (gl_info->supported[ARB_VERTEX_SHADER])
2382 {
2383 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB, &gl_max));
2384 gl_info->limits.glsl_vs_float_constants = gl_max / 4;
2385#ifdef VBOX_WITH_WDDM
2386         /* AFAICT the " / 4" here comes from the fact that we use the glsl_vs/ps_float_constants to create vec4 arrays,
2387          * thus each array element has 4 components, so the actual number of vec4 arrays is GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4.
2388          * Win8 Aero won't work properly with this constant < 256 in any case,
2389          * while the Intel drivers I've encountered this problem with support vec4 arrays of size > GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4,
2390          * so use it here.
2391          * @todo: add logging
2392          * @todo: perhaps this should be moved to quirks?
2393          */
2394 if (gl_info->limits.glsl_vs_float_constants < 256 && gl_max >= 256)
2395 {
2396 DWORD dwVersion = GetVersion();
2397 DWORD dwMajor = (DWORD)(LOBYTE(LOWORD(dwVersion)));
2398 DWORD dwMinor = (DWORD)(HIBYTE(LOWORD(dwVersion)));
2399             /* Temporary workaround for the Win8 Aero requirement of 256. */
2400 if (dwMajor > 6 || dwMinor > 1)
2401 {
2402 gl_info->limits.glsl_vs_float_constants = 256;
2403 }
2404 }
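        /* For reference: Windows 8 reports version 6.2 through GetVersion(), so
         * dwMajor = 6 and dwMinor = 2, the dwMinor > 1 test holds and the limit
         * is raised to the 256 vec4 constants Aero expects; Windows 7 (6.1) is
         * left with the driver-reported value. */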
2405#endif
2406 TRACE_(d3d_caps)("Max ARB_VERTEX_SHADER float constants: %u.\n", gl_info->limits.glsl_vs_float_constants);
2407 }
2408 if (gl_info->supported[ARB_FRAGMENT_SHADER])
2409 {
2410 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB, &gl_max));
2411 gl_info->limits.glsl_ps_float_constants = gl_max / 4;
2412#ifdef VBOX_WITH_WDDM
2413         /* AFAICT the " / 4" here comes from the fact that we use the glsl_vs/ps_float_constants to create vec4 arrays,
2414          * thus each array element has 4 components, so the actual number of vec4 arrays is GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4.
2415          * Win8 Aero won't work properly with this constant < 256 in any case,
2416          * while the Intel drivers I've encountered this problem with support vec4 arrays of size > GL_MAX_VERTEX/FRAGMENT_UNIFORM_COMPONENTS_ARB / 4,
2417          * so use it here.
2418          * @todo: add logging
2419          * @todo: perhaps this should be moved to quirks?
2420          */
2421 if (gl_info->limits.glsl_ps_float_constants < 256 && gl_max >= 256)
2422 {
2423 DWORD dwVersion = GetVersion();
2424 DWORD dwMajor = (DWORD)(LOBYTE(LOWORD(dwVersion)));
2425 DWORD dwMinor = (DWORD)(HIBYTE(LOWORD(dwVersion)));
2426             /* Temporary workaround for the Win8 Aero requirement of 256. */
2427 if (dwMajor > 6 || dwMinor > 1)
2428 {
2429 gl_info->limits.glsl_ps_float_constants = 256;
2430 }
2431 }
2432#endif
2433 TRACE_(d3d_caps)("Max ARB_FRAGMENT_SHADER float constants: %u.\n", gl_info->limits.glsl_ps_float_constants);
2434#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2435 glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max);
2436 if (glGetError() != GL_NO_ERROR)
2437 {
2438 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2439 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max));
2440 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2441 }
2442#else
2443 VBOX_CHECK_GL_CALL(glGetIntegerv(GL_MAX_VARYING_FLOATS_ARB, &gl_max));
2444#endif
2445 gl_info->limits.glsl_varyings = gl_max;
2446 TRACE_(d3d_caps)("Max GLSL varyings: %u (%u 4 component varyings).\n", gl_max, gl_max / 4);
2447 }
2448 if (gl_info->supported[ARB_SHADING_LANGUAGE_100])
2449 {
2450 const char *str = (const char *)glGetString(GL_SHADING_LANGUAGE_VERSION_ARB);
2451 unsigned int major, minor;
2452
2453 TRACE_(d3d_caps)("GLSL version string: %s.\n", debugstr_a(str));
2454
2455 /* The format of the GLSL version string is "major.minor[.release] [vendor info]". */
2456 sscanf(str, "%u.%u", &major, &minor);
2457 gl_info->glsl_version = MAKEDWORD_VERSION(major, minor);
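        /* E.g. a version string of "1.20 NVIDIA via Cg compiler" parses as
         * major 1, minor 20, giving glsl_version MAKEDWORD_VERSION(1, 20). */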
2458 }
2459 if (gl_info->supported[NV_LIGHT_MAX_EXPONENT])
2460 {
2461#ifdef VBOX_VMSVGA3D_DUAL_OPENGL_PROFILE
2462 glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess);
2463 if (glGetError() != GL_NO_ERROR)
2464 {
2465 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, true /*fOtherProfile*/);
2466 VBOX_CHECK_GL_CALL(glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess));
2467 pVBoxShaderIf->pfnSwitchInitProfile(pVBoxShaderIf, false /*fOtherProfile*/);
2468 }
2469#else
2470 VBOX_CHECK_GL_CALL(glGetFloatv(GL_MAX_SHININESS_NV, &gl_info->limits.shininess));
2471#endif
2472 }
2473 else
2474 {
2475 gl_info->limits.shininess = 128.0f;
2476 }
2477 if (gl_info->supported[ARB_TEXTURE_NON_POWER_OF_TWO])
2478 {
2479 /* If we have full NP2 texture support, disable
2480 * GL_ARB_texture_rectangle because we will never use it.
2481 * This saves a few redundant glDisable calls. */
2482 gl_info->supported[ARB_TEXTURE_RECTANGLE] = FALSE;
2483 }
2484 if (gl_info->supported[ATI_FRAGMENT_SHADER])
2485 {
2486 /* Disable NV_register_combiners and fragment shader if this is supported.
2487          * Generally the NV extensions are preferred over the ATI ones, and this
2488 * extension is disabled if register_combiners and texture_shader2 are both
2489 * supported. So we reach this place only if we have incomplete NV dxlevel 8
2490 * fragment processing support. */
2491 gl_info->supported[NV_REGISTER_COMBINERS] = FALSE;
2492 gl_info->supported[NV_REGISTER_COMBINERS2] = FALSE;
2493 gl_info->supported[NV_TEXTURE_SHADER] = FALSE;
2494 gl_info->supported[NV_TEXTURE_SHADER2] = FALSE;
2495 }
2496 if (gl_info->supported[NV_HALF_FLOAT])
2497 {
2498 /* GL_ARB_half_float_vertex is a subset of GL_NV_half_float. */
2499 gl_info->supported[ARB_HALF_FLOAT_VERTEX] = TRUE;
2500 }
2501 if (gl_info->supported[ARB_POINT_SPRITE])
2502 {
2503 gl_info->limits.point_sprite_units = gl_info->limits.textures;
2504 }
2505 else
2506 {
2507 gl_info->limits.point_sprite_units = 0;
2508 }
2509#ifndef VBOX_WITH_VMSVGA
2510 checkGLcall("extension detection");
2511#endif
2512 LEAVE_GL();
2513
2514#ifndef VBOX_WITH_VMSVGA
2515 adapter->fragment_pipe = select_fragment_implementation(adapter);
2516#endif
2517 adapter->shader_backend = select_shader_backend(adapter);
2518#ifndef VBOX_WITH_VMSVGA
2519 adapter->blitter = select_blit_implementation(adapter);
2520
2521 adapter->fragment_pipe->get_caps(gl_info, &fragment_caps);
2522 gl_info->limits.texture_stages = fragment_caps.MaxTextureBlendStages;
2523 TRACE_(d3d_caps)("Max texture stages: %u.\n", gl_info->limits.texture_stages);
2524
2525 /* In some cases the number of texture stages can be larger than the number
2526 * of samplers. The GF4 for example can use only 2 samplers (no fragment
2527 * shaders), but 8 texture stages (register combiners). */
2528 gl_info->limits.sampler_stages = max(gl_info->limits.fragment_samplers, gl_info->limits.texture_stages);
2529#endif
2530
2531 if (gl_info->supported[ARB_FRAMEBUFFER_OBJECT])
2532 {
2533 gl_info->fbo_ops.glIsRenderbuffer = gl_info->glIsRenderbuffer;
2534 gl_info->fbo_ops.glBindRenderbuffer = gl_info->glBindRenderbuffer;
2535 gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->glDeleteRenderbuffers;
2536 gl_info->fbo_ops.glGenRenderbuffers = gl_info->glGenRenderbuffers;
2537 gl_info->fbo_ops.glRenderbufferStorage = gl_info->glRenderbufferStorage;
2538 gl_info->fbo_ops.glRenderbufferStorageMultisample = gl_info->glRenderbufferStorageMultisample;
2539 gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->glGetRenderbufferParameteriv;
2540 gl_info->fbo_ops.glIsFramebuffer = gl_info->glIsFramebuffer;
2541 gl_info->fbo_ops.glBindFramebuffer = gl_info->glBindFramebuffer;
2542 gl_info->fbo_ops.glDeleteFramebuffers = gl_info->glDeleteFramebuffers;
2543 gl_info->fbo_ops.glGenFramebuffers = gl_info->glGenFramebuffers;
2544 gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->glCheckFramebufferStatus;
2545 gl_info->fbo_ops.glFramebufferTexture1D = gl_info->glFramebufferTexture1D;
2546 gl_info->fbo_ops.glFramebufferTexture2D = gl_info->glFramebufferTexture2D;
2547 gl_info->fbo_ops.glFramebufferTexture3D = gl_info->glFramebufferTexture3D;
2548 gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->glFramebufferRenderbuffer;
2549 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameteriv;
2550 gl_info->fbo_ops.glBlitFramebuffer = gl_info->glBlitFramebuffer;
2551 gl_info->fbo_ops.glGenerateMipmap = gl_info->glGenerateMipmap;
2552 }
2553 else
2554 {
2555 if (gl_info->supported[EXT_FRAMEBUFFER_OBJECT])
2556 {
2557 gl_info->fbo_ops.glIsRenderbuffer = gl_info->glIsRenderbufferEXT;
2558 gl_info->fbo_ops.glBindRenderbuffer = gl_info->glBindRenderbufferEXT;
2559 gl_info->fbo_ops.glDeleteRenderbuffers = gl_info->glDeleteRenderbuffersEXT;
2560 gl_info->fbo_ops.glGenRenderbuffers = gl_info->glGenRenderbuffersEXT;
2561 gl_info->fbo_ops.glRenderbufferStorage = gl_info->glRenderbufferStorageEXT;
2562 gl_info->fbo_ops.glGetRenderbufferParameteriv = gl_info->glGetRenderbufferParameterivEXT;
2563 gl_info->fbo_ops.glIsFramebuffer = gl_info->glIsFramebufferEXT;
2564 gl_info->fbo_ops.glBindFramebuffer = gl_info->glBindFramebufferEXT;
2565 gl_info->fbo_ops.glDeleteFramebuffers = gl_info->glDeleteFramebuffersEXT;
2566 gl_info->fbo_ops.glGenFramebuffers = gl_info->glGenFramebuffersEXT;
2567 gl_info->fbo_ops.glCheckFramebufferStatus = gl_info->glCheckFramebufferStatusEXT;
2568 gl_info->fbo_ops.glFramebufferTexture1D = gl_info->glFramebufferTexture1DEXT;
2569 gl_info->fbo_ops.glFramebufferTexture2D = gl_info->glFramebufferTexture2DEXT;
2570 gl_info->fbo_ops.glFramebufferTexture3D = gl_info->glFramebufferTexture3DEXT;
2571 gl_info->fbo_ops.glFramebufferRenderbuffer = gl_info->glFramebufferRenderbufferEXT;
2572 gl_info->fbo_ops.glGetFramebufferAttachmentParameteriv = gl_info->glGetFramebufferAttachmentParameterivEXT;
2573 gl_info->fbo_ops.glGenerateMipmap = gl_info->glGenerateMipmapEXT;
2574 }
2575#ifndef VBOX_WITH_VMSVGA
2576 else if (wined3d_settings.offscreen_rendering_mode == ORM_FBO)
2577 {
2578 WARN_(d3d_caps)("Framebuffer objects not supported, falling back to backbuffer offscreen rendering mode.\n");
2579 wined3d_settings.offscreen_rendering_mode = ORM_BACKBUFFER;
2580 }
2581#endif
2582 if (gl_info->supported[EXT_FRAMEBUFFER_BLIT])
2583 {
2584 gl_info->fbo_ops.glBlitFramebuffer = gl_info->glBlitFramebufferEXT;
2585 }
2586 if (gl_info->supported[EXT_FRAMEBUFFER_MULTISAMPLE])
2587 {
2588 gl_info->fbo_ops.glRenderbufferStorageMultisample = gl_info->glRenderbufferStorageMultisampleEXT;
2589 }
2590 }
2591
2592#ifndef VBOX_WITH_VMSVGA
2593 /* MRTs are currently only supported when FBOs are used. */
2594 if (wined3d_settings.offscreen_rendering_mode != ORM_FBO)
2595 {
2596 gl_info->limits.buffers = 1;
2597 }
2598#endif
2599 gl_vendor = wined3d_guess_gl_vendor(gl_info, gl_vendor_str, gl_renderer_str);
2600 card_vendor = wined3d_guess_card_vendor(gl_vendor_str, gl_renderer_str);
2601 TRACE_(d3d_caps)("found GL_VENDOR (%s)->(0x%04x/0x%04x)\n", debugstr_a(gl_vendor_str), gl_vendor, card_vendor);
2602
2603 device = wined3d_guess_card(gl_info, gl_renderer_str, &gl_vendor, &card_vendor, &vidmem);
2604 TRACE_(d3d_caps)("FOUND (fake) card: 0x%x (vendor id), 0x%x (device id)\n", card_vendor, device);
2605
2606     /* If we have an estimate use it, else default to 64MB. */
2607 if(vidmem)
2608 gl_info->vidmem = vidmem*1024*1024; /* convert from MBs to bytes */
2609 else
2610 gl_info->vidmem = WINE_DEFAULT_VIDMEM;
2611
2612 gl_info->wrap_lookup[WINED3DTADDRESS_WRAP - WINED3DTADDRESS_WRAP] = GL_REPEAT;
2613 gl_info->wrap_lookup[WINED3DTADDRESS_MIRROR - WINED3DTADDRESS_WRAP] =
2614 gl_info->supported[ARB_TEXTURE_MIRRORED_REPEAT] ? GL_MIRRORED_REPEAT_ARB : GL_REPEAT;
2615 gl_info->wrap_lookup[WINED3DTADDRESS_CLAMP - WINED3DTADDRESS_WRAP] = GL_CLAMP_TO_EDGE;
2616 gl_info->wrap_lookup[WINED3DTADDRESS_BORDER - WINED3DTADDRESS_WRAP] =
2617 gl_info->supported[ARB_TEXTURE_BORDER_CLAMP] ? GL_CLAMP_TO_BORDER_ARB : GL_REPEAT;
2618 gl_info->wrap_lookup[WINED3DTADDRESS_MIRRORONCE - WINED3DTADDRESS_WRAP] =
2619 gl_info->supported[ATI_TEXTURE_MIRROR_ONCE] ? GL_MIRROR_CLAMP_TO_EDGE_ATI : GL_REPEAT;
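    /* The table above is indexed by address mode relative to WINED3DTADDRESS_WRAP,
     * so a lookup such as
     *   wrap_lookup[WINED3DTADDRESS_MIRROR - WINED3DTADDRESS_WRAP]
     * yields GL_MIRRORED_REPEAT_ARB when the extension is present and otherwise
     * falls back to GL_REPEAT as an approximation. */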
2620
2621#ifndef VBOX_WITH_VMSVGA
2622 /* Make sure there's an active HDC else the WGL extensions will fail */
2623 hdc = pwglGetCurrentDC();
2624 if (hdc) {
2625         /* Not all GL drivers offer WGL extensions, e.g. VirtualBox. */
2626 if(GL_EXTCALL(wglGetExtensionsStringARB))
2627 WGL_Extensions = GL_EXTCALL(wglGetExtensionsStringARB(hdc));
2628
2629 if (NULL == WGL_Extensions) {
2630 ERR(" WGL_Extensions returns NULL\n");
2631 } else {
2632 TRACE_(d3d_caps)("WGL_Extensions reported:\n");
2633 while (*WGL_Extensions != 0x00) {
2634 const char *Start;
2635 char ThisExtn[256];
2636
2637 while (isspace(*WGL_Extensions)) WGL_Extensions++;
2638 Start = WGL_Extensions;
2639 while (!isspace(*WGL_Extensions) && *WGL_Extensions != 0x00) {
2640 WGL_Extensions++;
2641 }
2642
2643 len = WGL_Extensions - Start;
2644 if (len == 0 || len >= sizeof(ThisExtn))
2645 continue;
2646
2647 memcpy(ThisExtn, Start, len);
2648 ThisExtn[len] = '\0';
2649 TRACE_(d3d_caps)("- %s\n", debugstr_a(ThisExtn));
2650
2651 if (!strcmp(ThisExtn, "WGL_ARB_pixel_format")) {
2652 gl_info->supported[WGL_ARB_PIXEL_FORMAT] = TRUE;
2653 TRACE_(d3d_caps)("FOUND: WGL_ARB_pixel_format support\n");
2654 }
2655 if (!strcmp(ThisExtn, "WGL_WINE_pixel_format_passthrough")) {
2656 gl_info->supported[WGL_WINE_PIXEL_FORMAT_PASSTHROUGH] = TRUE;
2657 TRACE_(d3d_caps)("FOUND: WGL_WINE_pixel_format_passthrough support\n");
2658 }
2659 }
2660 }
2661 }
2662#endif
2663
2664 fixup_extensions(gl_info, gl_renderer_str, gl_vendor, card_vendor, device);
2665#ifndef VBOX_WITH_VMSVGA
2666 init_driver_info(driver_info, card_vendor, device);
2667 add_gl_compat_wrappers(gl_info);
2668#endif
2669
2670 return TRUE;
2671}
2672