VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/ldr/ldrLX.cpp@ 74880

Last change on this file since 74880 was 74664, checked in by vboxsync, 6 years ago

common/ldr: add RT_FALL_THRU() to recently added files to fix build.
bugref:9232: disabling SIP through VirtualBox kext

1/* $Id: ldrLX.cpp 74664 2018-10-08 09:51:47Z vboxsync $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP RTLOGGROUP_LDR
36#include <iprt/ldr.h>
37#include "internal/iprt.h"
38
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/err.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/string.h>
45
46#include <iprt/formats/lx.h>
47#include "internal/ldr.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @def KLDRMODLX_STRICT
54 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
55#define KLDRMODLX_STRICT 1
56
57/** @def KLDRMODLX_ASSERT
58 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
59 */
60#ifdef KLDRMODLX_STRICT
61# define KLDRMODLX_ASSERT(expr) Assert(expr)
62#else
63# define KLDRMODLX_ASSERT(expr) do {} while (0)
64#endif
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
70/**
71 * Instance data for the LX module interpreter.
72 */
73typedef struct KLDRMODLX
74{
75 /** Core module structure. */
76 RTLDRMODINTERNAL Core;
77
78 /** Pointer to the user mapping. */
79 const void *pvMapping;
80 /** The size of the mapped LX image. */
81 size_t cbMapped;
82 /** Reserved flags. */
83 uint32_t f32Reserved;
84
85 /** The offset of the LX header. */
86 RTFOFF offHdr;
87 /** Copy of the LX header. */
88 struct e32_exe Hdr;
89
90 /** Pointer to the loader section.
91 * Allocated together with this structure. */
92 const uint8_t *pbLoaderSection;
93 /** Pointer to the last byte in the loader section. */
94 const uint8_t *pbLoaderSectionLast;
95 /** Pointer to the object table in the loader section. */
96 const struct o32_obj *paObjs;
97 /** Pointer to the object page map table in the loader section. */
98 const struct o32_map *paPageMappings;
99 /** Pointer to the resource table in the loader section. */
100 const struct rsrc32 *paRsrcs;
101 /** Pointer to the resident name table in the loader section. */
102 const uint8_t *pbResNameTab;
103 /** Pointer to the entry table in the loader section. */
104 const uint8_t *pbEntryTab;
105
106 /** Pointer to the non-resident name table. */
107 uint8_t *pbNonResNameTab;
108 /** Pointer to the last byte in the non-resident name table. */
109 const uint8_t *pbNonResNameTabLast;
110
111 /** Pointer to the fixup section. */
112 uint8_t *pbFixupSection;
113 /** Pointer to the last byte in the fixup section. */
114 const uint8_t *pbFixupSectionLast;
115 /** Pointer to the fixup page table within pvFixupSection. */
116 const uint32_t *paoffPageFixups;
117 /** Pointer to the fixup record table within pvFixupSection. */
118 const uint8_t *pbFixupRecs;
119 /** Pointer to the import module name table within pvFixupSection. */
120 const uint8_t *pbImportMods;
121 /** Pointer to the import procedure name table within pvFixupSection. */
122 const uint8_t *pbImportProcs;
123
124 /** Pointer to the module name (in the resident name table). */
125 const char *pszName;
126 /** The name length. */
127 size_t cchName;
128
129 /** The target CPU. */
130 RTLDRCPU enmCpu;
131 /** Number of segments in aSegments. */
132 uint32_t cSegments;
133 /** Segment info. */
134 RTLDRSEG aSegments[RT_FLEXIBLE_ARRAY];
135} KLDRMODLX, *PKLDRMODLX;
136
137
138/*********************************************************************************************************************************
139* Internal Functions *
140*********************************************************************************************************************************/
141static int kldrModLXHasDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits);
142static DECLCALLBACK(int) rtldrLX_RelocateBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR NewBaseAddress,
143 RTUINTPTR OldBaseAddress, PFNRTLDRIMPORT pfnGetImport, void *pvUser);
144static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, ssize_t cbNameTable, uint32_t iOrdinal);
145static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, size_t cchSymbol, uint32_t *piSymbol);
146static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, ssize_t cbNameTable,
147 const char *pchSymbol, size_t cchSymbol);
148static int kldrModLXGetImport(PKLDRMODLX pThis, const void *pvBits, uint32_t iImport,
149 char *pszName, size_t cchName, size_t *pcbNeeded);
150static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
151static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
152static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
153static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb);
154static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
155 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind);
156#if 0
157static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
158static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, uintptr_t uHandle);
159static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved);
160#endif
161static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
162static int kldrModLXDoReloc(uint8_t *pbPage, int off, RTLDRADDR PageAddress, const struct r32_rlc *prlc,
163 int iSelector, RTLDRADDR uValue, uint32_t fKind);
164
165
166/**
167 * Separate function for reading and creating the LX module instance to
168 * simplify cleanup on failure.
169 */
170static int kldrModLXDoCreate(PRTLDRREADER pRdr, RTFOFF offNewHdr, uint32_t fFlags, PKLDRMODLX *ppModLX, PRTERRINFO pErrInfo)
171{
172 struct e32_exe Hdr;
173 PKLDRMODLX pModLX;
174 uint32_t off, offEnd;
175 uint32_t i;
176 int fCanOptimizeMapping;
177 uint32_t NextRVA;
178
179 RT_NOREF(fFlags);
180 *ppModLX = NULL;
181
182 /*
183 * Read the signature and file header.
184 */
185 int rc = pRdr->pfnRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
186 if (RT_FAILURE(rc))
187 return RTErrInfoSetF(pErrInfo, rc, "Error reading LX header at %RTfoff: %Rrc", offNewHdr, rc);
188 if ( Hdr.e32_magic[0] != E32MAGIC1
189 || Hdr.e32_magic[1] != E32MAGIC2)
190 return RTErrInfoSetF(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "Not LX magic: %02x %02x", Hdr.e32_magic[0], Hdr.e32_magic[1]);
191
192 /* We're not interested in anything but x86 images. */
193 if ( Hdr.e32_level != E32LEVEL
194 || Hdr.e32_border != E32LEBO
195 || Hdr.e32_worder != E32LEWO
196 || Hdr.e32_cpu < E32CPU286
197 || Hdr.e32_cpu > E32CPU486
198 || Hdr.e32_pagesize != OBJPAGELEN
199 )
200 return VERR_LDRLX_BAD_HEADER;
201
202 /* Some rough sanity checks. */
203 offEnd = pRdr->pfnSize(pRdr) >= (RTFOFF)~(uint32_t)16 ? ~(uint32_t)16 : (uint32_t)pRdr->pfnSize(pRdr);
204 if ( Hdr.e32_itermap > offEnd
205 || Hdr.e32_datapage > offEnd
206 || Hdr.e32_nrestab > offEnd
207 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
208 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
209 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
210 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
211 return VERR_LDRLX_BAD_HEADER;
212
213 /* Verify the loader section. */
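/* (The loader section starts at e32_objtab and is e32_ldrsize bytes long; it holds the
 * object table, the object page map, the resource table, the resident name table and the
 * entry table, all of which are located below relative to e32_objtab.) */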
214 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
215 if (Hdr.e32_objtab < sizeof(Hdr) && Hdr.e32_objcnt)
216 return RTErrInfoSetF(pErrInfo, VERR_LDRLX_BAD_LOADER_SECTION,
217 "Object table is inside the header: %#x", Hdr.e32_objtab);
218 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
219 if (off > offEnd)
220 return RTErrInfoSetF(pErrInfo, VERR_LDRLX_BAD_LOADER_SECTION,
221 "Object table spans beyond the executable: e32_objcnt=%u", Hdr.e32_objcnt);
222 if ( Hdr.e32_objmap
223 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
224 return RTErrInfoSetF(pErrInfo, VERR_LDRLX_BAD_LOADER_SECTION,
225 "Bad object page map table offset: %#x", Hdr.e32_objmap);
226 if ( Hdr.e32_rsrccnt
227 && ( Hdr.e32_rsrctab < off
228 || Hdr.e32_rsrctab > offEnd
229 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
230 return RTErrInfoSetF(pErrInfo, VERR_LDRLX_BAD_LOADER_SECTION,
231 "Resource table is out of bounds: %#x entries at %#x", Hdr.e32_rsrccnt, Hdr.e32_rsrctab);
232 if ( Hdr.e32_restab
233 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
234 return VERR_LDRLX_BAD_LOADER_SECTION;
235 if ( Hdr.e32_enttab
236 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
237 return VERR_LDRLX_BAD_LOADER_SECTION;
238 if ( Hdr.e32_dircnt
239 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
240 return VERR_LDRLX_BAD_LOADER_SECTION;
241
242 /* Verify the fixup section. */
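/* (The fixup section normally follows the loader section and holds the fixup page table,
 * the fixup record table and the import module/procedure name tables; see
 * kldrModLXDoLoadFixupSection.) */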
243 off = offEnd;
244 offEnd = off + Hdr.e32_fixupsize;
245 if ( Hdr.e32_fpagetab
246 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
247 {
248 /*
249 * wlink mixes the fixup section and the loader section.
250 */
251 off = Hdr.e32_fpagetab;
252 offEnd = off + Hdr.e32_fixupsize;
253 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
254 }
255 if ( Hdr.e32_frectab
256 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
257 return VERR_LDRLX_BAD_FIXUP_SECTION;
258 if ( Hdr.e32_impmod
259 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
260 return VERR_LDRLX_BAD_FIXUP_SECTION;
261 if ( Hdr.e32_impproc
262 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
263 return VERR_LDRLX_BAD_FIXUP_SECTION;
264
265 /*
266 * Calc the instance size, allocate and initialize it.
267 */
268 size_t cbModLXAndSegments = RT_ALIGN_Z(RT_UOFFSETOF_DYN(KLDRMODLX, aSegments[Hdr.e32_objcnt + 1]), 8);
269 pModLX = (PKLDRMODLX)RTMemAlloc(cbModLXAndSegments + Hdr.e32_ldrsize + 2 /*for two extra zeros*/);
270 if (!pModLX)
271 return VERR_NO_MEMORY;
272 *ppModLX = pModLX;
273
274 /* Core & CPU. */
275 pModLX->Core.u32Magic = 0; /* set by caller. */
276 pModLX->Core.eState = LDR_STATE_OPENED;
277 pModLX->Core.pOps = NULL; /* set by caller. */
278 pModLX->Core.pReader = pRdr;
279 switch (Hdr.e32_cpu)
280 {
281 case E32CPU286:
282 pModLX->enmCpu = RTLDRCPU_I80286;
283 pModLX->Core.enmArch = RTLDRARCH_X86_16;
284 break;
285 case E32CPU386:
286 pModLX->enmCpu = RTLDRCPU_I386;
287 pModLX->Core.enmArch = RTLDRARCH_X86_32;
288 break;
289 case E32CPU486:
290 pModLX->enmCpu = RTLDRCPU_I486;
291 pModLX->Core.enmArch = RTLDRARCH_X86_32;
292 break;
293 }
294 pModLX->Core.enmEndian = RTLDRENDIAN_LITTLE;
295 pModLX->Core.enmFormat = RTLDRFMT_LX;
296 switch (Hdr.e32_mflags & E32MODMASK)
297 {
298 case E32MODEXE:
299 pModLX->Core.enmType = !(Hdr.e32_mflags & E32NOINTFIX)
300 ? RTLDRTYPE_EXECUTABLE_RELOCATABLE
301 : RTLDRTYPE_EXECUTABLE_FIXED;
302 break;
303
304 case E32MODDLL:
305 case E32PROTDLL:
306 case E32MODPROTDLL:
307 pModLX->Core.enmType = !(Hdr.e32_mflags & E32SYSDLL)
308 ? RTLDRTYPE_SHARED_LIBRARY_RELOCATABLE
309 : RTLDRTYPE_SHARED_LIBRARY_FIXED;
310 break;
311
312 case E32MODPDEV:
313 case E32MODVDEV:
314 pModLX->Core.enmType = RTLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
315 break;
316 }
317
318 /* KLDRMODLX */
319 pModLX->cSegments = Hdr.e32_objcnt;
320 pModLX->pszName = NULL; /* finalized further down */
321 pModLX->cchName = 0;
322 pModLX->pvMapping = 0;
323 pModLX->cbMapped = 0;
324 pModLX->f32Reserved = 0;
325
326 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
327 memcpy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
328
329 pModLX->pbLoaderSection = (uint8_t *)pModLX + cbModLXAndSegments;
330 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
331 pModLX->paObjs = NULL;
332 pModLX->paPageMappings = NULL;
333 pModLX->paRsrcs = NULL;
334 pModLX->pbResNameTab = NULL;
335 pModLX->pbEntryTab = NULL;
336
337 pModLX->pbNonResNameTab = NULL;
338 pModLX->pbNonResNameTabLast = NULL;
339
340 pModLX->pbFixupSection = NULL;
341 pModLX->pbFixupSectionLast = NULL;
342 pModLX->paoffPageFixups = NULL;
343 pModLX->pbFixupRecs = NULL;
344 pModLX->pbImportMods = NULL;
345 pModLX->pbImportProcs = NULL;
346
347 /*
348 * Read the loader data.
349 */
350 rc = pRdr->pfnRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
351 if (RT_FAILURE(rc))
352 return rc;
353 ((uint8_t *)pModLX->pbLoaderSectionLast)[1] = 0;
354 ((uint8_t *)pModLX->pbLoaderSectionLast)[2] = 0;
355 if (pModLX->Hdr.e32_objcnt)
356 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
357 if (pModLX->Hdr.e32_objmap)
358 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
359 if (pModLX->Hdr.e32_rsrccnt)
360 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
361 if (pModLX->Hdr.e32_restab)
362 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
363 if (pModLX->Hdr.e32_enttab)
364 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
365
366 /*
367 * Get the soname from the resident name table.
368 * Very convenient that it's the 0 ordinal, because then we get a
369 * free string terminator.
370 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
371 */
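/* Illustration: each entry is a pascal string followed by the 16-bit ordinal, so a module
 * named "FOO" exported as ordinal 0 is stored as the bytes 03 'F' 'O' 'O' 00 00 - the two
 * zero ordinal bytes double as the free string terminator mentioned above. */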
372 if (pModLX->pbResNameTab)
373 pModLX->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
374 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
375 0);
376 if (!pModLX->pszName)
377 return VERR_LDRLX_NO_SONAME;
378 pModLX->cchName = *(const uint8_t *)pModLX->pszName++;
379 if ( pModLX->pszName[pModLX->cchName] != '\0'
380 || pModLX->cchName != strlen(pModLX->pszName))
381 return VERR_LDRLX_BAD_SONAME;
382
383 /*
384 * Quick validation of the object table.
385 */
386 for (i = 0; i < pModLX->cSegments; i++)
387 {
388 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
389 return VERR_LDRLX_BAD_OBJECT_TABLE;
390 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
391 return VERR_LDRLX_BAD_OBJECT_TABLE;
392 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
393 return VERR_LDRLX_BAD_OBJECT_TABLE;
394 if ( pModLX->paObjs[i].o32_mapsize
395 && ( (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
396 || (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
397 > pModLX->pbLoaderSectionLast))
398 return VERR_LDRLX_BAD_OBJECT_TABLE;
399 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
400 {
401 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
402 return VERR_LDRLX_BAD_OBJECT_TABLE;
403 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
404 return VERR_LDRLX_BAD_OBJECT_TABLE;
405 }
406 }
407
408 /*
409 * Check if we can optimize the mapping by using a different
410 * object alignment. The linker typically uses 64KB alignment,
411 * we can easily get away with page alignment in most cases.
412 */
413 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
414 NextRVA = 0;
415
416 /*
417 * Setup the KLDRMOD segment array.
418 */
419 for (i = 0; i < pModLX->cSegments; i++)
420 {
421 /* unused */
422 pModLX->aSegments[i].pszName = NULL;
423 pModLX->aSegments[i].offFile = -1;
424 pModLX->aSegments[i].cbFile = -1;
425 pModLX->aSegments[i].SelFlat = 0;
426 pModLX->aSegments[i].Sel16bit = 0;
427
428 /* flags */
429 pModLX->aSegments[i].fFlags = 0;
430 if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
431 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_16BIT;
432 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
433 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_ALIAS16;
434 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
435 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_CONFORM;
436 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
437 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_IOPL;
438
439 /* size and addresses */
440 pModLX->aSegments[i].Alignment = OBJPAGELEN;
441 pModLX->aSegments[i].cb = pModLX->paObjs[i].o32_size;
442 pModLX->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
443 pModLX->aSegments[i].RVA = NextRVA;
444 if ( fCanOptimizeMapping
445 || i + 1 >= pModLX->cSegments
446 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
447 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
448 pModLX->aSegments[i].cbMapped = RT_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
449 else
450 pModLX->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
451 NextRVA += (uint32_t)pModLX->aSegments[i].cbMapped;
452
453 /* protection */
454 switch ( pModLX->paObjs[i].o32_flags
455 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
456 {
457 case 0:
458 case OBJSHARED:
459 pModLX->aSegments[i].fProt = 0;
460 break;
461 case OBJREAD:
462 case OBJREAD | OBJSHARED:
463 pModLX->aSegments[i].fProt = RTMEM_PROT_READ;
464 break;
465 case OBJWRITE:
466 case OBJWRITE | OBJREAD:
467 pModLX->aSegments[i].fProt = RTMEM_PROT_READ | RTMEM_PROT_WRITECOPY;
468 break;
469 case OBJWRITE | OBJSHARED:
470 case OBJWRITE | OBJSHARED | OBJREAD:
471 pModLX->aSegments[i].fProt = RTMEM_PROT_READ | RTMEM_PROT_WRITE;
472 break;
473 case OBJEXEC:
474 case OBJEXEC | OBJSHARED:
475 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC;
476 break;
477 case OBJEXEC | OBJREAD:
478 case OBJEXEC | OBJREAD | OBJSHARED:
479 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ;
480 break;
481 case OBJEXEC | OBJWRITE:
482 case OBJEXEC | OBJWRITE | OBJREAD:
483 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITECOPY;
484 break;
485 case OBJEXEC | OBJWRITE | OBJSHARED:
486 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
487 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE;
488 break;
489 }
490 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
491 pModLX->aSegments[i].fProt = RTMEM_PROT_READ;
492 /*pModLX->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
493 pModLX->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
494 pModLX->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
495 }
496
497 /* set the mapping size */
498 pModLX->cbMapped = NextRVA;
499
500 /*
501 * We're done.
502 */
503 *ppModLX = pModLX;
504 return VINF_SUCCESS;
505}
506
507
508/**
509 * @interface_method_impl{RTLDROPS,pfnClose}
510 */
511static DECLCALLBACK(int) rtldrLX_Close(PRTLDRMODINTERNAL pMod)
512{
513 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
514 KLDRMODLX_ASSERT(!pModLX->pvMapping);
515
516 if (pModLX->pbNonResNameTab)
517 {
518 RTMemFree(pModLX->pbNonResNameTab);
519 pModLX->pbNonResNameTab = NULL;
520 }
521 if (pModLX->pbFixupSection)
522 {
523 RTMemFree(pModLX->pbFixupSection);
524 pModLX->pbFixupSection = NULL;
525 }
526 return VINF_SUCCESS;
527}
528
529
530/**
531 * Resolves base address aliases.
532 *
533 * @param pModLX The interpreter module instance
534 * @param pBaseAddress The base address, IN & OUT.
535 */
536static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PRTLDRADDR pBaseAddress)
537{
538 if (*pBaseAddress == RTLDR_BASEADDRESS_LINK)
539 *pBaseAddress = pModLX->aSegments[0].LinkAddress;
540}
541
542
543static int kldrModLXQuerySymbol(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, uint32_t iSymbol,
544 const char *pchSymbol, size_t cchSymbol, const char *pszVersion,
545 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind)
546{
547 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
548 uint32_t iOrdinal;
549 int rc;
550 const struct b32_bundle *pBundle;
551 RT_NOREF(pvBits);
552 RT_NOREF(pszVersion);
553
554 /*
555 * Give up at once if there is no entry table.
556 */
557 if (!pModLX->Hdr.e32_enttab)
558 return VERR_SYMBOL_NOT_FOUND;
559
560 /*
561 * Translate the symbol name into an ordinal.
562 */
563 if (pchSymbol)
564 {
565 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
566 if (RT_FAILURE(rc))
567 return rc;
568 }
569
570 /*
571 * Iterate the entry table.
572 * (The entry table is made up of bundles of similar exports.)
573 */
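/* Bundle layout as consumed below (see also lx.h): a byte count (b32_cnt), a byte type
 * (b32_type) and, for non-empty bundles, a 16-bit object number (b32_obj), followed by
 * b32_cnt entries whose size depends on the type (s_cbEntry). An empty bundle is just
 * the two leading bytes and covers a range of unused ordinals; a zero count ends the
 * table. */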
574 iOrdinal = 1;
575 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
576 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
577 {
578 static const size_t s_cbEntry[] = { 0, 3, 5, 5, 7 };
579
580 /*
581 * Check for a hit first.
582 */
583 iOrdinal += pBundle->b32_cnt;
584 if (iSymbol < iOrdinal)
585 {
586 uint32_t offObject;
587 const struct e32_entry *pEntry = (const struct e32_entry *)((uintptr_t)(pBundle + 1)
588 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
589 * s_cbEntry[pBundle->b32_type]);
590
591 /*
592 * Calculate the return address.
593 */
594 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
595 switch (pBundle->b32_type)
596 {
597 /* Empty bundles are placeholders for unused ordinal ranges. */
598 case EMPTY:
599 return VERR_SYMBOL_NOT_FOUND;
600
601 /* e32_flags + a 16-bit offset. */
602 case ENTRY16:
603 offObject = pEntry->e32_variant.e32_offset.offset16;
604 if (pfKind)
605 *pfKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_NO_TYPE;
606 break;
607
608 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
609 case GATE16:
610 offObject = pEntry->e32_variant.e32_callgate.offset;
611 if (pfKind)
612 *pfKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_CODE;
613 break;
614
615 /* e32_flags + a 32-bit offset. */
616 case ENTRY32:
617 offObject = pEntry->e32_variant.e32_offset.offset32;
618 if (pfKind)
619 *pfKind = RTLDRSYMKIND_32BIT;
620 break;
621
622 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
623 case ENTRYFWD:
624 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
625
626 default:
627 /* anyone actually using TYPEINFO will end up here. */
628 KLDRMODLX_ASSERT(!"Bad bundle type");
629 return VERR_LDRLX_BAD_BUNDLE;
630 }
631
632 /*
633 * Validate the object number and calc the return address.
634 */
635 if ( pBundle->b32_obj <= 0
636 || pBundle->b32_obj > pModLX->cSegments)
637 return VERR_LDRLX_BAD_BUNDLE;
638 if (puValue)
639 *puValue = BaseAddress
640 + offObject
641 + pModLX->aSegments[pBundle->b32_obj - 1].RVA;
642 return VINF_SUCCESS;
643 }
644
645 /*
646 * Skip the bundle.
647 */
648 if (pBundle->b32_type > ENTRYFWD)
649 {
650 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
651 return VERR_LDRLX_BAD_BUNDLE;
652 }
653 if (pBundle->b32_type == 0)
654 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + 2);
655 else
656 pBundle = (const struct b32_bundle *)((const uint8_t *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
657 }
658
659 return VERR_SYMBOL_NOT_FOUND;
660}
661
662
663/**
664 * @interface_method_impl{RTLDROPS,pfnGetSymbolEx}
665 */
666static DECLCALLBACK(int) rtldrLX_GetSymbolEx(PRTLDRMODINTERNAL pMod, const void *pvBits, RTUINTPTR BaseAddress,
667 uint32_t iOrdinal, const char *pszSymbol, RTUINTPTR *pValue)
668{
669 uint32_t fKind = RTLDRSYMKIND_REQ_FLAT;
670 return kldrModLXQuerySymbol(pMod, pvBits, BaseAddress, iOrdinal, pszSymbol, pszSymbol ? strlen(pszSymbol) : 0,
671 NULL, NULL, NULL, pValue, &fKind);
672}
673
674
675/**
676 * Do name lookup.
677 *
678 * @returns IPRT status code.
679 * @param pModLX The module to lookup the symbol in.
680 * @param pchSymbol The symbol to lookup.
681 * @param cchSymbol The symbol name length.
682 * @param piSymbol Where to store the symbol ordinal.
683 */
684static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, size_t cchSymbol, uint32_t *piSymbol)
685{
686
687 /*
688 * First do a hash table lookup.
689 */
690 /** @todo hash name table for speed. */
691
692 /*
693 * Search the name tables.
694 */
695 const uint8_t *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
696 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
697 pchSymbol, cchSymbol);
698 if (!pbName)
699 {
700 if (!pModLX->pbNonResNameTab)
701 {
702 /* lazy load it */
703 /** @todo non-resident name table. */
704 }
705 if (pModLX->pbNonResNameTab)
706 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
707 pModLX->pbNonResNameTabLast - pModLX->pbResNameTab + 1,
708 pchSymbol, cchSymbol);
709 }
710 if (!pbName)
711 return VERR_SYMBOL_NOT_FOUND;
712
713 *piSymbol = *(const uint16_t *)(pbName + 1 + *pbName);
714 return VINF_SUCCESS;
715}
716
717
718/**
719 * Lookup a name table entry by name.
720 *
721 * @returns Pointer to the name table entry if found.
722 * @returns NULL if not found.
723 * @param pbNameTable Pointer to the name table that should be searched.
724 * @param cbNameTable The size of the name table.
725 * @param pchSymbol The name of the symbol we're looking for.
726 * @param cchSymbol The length of the symbol name.
727 */
728static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, ssize_t cbNameTable,
729 const char *pchSymbol, size_t cchSymbol)
730{
731 /*
732 * Determine the name length up front so we can skip anything which doesn't match the length.
733 */
734 uint8_t cbSymbol8Bit = (uint8_t)cchSymbol;
735 if (cbSymbol8Bit != cchSymbol)
736 return NULL; /* too long. */
737
738 /*
739 * Walk the name table.
740 */
741 while (*pbNameTable != 0 && cbNameTable > 0)
742 {
743 const uint8_t cbName = *pbNameTable;
744
745 cbNameTable -= cbName + 1 + 2;
746 if (cbNameTable < 0)
747 break;
748
749 if ( cbName == cbSymbol8Bit
750 && !memcmp(pbNameTable + 1, pchSymbol, cbName))
751 return pbNameTable;
752
753 /* next entry */
754 pbNameTable += cbName + 1 + 2;
755 }
756
757 return NULL;
758}
759
760
761/**
762 * Deal with a forwarder entry.
763 *
764 * @returns IPRT status code.
765 * @param pModLX The LX module interpreter instance.
766 * @param pEntry The forwarder entry.
767 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
768 * @param pvUser The user argument for the callback.
769 * @param puValue Where to put the value. (optional)
770 * @param pfKind Where to put the symbol kind. (optional)
771 */
772static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
773 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind)
774{
775 if (!pfnGetForwarder)
776 return VERR_LDR_FORWARDER;
777
778 /*
779 * Validate the entry import module ordinal.
780 */
781 if ( !pEntry->e32_variant.e32_fwd.modord
782 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
783 return VERR_LDRLX_BAD_FORWARDER;
784
785 char szImpModule[256];
786 int rc = kldrModLXGetImport(pModLX, NULL, pEntry->e32_variant.e32_fwd.modord - 1, szImpModule, sizeof(szImpModule), NULL);
787 if (RT_FAILURE(rc))
788 return rc;
789
790 /*
791 * Figure out the parameters.
792 */
793 uint32_t iSymbol;
794 const char *pszSymbol;
795 char szSymbol[256];
796 if (pEntry->e32_flags & FWD_ORDINAL)
797 {
798 iSymbol = pEntry->e32_variant.e32_fwd.value;
799 pszSymbol = NULL; /* no symbol name. */
800 }
801 else
802 {
803 const uint8_t *pbName;
804
805 /* load the fixup section if necessary. */
806 if (!pModLX->pbImportProcs)
807 {
808 rc = kldrModLXDoLoadFixupSection(pModLX);
809 if (RT_FAILURE(rc))
810 return rc;
811 }
812
813 /* Make name pointer. */
814 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
815 if ( pbName >= pModLX->pbFixupSectionLast
816 || pbName < pModLX->pbFixupSection
817 || !*pbName)
818 return VERR_LDRLX_BAD_FORWARDER;
819
820
821 /* check for '#' name. */
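/* A forwarder name starting with '#' encodes an ordinal rather than a symbol name,
 * e.g. "#123" means ordinal 123 of the import module; the digits are parsed below. */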
822 if (pbName[1] == '#')
823 {
824 uint8_t cbLeft = *pbName;
825 const uint8_t *pb = pbName + 1;
826 unsigned uBase;
827
828 /* base detection */
829 uBase = 10;
830 if ( cbLeft > 1
831 && pb[1] == '0'
832 && (pb[2] == 'x' || pb[2] == 'X'))
833 {
834 uBase = 16;
835 pb += 2;
836 cbLeft -= 2;
837 }
838
839 /* ascii to integer */
840 iSymbol = 0;
841 while (cbLeft-- > 0)
842 {
843 /* convert char to digit. */
844 unsigned uDigit = *pb++;
845 if (uDigit >= '0' && uDigit <= '9')
846 uDigit -= '0';
847 else if (uDigit >= 'a' && uDigit <= 'z')
848 uDigit -= 'a' - 10;
849 else if (uDigit >= 'A' && uDigit <= 'Z')
850 uDigit -= 'A' - 10;
851 else if (!uDigit)
852 break;
853 else
854 return VERR_LDRLX_BAD_FORWARDER;
855 if (uDigit >= uBase)
856 return VERR_LDRLX_BAD_FORWARDER;
857
858 /* insert the digit */
859 iSymbol *= uBase;
860 iSymbol += uDigit;
861 }
862 if (!iSymbol)
863 return VERR_LDRLX_BAD_FORWARDER;
864
865 pszSymbol = NULL; /* no symbol name. */
866 }
867 else
868 {
869 memcpy(szSymbol, pbName + 1, *pbName);
870 szSymbol[*pbName] = '\0';
871 pszSymbol = szSymbol;
872 iSymbol = UINT32_MAX;
873 }
874 }
875
876 /*
877 * Resolve the forwarder.
878 */
879 rc = pfnGetForwarder(&pModLX->Core, szImpModule, pszSymbol, iSymbol, puValue, /*pfKind, */pvUser);
880 if (RT_SUCCESS(rc) && pfKind)
881 *pfKind |= RTLDRSYMKIND_FORWARDER;
882 return rc;
883}
884
885
886/**
887 * Loads the fixup section from the executable image.
888 *
889 * The fixup section isn't loaded until it's accessed. It is freed again by rtldrLX_Close().
890 *
891 * @returns IPRT status code.
892 * @param pModLX The LX module interpreter instance.
893 */
894static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
895{
896 void *pv = RTMemAlloc(pModLX->Hdr.e32_fixupsize);
897 if (!pv)
898 return VERR_NO_MEMORY;
899
900 uint32_t off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
901 int rc = pModLX->Core.pReader->pfnRead(pModLX->Core.pReader, pv, pModLX->Hdr.e32_fixupsize,
902 off + pModLX->offHdr);
903 if (RT_SUCCESS(rc))
904 {
905 pModLX->pbFixupSection = (uint8_t *)pv;
906 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
907 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
908 if (pModLX->Hdr.e32_fpagetab)
909 pModLX->paoffPageFixups = (const uint32_t *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
910 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
911 if (pModLX->Hdr.e32_frectab)
912 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
913 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
914 if (pModLX->Hdr.e32_impmod)
915 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
916 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
917 if (pModLX->Hdr.e32_impproc)
918 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
919 }
920 else
921 RTMemFree(pv);
922 return rc;
923}
924
925
926/**
927 * @interface_method_impl{RTLDROPS,pfnEnumSymbols}
928 */
929static DECLCALLBACK(int) rtldrLX_EnumSymbols(PRTLDRMODINTERNAL pMod, unsigned fFlags, const void *pvBits,
930 RTUINTPTR BaseAddress, PFNRTLDRENUMSYMS pfnCallback, void *pvUser)
931{
932 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
933 RT_NOREF(pvBits);
934 RT_NOREF(fFlags);
935
936 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
937
938 /*
939 * Enumerate the entry table.
940 * (The entry table is made up of bundles of similar exports.)
941 */
942 int rc = VINF_SUCCESS;
943 uint32_t iOrdinal = 1;
944 const struct b32_bundle *pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
945 while (pBundle->b32_cnt && iOrdinal)
946 {
947 static const size_t s_cbEntry[] = { 0, 3, 5, 5, 7 };
948
949 /*
950 * Enum the entries in the bundle.
951 */
952 if (pBundle->b32_type != EMPTY)
953 {
954 const struct e32_entry *pEntry;
955 size_t cbEntry;
956 RTLDRADDR BundleRVA;
957 unsigned cLeft;
958
959
960 /* Validate the bundle. */
961 switch (pBundle->b32_type)
962 {
963 case ENTRY16:
964 case GATE16:
965 case ENTRY32:
966 if ( pBundle->b32_obj <= 0
967 || pBundle->b32_obj > pModLX->cSegments)
968 return VERR_LDRLX_BAD_BUNDLE;
969 BundleRVA = pModLX->aSegments[pBundle->b32_obj - 1].RVA;
970 break;
971
972 case ENTRYFWD:
973 BundleRVA = 0;
974 break;
975
976 default:
977 /* anyone actually using TYPEINFO will end up here. */
978 KLDRMODLX_ASSERT(!"Bad bundle type");
979 return VERR_LDRLX_BAD_BUNDLE;
980 }
981
982 /* iterate the bundle entries. */
983 cbEntry = s_cbEntry[pBundle->b32_type];
984 pEntry = (const struct e32_entry *)(pBundle + 1);
985 cLeft = pBundle->b32_cnt;
986 while (cLeft-- > 0)
987 {
988 RTLDRADDR uValue;
989 uint32_t fKind;
990 int fFoundName;
991 const uint8_t *pbName;
992
993 /*
994 * Calc the symbol value and kind.
995 */
996 switch (pBundle->b32_type)
997 {
998 /* e32_flags + a 16-bit offset. */
999 case ENTRY16:
1000 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1001 fKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_NO_TYPE;
1002 break;
1003
1004 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1005 case GATE16:
1006 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1007 fKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_CODE;
1008 break;
1009
1010 /* e32_flags + a 32-bit offset. */
1011 case ENTRY32:
1012 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1013 fKind = RTLDRSYMKIND_32BIT;
1014 break;
1015
1016 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1017 case ENTRYFWD:
1018 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1019 fKind = RTLDRSYMKIND_FORWARDER;
1020 break;
1021
1022 default: /* shut up gcc. */
1023 uValue = 0;
1024 fKind = RTLDRSYMKIND_NO_BIT | RTLDRSYMKIND_NO_TYPE;
1025 break;
1026 }
1027
1028 /*
1029 * Any symbol names?
1030 */
1031 fFoundName = 0;
1032 char szName[256];
1033
1034 /* resident name table. */
1035 pbName = pModLX->pbResNameTab;
1036 if (pbName)
1037 {
1038 do
1039 {
1040 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1041 if (!pbName)
1042 break;
1043 fFoundName = 1;
1044 memcpy(szName, (const char *)pbName + 1, *pbName);
1045 szName[*pbName] = '\0';
1046 rc = pfnCallback(pMod, szName, iOrdinal, uValue, /*fKind,*/ pvUser);
1047 if (rc != VINF_SUCCESS)
1048 return rc;
1049
1050 /* skip to the next entry */
1051 pbName += 1 + *pbName + 2;
1052 } while (pbName < pModLX->pbLoaderSectionLast);
1053 }
1054
1055 /* non-resident name table. */
1056 pbName = pModLX->pbNonResNameTab;
1057 /** @todo lazy load the non-resident name table. */
1058 if (pbName)
1059 {
1060 do
1061 {
1062 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1063 if (!pbName)
1064 break;
1065 fFoundName = 1;
1066 memcpy(szName, (const char *)pbName + 1, *pbName);
1067 szName[*pbName] = '\0';
1068 rc = pfnCallback(pMod, szName, iOrdinal, uValue, /*fKind,*/ pvUser);
1069 if (rc != VINF_SUCCESS)
1070 return rc;
1071
1072 /* skip to the next entry */
1073 pbName += 1 + *pbName + 2;
1074 } while (pbName < pModLX->pbLoaderSectionLast);
1075 }
1076
1077 /*
1078 * If no names, call once with the ordinal only.
1079 */
1080 if (!fFoundName)
1081 {
1082 RT_NOREF(fKind);
1083 rc = pfnCallback(pMod, NULL /*pszName*/, iOrdinal, uValue, /*fKind,*/ pvUser);
1084 if (rc != VINF_SUCCESS)
1085 return rc;
1086 }
1087
1088 /* next */
1089 iOrdinal++;
1090 pEntry = (const struct e32_entry *)((uintptr_t)pEntry + cbEntry);
1091 }
1092 }
1093
1094 /*
1095 * The next bundle.
1096 */
1097 if (pBundle->b32_type > ENTRYFWD)
1098 {
1099 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1100 return VERR_LDRLX_BAD_BUNDLE;
1101 }
1102 if (pBundle->b32_type == 0)
1103 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + 2);
1104 else
1105 pBundle = (const struct b32_bundle *)((const uint8_t *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1106 }
1107
1108 return VINF_SUCCESS;
1109}
1110
1111
1112/**
1113 * Lookup a name table entry by ordinal.
1114 *
1115 * @returns Pointer to the name table entry if found.
1116 * @returns NULL if not found.
1117 * @param pbNameTable Pointer to the name table that should be searched.
1118 * @param cbNameTable The size of the name table.
1119 * @param iOrdinal The ordinal to search for.
1120 */
1121static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, ssize_t cbNameTable, uint32_t iOrdinal)
1122{
1123 while (*pbNameTable != 0 && cbNameTable > 0)
1124 {
1125 const uint8_t cbName = *pbNameTable;
1126 uint32_t iName;
1127
1128 cbNameTable -= cbName + 1 + 2;
1129 if (cbNameTable < 0)
1130 break;
1131
1132 iName = *(pbNameTable + cbName + 1)
1133 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1134 if (iName == iOrdinal)
1135 return pbNameTable;
1136
1137 /* next entry */
1138 pbNameTable += cbName + 1 + 2;
1139 }
1140
1141 return NULL;
1142}
1143
1144
1145static int kldrModLXGetImport(PKLDRMODLX pModLX, const void *pvBits, uint32_t iImport, char *pszName, size_t cchName,
1146 size_t *pcbNeeded)
1147{
1148 const uint8_t *pb;
1149 int rc;
1150 RT_NOREF(pvBits);
1151
1152 /*
1153 * Validate
1154 */
1155 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1156 return VERR_LDRLX_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1157
1158 /*
1159 * Lazy loading the fixup section.
1160 */
1161 if (!pModLX->pbImportMods)
1162 {
1163 rc = kldrModLXDoLoadFixupSection(pModLX);
1164 if (RT_FAILURE(rc))
1165 return rc;
1166 }
1167
1168 /*
1169 * Iterate the module import table until we reach the requested import ordinal.
1170 */
1171 pb = pModLX->pbImportMods;
1172 while (iImport-- > 0)
1173 pb += *pb + 1;
1174
1175 /*
1176 * Copy out the result.
1177 */
1178 if (pcbNeeded)
1179 *pcbNeeded = *pb + 1;
1180 if (*pb < cchName)
1181 {
1182 memcpy(pszName, pb + 1, *pb);
1183 pszName[*pb] = '\0';
1184 rc = VINF_SUCCESS;
1185 }
1186 else
1187 {
1188 memcpy(pszName, pb + 1, cchName);
1189 if (cchName)
1190 pszName[cchName - 1] = '\0';
1191 rc = VERR_BUFFER_OVERFLOW;
1192 }
1193
1194 return rc;
1195}
1196
1197#if 0
1198
1199/** @copydoc kLdrModNumberOfImports */
1200static int32_t kldrModLXNumberOfImports(PRTLDRMODINTERNAL pMod, const void *pvBits)
1201{
1202 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1203 RT_NOREF(pvBits);
1204 return pModLX->Hdr.e32_impmodcnt;
1205}
1206
1207
1208/** @copydoc kLdrModGetStackInfo */
1209static int kldrModLXGetStackInfo(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1210{
1211 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1212 const uint32_t i = pModLX->Hdr.e32_stackobj;
1213 RT_NOREF(pvBits);
1214
1215 if ( i
1216 && i <= pModLX->cSegments
1217 && pModLX->Hdr.e32_esp <= pModLX->aSegments[i - 1].LinkAddress + pModLX->aSegments[i - 1].cb
1218 && pModLX->Hdr.e32_stacksize
1219 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pModLX->aSegments[i - 1].LinkAddress)
1220 {
1221
1222 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1223 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1224 pStackInfo->Address = BaseAddress
1225 + pModLX->aSegments[i - 1].RVA
1226 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pModLX->aSegments[i - 1].LinkAddress;
1227 }
1228 else
1229 {
1230 pStackInfo->Address = NIL_RTLDRADDR;
1231 pStackInfo->LinkAddress = NIL_RTLDRADDR;
1232 }
1233 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1234 pStackInfo->cbStackThread = 0;
1235
1236 return VINF_SUCCESS;
1237}
1238
1239
1240/** @copydoc kLdrModQueryMainEntrypoint */
1241static int kldrModLXQueryMainEntrypoint(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, PRTLDRADDR pMainEPAddress)
1242{
1243 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1244 RT_NOREF(pvBits);
1245
1246 /*
1247 * Convert the address from the header.
1248 */
1249 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1250 *pMainEPAddress = pModLX->Hdr.e32_startobj
1251 && pModLX->Hdr.e32_startobj <= pModLX->cSegments
1252 && pModLX->Hdr.e32_eip < pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1253 ? BaseAddress + pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1254 : NIL_RTLDRADDR;
1255 return VINF_SUCCESS;
1256}
1257
1258#endif
1259
1260
1261/**
1262 * @interface_method_impl{RTLDROPS,pfnEnumDbgInfo}
1263 */
1264static DECLCALLBACK(int) rtldrLX_EnumDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits,
1265 PFNRTLDRENUMDBG pfnCallback, void *pvUser)
1266{
1267 /*PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);*/
1268 RT_NOREF(pfnCallback);
1269 RT_NOREF(pvUser);
1270
1271 /*
1272 * Quit immediately if no debug info.
1273 */
1274 if (kldrModLXHasDbgInfo(pMod, pvBits))
1275 return VINF_SUCCESS;
1276#if 0
1277 /*
1278 * Read the debug info and look for familiar magics and structures.
1279 */
1280 /** @todo */
1281#endif
1282
1283 return VINF_SUCCESS;
1284}
1285
1286
1287static int kldrModLXHasDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits)
1288{
1289 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1290 RT_NOREF(pvBits);
1291
1292 /*
1293 * Don't currently bother with linkers which don't advertise it in the header.
1294 */
1295 if ( !pModLX->Hdr.e32_debuginfo
1296 || !pModLX->Hdr.e32_debuglen)
1297 return VERR_NOT_FOUND;
1298 return VINF_SUCCESS;
1299}
1300
1301#if 0
1302
1303/** @copydoc kLdrModMap */
1304static int kldrModLXMap(PRTLDRMODINTERNAL pMod)
1305{
1306 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1307 unsigned fFixed;
1308 void *pvBase;
1309 int rc;
1310
1311 /*
1312 * Already mapped?
1313 */
1314 if (pModLX->pvMapping)
1315 return KLDR_ERR_ALREADY_MAPPED;
1316
1317 /*
1318 * Allocate memory for it.
1319 */
1320 /* fixed image? */
1321 fFixed = pModLX->Core.enmType == RTLDRTYPE_EXECUTABLE_FIXED
1322 || pModLX->Core.enmType == RTLDRTYPE_SHARED_LIBRARY_FIXED;
1323 if (!fFixed)
1324 pvBase = NULL;
1325 else
1326 {
1327 pvBase = (void *)(uintptr_t)pModLX->aSegments[0].LinkAddress;
1328 if ((uintptr_t)pvBase != pModLX->aSegments[0].LinkAddress)
1329 return KLDR_ERR_ADDRESS_OVERFLOW;
1330 }
1331 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1332 if (RT_FAILURE(rc))
1333 return rc;
1334
1335 /*
1336 * Load the bits, apply page protection, and update the segment table.
1337 */
1338 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1339 if (RT_SUCCESS(rc))
1340 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1341 if (RT_SUCCESS(rc))
1342 {
1343 uint32_t i;
1344 for (i = 0; i < pModLX->cSegments; i++)
1345 {
1346 if (pModLX->aSegments[i].RVA != NIL_RTLDRADDR)
1347 pModLX->aSegments[i].MapAddress = (uintptr_t)pvBase + (uintptr_t)pModLX->aSegments[i].RVA;
1348 }
1349 pModLX->pvMapping = pvBase;
1350 }
1351 else
1352 kHlpPageFree(pvBase, pModLX->cbMapped);
1353 return rc;
1354}
1355
1356#endif
1357
1358/**
1359 * Loads the LX pages into the specified memory mapping.
1360 *
1361 * @returns IPRT status code.
1362 *
1363 * @param pModLX The LX module interpreter instance.
1364 * @param pvBits Where to load the bits.
1365 */
1366static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1367{
1368 const PRTLDRREADER pRdr = pModLX->Core.pReader;
1369 uint8_t *pbTmpPage = NULL;
1370 int rc = VINF_SUCCESS;
1371 uint32_t i;
1372
1373 /*
1374 * Iterate the segments.
1375 */
1376 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1377 {
1378 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1379 const uint32_t cPages = (uint32_t)(pModLX->aSegments[i].cbMapped / OBJPAGELEN);
1380 uint32_t iPage;
1381 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->aSegments[i].RVA;
1382
1383 /*
1384 * Iterate the page map pages.
1385 */
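/* Each o32_map entry describes one page: o32_pagedataoffset is its file position (shifted
 * by e32_pageshift, relative to e32_datapage), o32_pagesize its size in the file, and
 * o32_pageflags how to expand it (plain, iterated, zeroed, ...). */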
1386 for (iPage = 0; RT_SUCCESS(rc) && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1387 {
1388 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1389 switch (pMap->o32_pageflags)
1390 {
1391 case VALID:
1392 if (pMap->o32_pagesize == OBJPAGELEN)
1393 rc = pRdr->pfnRead(pRdr, pbPage, OBJPAGELEN,
1394 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1395 else if (pMap->o32_pagesize < OBJPAGELEN)
1396 {
1397 rc = pRdr->pfnRead(pRdr, pbPage, pMap->o32_pagesize,
1398 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1399 memset(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1400 }
1401 else
1402 rc = VERR_LDRLX_BAD_PAGE_MAP;
1403 break;
1404
1405 case ITERDATA:
1406 case ITERDATA2:
1407 /* make sure we've got a temp page. */
1408 if (!pbTmpPage)
1409 {
1410 pbTmpPage = (uint8_t *)RTMemAlloc(OBJPAGELEN + 256);
1411 if (!pbTmpPage)
1412 break;
1413 }
1414 /* validate the size. */
1415 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1416 {
1417 rc = VERR_LDRLX_BAD_PAGE_MAP;
1418 break;
1419 }
1420
1421 /* read it and ensure 4 extra zero bytes. */
1422 rc = pRdr->pfnRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1423 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1424 if (RT_FAILURE(rc))
1425 break;
1426 memset(pbTmpPage + pMap->o32_pagesize, 0, 4);
1427
1428 /* unpack it into the image page. */
1429 if (pMap->o32_pageflags == ITERDATA2)
1430 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1431 else
1432 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1433 break;
1434
1435 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1436 case ZEROED:
1437 memset(pbPage, 0, OBJPAGELEN);
1438 break;
1439
1440 case RANGE:
1441 KLDRMODLX_ASSERT(!"RANGE");
1442 RT_FALL_THRU();
1443 default:
1444 rc = VERR_LDRLX_BAD_PAGE_MAP;
1445 break;
1446 }
1447 }
1448 if (RT_FAILURE(rc))
1449 break;
1450
1451 /*
1452 * Zero the remaining pages.
1453 */
1454 if (iPage < cPages)
1455 memset(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1456 }
1457
1458 if (pbTmpPage)
1459 RTMemFree(pbTmpPage);
1460 return rc;
1461}
1462
1463
1464/**
1465 * Unpacks iterdata (aka EXEPACK).
1466 *
1467 * @returns IPRT status code.
1468 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1469 * @param pbSrc The compressed source data.
1470 * @param cbSrc The file size of the compressed data. The source buffer
1471 * contains 4 additional zero bytes.
1472 */
1473static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1474{
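/* Record format as consumed below: each LX_Iter record is a 4 byte header (repeat count
 * LX_nIter and data length LX_nBytes, two bytes each) followed by LX_nBytes of data to
 * repeat; e.g. nIter=8, nBytes=1, data=0x00 expands to eight zero bytes. A zero repeat
 * count ends the stream. */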
1475 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1476 int cbDst = OBJPAGELEN;
1477
1478 /* Validate size of data. */
1479 if (cbSrc >= (int)OBJPAGELEN - 2)
1480 return VERR_LDRLX_BAD_ITERDATA;
1481
1482 /*
1483 * Expand the page.
1484 */
1485 while (cbSrc > 0 && pIter->LX_nIter)
1486 {
1487 if (pIter->LX_nBytes == 1)
1488 {
1489 /*
1490 * Special case - one databyte.
1491 */
1492 cbDst -= pIter->LX_nIter;
1493 if (cbDst < 0)
1494 return VERR_LDRLX_BAD_ITERDATA;
1495
1496 cbSrc -= 4 + 1;
1497 if (cbSrc < -4)
1498 return VERR_LDRLX_BAD_ITERDATA;
1499
1500 memset(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1501 pbDst += pIter->LX_nIter;
1502 pIter++;
1503 }
1504 else
1505 {
1506 /*
1507 * General.
1508 */
1509 int i;
1510
1511 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1512 if (cbDst < 0)
1513 return VERR_LDRLX_BAD_ITERDATA;
1514
1515 cbSrc -= 4 + pIter->LX_nBytes;
1516 if (cbSrc < -4)
1517 return VERR_LDRLX_BAD_ITERDATA;
1518
1519 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1520 memcpy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1521 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1522 }
1523 }
1524
1525 /*
1526 * Zero remainder of the page.
1527 */
1528 if (cbDst > 0)
1529 memset(pbDst, 0, cbDst);
1530
1531 return VINF_SUCCESS;
1532}
1533
1534
1535/**
1536 * Unpacks iterdata2 (aka EXEPACK2).
1537 *
1538 * @returns IPRT status code.
1539 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1540 * @param pbSrc The compressed source data.
1541 * @param cbSrc The file size of the compressed data. The source buffer
1542 * contains 4 additional zero bytes.
1543 */
1544static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1545{
1546 int cbDst = OBJPAGELEN;
1547
1548 while (cbSrc > 0)
1549 {
1550 /*
1551 * Bit 0 and 1 is the encoding type.
1552 */
1553 switch (*pbSrc & 0x03)
1554 {
1555 /*
1556 *
1557 * 0 1 2 3 4 5 6 7
1558 * type | |
1559 * ----------------
1560 * cb <cb bytes of data>
1561 *
1562 * Bits 2-7 are, if not zero, the length of an uncompressed run
1563 * starting at the following byte.
1564 *
1565 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1566 * type | | | | | |
1567 * ---------------- ---------------------- -----------------------
1568 * zero cb char to multiply
1569 *
1570 * If the bits are zero, the following two bytes describe a 1-byte iteration
1571 * run. The first byte is the count, the second is the byte to repeat. A count
1572 * of zero means end of data, and we simply stop. In that case the rest of the
1573 * data should be zero.
1574 */
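/* Worked example: the byte 0x0C (type bits 00, cb = 0x0C >> 2 = 3) copies the next three
 * literal bytes; the sequence 0x00 0x05 0xAA expands to five 0xAA bytes; 0x00 0x00 ends
 * the stream. */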
1575 case 0:
1576 {
1577 if (*pbSrc)
1578 {
1579 const int cb = *pbSrc >> 2;
1580 cbDst -= cb;
1581 if (cbDst < 0)
1582 return VERR_LDRLX_BAD_ITERDATA2;
1583 cbSrc -= cb + 1;
1584 if (cbSrc < 0)
1585 return VERR_LDRLX_BAD_ITERDATA2;
1586 memcpy(pbDst, ++pbSrc, cb);
1587 pbDst += cb;
1588 pbSrc += cb;
1589 }
1590 else if (cbSrc < 2)
1591 return VERR_LDRLX_BAD_ITERDATA2;
1592 else
1593 {
1594 const int cb = pbSrc[1];
1595 if (!cb)
1596 goto l_endloop;
1597 cbDst -= cb;
1598 if (cbDst < 0)
1599 return VERR_LDRLX_BAD_ITERDATA2;
1600 cbSrc -= 3;
1601 if (cbSrc < 0)
1602 return VERR_LDRLX_BAD_ITERDATA2;
1603 memset(pbDst, pbSrc[2], cb);
1604 pbDst += cb;
1605 pbSrc += 3;
1606 }
1607 break;
1608 }
1609
1610
1611 /*
1612 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1613 * type | | | | | |
1614 * ---- ------- -------------------------
1615 * cb1 cb2 - 3 offset <cb1 bytes of data>
1616 *
1617 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1618 * The cb2(+3) and offset describe an amount of data to be copied from the already
1619 * expanded data, relative to the current position. The data is copied as you would expect.
1620 */
1621 case 1:
1622 {
1623 cbSrc -= 2;
1624 if (cbSrc < 0)
1625 return VERR_LDRLX_BAD_ITERDATA2;
1626 else
1627 {
1628 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1629 const int cb1 = (*pbSrc >> 2) & 3;
1630 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1631
1632 pbSrc += 2;
1633 cbSrc -= cb1;
1634 if (cbSrc < 0)
1635 return VERR_LDRLX_BAD_ITERDATA2;
1636 cbDst -= cb1;
1637 if (cbDst < 0)
1638 return VERR_LDRLX_BAD_ITERDATA2;
1639 memcpy(pbDst, pbSrc, cb1);
1640 pbDst += cb1;
1641 pbSrc += cb1;
1642
1643 if (off > OBJPAGELEN - (unsigned)cbDst)
1644 return VERR_LDRLX_BAD_ITERDATA2;
1645 cbDst -= cb2;
1646 if (cbDst < 0)
1647 return VERR_LDRLX_BAD_ITERDATA2;
1648 memmove(pbDst, pbDst - off, cb2);
1649 pbDst += cb2;
1650 }
1651 break;
1652 }
1653
1654
1655 /*
1656 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1657 * type | | | |
1658 * ---- ----------------------------------
1659 * cb-3 offset
1660 *
1661 * Two bytes laid out as described above.
1662 * The cb(+3) and offset describe an amount of data to be copied from the already
1663 * expanded data, relative to the current position.
1664 *
1665 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1666 */
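/* Worked example: the bytes 0x06 0x01 decode as type 2 with cb = ((0x06 >> 2) & 3) + 3 = 4
 * and off = (0x01 << 4) | (0x06 >> 4) = 16, i.e. copy 4 bytes from 16 bytes back in the
 * already expanded output. */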
1667 case 2:
1668 {
1669 cbSrc -= 2;
1670 if (cbSrc < 0)
1671 return VERR_LDRLX_BAD_ITERDATA2;
1672 else
1673 {
1674 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1675 const int cb = ((*pbSrc >> 2) & 3) + 3;
1676
1677 pbSrc += 2;
1678 if (off > OBJPAGELEN - (unsigned)cbDst)
1679 return VERR_LDRLX_BAD_ITERDATA2;
1680 cbDst -= cb;
1681 if (cbDst < 0)
1682 return VERR_LDRLX_BAD_ITERDATA2;
1683 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1684 pbDst += cb;
1685 }
1686 break;
1687 }
1688
1689
1690 /*
1691 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1692 * type | | | | | |
1693 * ---------- ---------------- ----------------------------------
1694 * cb1 cb2 offset <cb1 bytes of data>
1695 *
1696 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1697 * The cb2 and offset describe an amount of data to be copied from the already
1698 * expanded data, relative to the current position.
1699 *
1700 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1701 */
1702 case 3:
1703 {
1704 cbSrc -= 3;
1705 if (cbSrc < 0)
1706 return VERR_LDRLX_BAD_ITERDATA2;
1707 else
1708 {
1709 const int cb1 = (*pbSrc >> 2) & 0xf;
1710 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1711 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1712
1713 pbSrc += 3;
1714 cbSrc -= cb1;
1715 if (cbSrc < 0)
1716 return VERR_LDRLX_BAD_ITERDATA2;
1717 cbDst -= cb1;
1718 if (cbDst < 0)
1719 return VERR_LDRLX_BAD_ITERDATA2;
1720 memcpy(pbDst, pbSrc, cb1);
1721 pbDst += cb1;
1722 pbSrc += cb1;
1723
1724 if (off > OBJPAGELEN - (unsigned)cbDst)
1725 return VERR_LDRLX_BAD_ITERDATA2;
1726 cbDst -= cb2;
1727 if (cbDst < 0)
1728 return VERR_LDRLX_BAD_ITERDATA2;
1729 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1730 pbDst += cb2;
1731 }
1732 break;
1733 }
1734 } /* type switch. */
1735 } /* unpack loop */
1736
1737l_endloop:
1738
1739
1740 /*
1741 * Zero remainder of the page.
1742 */
1743 if (cbDst > 0)
1744 memset(pbDst, 0, cbDst);
1745
1746 return VINF_SUCCESS;
1747}
1748
1749
1750/**
1751 * Special memcpy employed by the iterdata2 algorithm.
1752 *
1753 * Emulates a 16-bit memcpy (copying 16 bits at a time) and the effects this
1754 * has when the source is very close to the destination.
1755 *
1756 * @param pbDst Destination pointer.
1757 * @param pbSrc Source pointer. Will always be <= pbDst.
1758 * @param cb Amount of data to be copied.
1759 * @remark This assumes that unaligned word and dword access is fine.
1760 */
1761static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb)
1762{
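/* Note: for very small copy distances (the off == 1 case mentioned in the iterdata2
 * format comments) a 16-bit wide copy gives a different result than a byte-wise copy
 * would, and the iterdata2 format relies on exactly that behaviour - which is why a
 * plain memcpy()/memmove() cannot be used here. */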
1763 switch (pbDst - pbSrc)
1764 {
1765 case 0:
1766 case 1:
1767 case 2:
1768 case 3:
1769 /* 16-bit copy (unaligned) */
1770 if (cb & 1)
1771 *pbDst++ = *pbSrc++;
1772 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1773 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1774 break;
1775
1776 default:
1777 /* 32-bit copy (unaligned) */
1778 if (cb & 1)
1779 *pbDst++ = *pbSrc++;
1780 if (cb & 2)
1781 {
1782 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1783 pbDst += 2;
1784 pbSrc += 2;
1785 }
1786 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1787 *(uint32_t *)pbDst = *(const uint32_t *)pbSrc;
1788 break;
1789 }
1790}
1791
1792#if 0
1793
1794/**
1795 * Unprotects or protects the specified image mapping.
1796 *
1797 * @returns IPRT status code.
1798 *
1799 * @param pModLX The LX module interpreter instance.
1800 * @param pvBits The mapping to protect.
1801 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1802 * protect according to the object table.
1803 */
1804static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1805{
1806 uint32_t i;
1807
1808 /*
1809 * Change object protection.
1810 */
1811 for (i = 0; i < pModLX->cSegments; i++)
1812 {
1813 int rc;
1814 void *pv;
1815 KPROT enmProt;
1816
1817 /* calc new protection. */
1818 enmProt = pModLX->aSegments[i].enmProt;
1819 if (fUnprotectOrProtect)
1820 {
1821 switch (enmProt)
1822 {
1823 case KPROT_NOACCESS:
1824 case KPROT_READONLY:
1825 case KPROT_READWRITE:
1826 case KPROT_WRITECOPY:
1827 enmProt = KPROT_READWRITE;
1828 break;
1829 case KPROT_EXECUTE:
1830 case KPROT_EXECUTE_READ:
1831 case KPROT_EXECUTE_READWRITE:
1832 case KPROT_EXECUTE_WRITECOPY:
1833 enmProt = KPROT_EXECUTE_READWRITE;
1834 break;
1835 default:
1836 KLDRMODLX_ASSERT(!"bad enmProt");
1837 return -1;
1838 }
1839 }
1840 else
1841 {
1842 /* copy on write -> normal write. */
1843 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1844 enmProt = KPROT_EXECUTE_READWRITE;
1845 else if (enmProt == KPROT_WRITECOPY)
1846 enmProt = KPROT_READWRITE;
1847 }
1848
1849
1850 /* calc the address and set page protection. */
1851 pv = (uint8_t *)pvBits + pModLX->aSegments[i].RVA;
1852
1853 rc = kHlpPageProtect(pv, pModLX->aSegments[i].cbMapped, enmProt);
1854 if (RT_FAILURE(rc))
1855 break;
1856
1857 /** @todo the gap page should be marked NOACCESS! */
1858 }
1859
1860 return VINF_SUCCESS;
1861}
1862
1863
1864/** @copydoc kLdrModUnmap */
1865static int kldrModLXUnmap(PRTLDRMODINTERNAL pMod)
1866{
1867 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1868 uint32_t i;
1869 int rc;
1870
1871 /*
1872 * Mapped?
1873 */
1874 if (!pModLX->pvMapping)
1875 return KLDR_ERR_NOT_MAPPED;
1876
1877 /*
1878 * Free the mapping and update the segments.
1879 */
1880 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1881 KLDRMODLX_ASSERT(!rc);
1882 pModLX->pvMapping = NULL;
1883
1884 for (i = 0; i < pModLX->cSegments; i++)
1885 pModLX->aSegments[i].MapAddress = 0;
1886
1887 return rc;
1888}
1889
1890
1891/** @copydoc kLdrModAllocTLS */
1892static int kldrModLXAllocTLS(PRTLDRMODINTERNAL pMod, void *pvMapping)
1893{
1894 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1895
1896 /* no tls, just do the error checking. */
1897 if ( pvMapping == KLDRMOD_INT_MAP
1898 && pModLX->pvMapping)
1899 return KLDR_ERR_NOT_MAPPED;
1900 return VINF_SUCCESS;
1901}
1902
1903
1904/** @copydoc kLdrModFreeTLS */
1905static void kldrModLXFreeTLS(PRTLDRMODINTERNAL pMod, void *pvMapping)
1906{
1907 /* no tls. */
1908 RT_NOREF(pMod);
1909 RT_NOREF(pvMapping);
1910
1911}
1912
1913
1914/** @copydoc kLdrModReload */
1915static int kldrModLXReload(PRTLDRMODINTERNAL pMod)
1916{
1917 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1918 int rc, rc2;
1919
1920 /*
1921 * Mapped?
1922 */
1923 if (!pModLX->pvMapping)
1924 return KLDR_ERR_NOT_MAPPED;
1925
1926 /*
1927 * Before doing anything we'll have to make all pages writable.
1928 */
1929 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1930 if (RT_FAILURE(rc))
1931 return rc;
1932
1933 /*
1934 * Load the bits again.
1935 */
1936 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1937
1938 /*
1939 * Restore protection.
1940 */
1941 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1942 if (RT_SUCCESS(rc) && RT_FAILURE(rc2))
1943 rc = rc2;
1944 return rc;
1945}
1946
1947
1948/** @copydoc kLdrModFixupMapping */
1949static int kldrModLXFixupMapping(PRTLDRMODINTERNAL pMod, PFNRTLDRIMPORT pfnGetImport, void *pvUser)
1950{
1951 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1952 int rc, rc2;
1953
1954 /*
1955 * Mapped?
1956 */
1957 if (!pModLX->pvMapping)
1958 return KLDR_ERR_NOT_MAPPED;
1959
1960 /*
1961 * Before doing anything we'll have to make all pages writable.
1962 */
1963 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1964 if (RT_FAILURE(rc))
1965 return rc;
1966
1967 /*
1968 * Apply fixups and resolve imports.
1969 */
1970 rc = rtldrLX_RelocateBits(pMod, (void *)pModLX->pvMapping, (uintptr_t)pModLX->pvMapping,
1971 pModLX->aSegments[0].LinkAddress, pfnGetImport, pvUser);
1972
1973 /*
1974 * Restore protection.
1975 */
1976 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1977 if (RT_SUCCESS(rc) && RT_FAILURE(rc2))
1978 rc = rc2;
1979 return rc;
1980}
1981
1982
1983/** @copydoc kLdrModCallInit */
1984static int kldrModLXCallInit(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle)
1985{
1986 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1987 int rc;
1988
1989 /*
1990 * Mapped?
1991 */
1992 if (pvMapping == KLDRMOD_INT_MAP)
1993 {
1994 pvMapping = (void *)pModLX->pvMapping;
1995 if (!pvMapping)
1996 return KLDR_ERR_NOT_MAPPED;
1997 }
1998
1999 /*
2000 * Do TLS callbacks first and then call the init/term function if it's a DLL.
2001 */
2002 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2003 rc = kldrModLXDoCallDLL(pModLX, pvMapping, 0 /* attach */, uHandle);
2004 else
2005 rc = VINF_SUCCESS;
2006 return rc;
2007}
2008
2009
2010/**
2011 * Call the DLL entrypoint.
2012 *
2013 * @returns 0 on success.
2014 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
2015 * @param pModLX The LX module interpreter instance.
2016 * @param pvMapping The module mapping to use (resolved).
2017 * @param uOp The operation (DLL_*).
2018 * @param uHandle The module handle to present.
2019 */
2020static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, uintptr_t uHandle)
2021{
2022 int rc;
2023
2024 /*
2025 * If no entrypoint there isn't anything to be done.
2026 */
2027 if ( !pModLX->Hdr.e32_startobj
2028 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2029 return VINF_SUCCESS;
2030
2031 /*
2032 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2033 */
2034 rc = kldrModLXDoCall((uintptr_t)pvMapping
2035 + (uintptr_t)pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2036 + pModLX->Hdr.e32_eip,
2037 uHandle, uOp, NULL);
2038 if (rc)
2039 rc = VINF_SUCCESS;
2040 else if (uOp == 0 /* attach */)
2041 rc = KLDR_ERR_MODULE_INIT_FAILED;
2042 else /* detach: ignore failures */
2043 rc = VINF_SUCCESS;
2044 return rc;
2045}
2046
2047
2048/**
2049 * Do a 3 parameter callback.
2050 *
2051 * @returns 32-bit callback return.
2052 * @param uEntrypoint The address of the function to be called.
2053 * @param uHandle The first argument, the module handle.
2054 * @param uOp The second argument, the reason we're calling.
2055 * @param pvReserved The third argument, reserved argument. (figure this one out)
2056 */
2057static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved)
2058{
2059#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2060 int32_t rc;
2061/** @todo try/except */
2062
2063 /*
2064 * Paranoia.
2065 */
2066# ifdef __GNUC__
2067 __asm__ __volatile__(
2068 "pushl %2\n\t"
2069 "pushl %1\n\t"
2070 "pushl %0\n\t"
2071 "lea 12(%%esp), %2\n\t"
2072 "call *%3\n\t"
2073 "movl %2, %%esp\n\t"
2074 : "=a" (rc)
2075 : "d" (uOp),
2076 "S" (0),
2077 "c" (uEntrypoint),
2078 "0" (uHandle));
2079# elif defined(_MSC_VER)
2080 __asm {
2081 mov eax, [uHandle]
2082 mov edx, [uOp]
2083 mov ecx, 0
2084 mov ebx, [uEntrypoint]
2085 push edi
2086 mov edi, esp
2087 push ecx
2088 push edx
2089 push eax
2090 call ebx
2091 mov esp, edi
2092 pop edi
2093 mov [rc], eax
2094 }
2095# else
2096# error "port me!"
2097# endif
2098 RT_NOREF(pvReserved);
2099 return rc;
2100
2101#else
2102 RT_NOREF(uEntrypoint);
2103 RT_NOREF(uHandle);
2104 RT_NOREF(uOp);
2105 RT_NOREF(pvReserved);
2106 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2107#endif
2108}
2109
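/*
 * Illustrative sketch (not part of the original sources): the assembly above
 * is roughly equivalent to the plain C call below, with the three arguments
 * pushed right to left and the stack pointer restored afterwards (some of the
 * arguments are additionally preloaded into registers). The typedef and
 * function names are made up for illustration.
 */
# if 0
typedef int32_t (*PFNLXDLLENTRY)(uintptr_t uHandle, uint32_t uOp, void *pvReserved);

static int32_t kldrModLXDoCallSketch(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp)
{
    return ((PFNLXDLLENTRY)uEntrypoint)(uHandle, uOp, NULL);
}
# endif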
2110
2111/** @copydoc kLdrModCallTerm */
2112static int kldrModLXCallTerm(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle)
2113{
2114 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2115
2116 /*
2117 * Mapped?
2118 */
2119 if (pvMapping == KLDRMOD_INT_MAP)
2120 {
2121 pvMapping = (void *)pModLX->pvMapping;
2122 if (!pvMapping)
2123 return KLDR_ERR_NOT_MAPPED;
2124 }
2125
2126 /*
2127 * Do the call.
2128 */
2129 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2130 kldrModLXDoCallDLL(pModLX, pvMapping, 1 /* detach */, uHandle);
2131
2132 return VINF_SUCCESS;
2133}
2134
2135
2136/** @copydoc kLdrModCallThread */
2137static int kldrModLXCallThread(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle, unsigned fAttachingOrDetaching)
2138{
2139 /* no thread attach/detach callout. */
2140 RT_NOREF(pMod);
2141 RT_NOREF(pvMapping);
2142 RT_NOREF(uHandle);
2143 RT_NOREF(fAttachingOrDetaching);
2144 return VINF_SUCCESS;
2145}
2146
2147#endif
2148
2149/**
2150 * @interface_method_impl{RTLDROPS,pfnGetImageSize}
2151 */
2152static DECLCALLBACK(size_t) rtldrLX_GetImageSize(PRTLDRMODINTERNAL pMod)
2153{
2154 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2155 return pModLX->cbMapped;
2156}
2157
2158
2159/**
2160 * @interface_method_impl{RTLDROPS,pfnGetBits}
2161 */
2162static DECLCALLBACK(int) rtldrLX_GetBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR BaseAddress,
2163 PFNRTLDRIMPORT pfnGetImport, void *pvUser)
2164{
2165 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2166
2167 /*
2168 * Load the image bits.
2169 */
2170 int rc = kldrModLXDoLoadBits(pModLX, pvBits);
2171 if (RT_SUCCESS(rc))
2172 {
2173 /*
2174 * Perform relocations.
2175 */
2176 rc = rtldrLX_RelocateBits(pMod, pvBits, BaseAddress, pModLX->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2177 }
2178 return rc;
2179}
2180
2181
2182/* GCC goes bonkers if we put this inside the function. */
2183union RELOC_VISIBILITY_STUPIDITY
2184{
2185 const uint8_t *pb;
2186 const struct r32_rlc *prlc;
2187};
2188
2189/**
2190 * @interface_method_impl{RTLDROPS,pfnRelocate}
2191 */
2192static DECLCALLBACK(int) rtldrLX_RelocateBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR NewBaseAddress,
2193 RTUINTPTR OldBaseAddress, PFNRTLDRIMPORT pfnGetImport, void *pvUser)
2194{
2195 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2196 uint32_t iSeg;
2197 int rc;
2198
2199 /*
2200 * Do we need to do *anything*?
2201 */
2202 if ( NewBaseAddress == OldBaseAddress
2203 && NewBaseAddress == pModLX->paObjs[0].o32_base
2204 && !pModLX->Hdr.e32_impmodcnt)
2205 return VINF_SUCCESS;
2206
2207 /*
2208 * Load the fixup section.
2209 */
2210 if (!pModLX->pbFixupSection)
2211 {
2212 rc = kldrModLXDoLoadFixupSection(pModLX);
2213 if (RT_FAILURE(rc))
2214 return rc;
2215 }
2216
2217 /*
2218 * Iterate the segments.
2219 */
2220 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2221 {
2222 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2223 RTLDRADDR PageAddress = NewBaseAddress + pModLX->aSegments[iSeg].RVA;
2224 uint32_t iPage;
2225 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->aSegments[iSeg].RVA;
2226
2227 /*
2228 * Iterate the page map pages.
2229 */
2230 for (iPage = 0, rc = VINF_SUCCESS;
2231 RT_SUCCESS(rc) && iPage < pObj->o32_mapsize;
2232 iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2233 {
2234 const uint8_t * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2235 const uint8_t *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2236 RTLDRADDR uValue = NIL_RTLDRADDR;
2237 uint32_t fKind = 0;
2238 int iSelector;
2239
2240 /* sanity */
2241 if (pbFixupRecEnd < pb)
2242 return VERR_LDR_BAD_FIXUP;
2243 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2244 return VERR_LDR_BAD_FIXUP;
2245 if (pb < pModLX->pbFixupSection)
2246 return VERR_LDR_BAD_FIXUP;
2247
2248 /*
2249 * Iterate the fixup record.
2250 */
2251 while (pb < pbFixupRecEnd)
2252 {
2253 union RELOC_VISIBILITY_STUPIDITY u;
2254 char szImpModule[256];
2255 u.pb = pb;
2256 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pb at the 4th member. */
2257
2258 /*
2259 * Figure out the target.
2260 */
2261 switch (u.prlc->nr_flags & NRRTYP)
2262 {
2263 /*
2264 * Internal fixup.
2265 */
2266 case NRRINT:
2267 {
2268 uint16_t iTrgObject;
2269 uint32_t offTrgObject;
2270
2271 /* the object */
2272 if (u.prlc->nr_flags & NR16OBJMOD)
2273 {
2274 iTrgObject = *(const uint16_t *)pb;
2275 pb += 2;
2276 }
2277 else
2278 iTrgObject = *pb++;
2279 iTrgObject--;
2280 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2281 return VERR_LDR_BAD_FIXUP;
2282
2283 /* the target */
2284 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2285 {
2286 if (u.prlc->nr_flags & NR32BITOFF)
2287 {
2288 offTrgObject = *(const uint32_t *)pb;
2289 pb += 4;
2290 }
2291 else
2292 {
2293 offTrgObject = *(const uint16_t *)pb;
2294 pb += 2;
2295 }
2296
2297 /* calculate the symbol info. */
2298 uValue = offTrgObject + NewBaseAddress + pModLX->aSegments[iTrgObject].RVA;
2299 }
2300 else
2301 uValue = NewBaseAddress + pModLX->aSegments[iTrgObject].RVA;
2302 if ( (u.prlc->nr_stype & NRALIAS)
2303 || (pModLX->aSegments[iTrgObject].fFlags & RTLDRSEG_FLAG_16BIT))
2304 iSelector = pModLX->aSegments[iTrgObject].Sel16bit;
2305 else
2306 iSelector = pModLX->aSegments[iTrgObject].SelFlat;
2307 fKind = 0;
2308 break;
2309 }
2310
2311 /*
2312 * Import by symbol ordinal.
2313 */
2314 case NRRORD:
2315 {
2316 uint16_t iModule;
2317 uint32_t iSymbol;
2318
2319 /* the module ordinal */
2320 if (u.prlc->nr_flags & NR16OBJMOD)
2321 {
2322 iModule = *(const uint16_t *)pb;
2323 pb += 2;
2324 }
2325 else
2326 iModule = *pb++;
2327 iModule--;
2328 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2329 return VERR_LDR_BAD_FIXUP;
2330 rc = kldrModLXGetImport(pModLX, NULL, iModule, szImpModule, sizeof(szImpModule), NULL);
2331 if (RT_FAILURE(rc))
2332 return rc;
2333
2334#if 1
2335 if (u.prlc->nr_flags & NRICHAIN)
2336 return VERR_LDR_BAD_FIXUP;
2337#endif
2338
2339 /* the symbol ordinal. */
2340 if (u.prlc->nr_flags & NR32BITOFF)
2341 {
2342 iSymbol = *(const uint32_t *)pb;
2343 pb += 4;
2344 }
2345 else if (!(u.prlc->nr_flags & NR8BITORD))
2346 {
2347 iSymbol = *(const uint16_t *)pb;
2348 pb += 2;
2349 }
2350 else
2351 iSymbol = *pb++;
2352
2353 /* resolve it. */
2354 rc = pfnGetImport(pMod, szImpModule, NULL, iSymbol, &uValue, /*&fKind,*/ pvUser);
2355 if (RT_FAILURE(rc))
2356 return rc;
2357 iSelector = -1;
2358 break;
2359 }
2360
2361 /*
2362 * Import by symbol name.
2363 */
2364 case NRRNAM:
2365 {
2366 uint32_t iModule;
2367 uint32_t offSymbol; /* 32-bit, as the NR32BITOFF case below reads a 32-bit offset. */
2368 const uint8_t *pbSymbol;
2369
2370 /* the module ordinal */
2371 if (u.prlc->nr_flags & NR16OBJMOD)
2372 {
2373 iModule = *(const uint16_t *)pb;
2374 pb += 2;
2375 }
2376 else
2377 iModule = *pb++;
2378 iModule--;
2379 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2380 return VERR_LDR_BAD_FIXUP;
2381 rc = kldrModLXGetImport(pModLX, NULL, iModule, szImpModule, sizeof(szImpModule), NULL);
2382 if (RT_FAILURE(rc))
2383 return rc;
2384#if 1
2385 if (u.prlc->nr_flags & NRICHAIN)
2386 return VERR_LDR_BAD_FIXUP;
2387#endif
2388
2389 /* the import procedure name offset. */
2390 if (u.prlc->nr_flags & NR32BITOFF)
2391 {
2392 offSymbol = *(const uint32_t *)pb;
2393 pb += 4;
2394 }
2395 else if (!(u.prlc->nr_flags & NR8BITORD))
2396 {
2397 offSymbol = *(const uint16_t *)pb;
2398 pb += 2;
2399 }
2400 else
2401 offSymbol = *pb++;
2402 pbSymbol = pModLX->pbImportProcs + offSymbol;
2403 if ( pbSymbol < pModLX->pbImportProcs
2404 || pbSymbol > pModLX->pbFixupSectionLast)
2405 return VERR_LDR_BAD_FIXUP;
2406 char szSymbol[256];
2407 memcpy(szSymbol, pbSymbol + 1, *pbSymbol);
2408 szSymbol[*pbSymbol] = '\0';
2409
2410 /* resolve it. */
2411 rc = pfnGetImport(pMod, szImpModule, szSymbol, UINT32_MAX, &uValue, /*&fKind,*/ pvUser);
2412 if (RT_FAILURE(rc))
2413 return rc;
2414 iSelector = -1;
2415 break;
2416 }
2417
2418 case NRRENT:
2419 KLDRMODLX_ASSERT(!"NRRENT");
2420 RT_FALL_THRU();
2421 default:
2422 iSelector = -1;
2423 break;
2424 }
2425
2426 /* addend */
2427 if (u.prlc->nr_flags & NRADD)
2428 {
2429 if (u.prlc->nr_flags & NR32BITADD)
2430 {
2431 uValue += *(const uint32_t *)pb;
2432 pb += 4;
2433 }
2434 else
2435 {
2436 uValue += *(const uint16_t *)pb;
2437 pb += 2;
2438 }
2439 }
2440
2441
2442 /*
2443 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2444 */
2445 if (!(u.prlc->nr_stype & NRCHAIN))
2446 {
2447 int off = u.prlc->r32_soff;
2448
2449 /* common / simple */
2450 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2451 && off >= 0
2452 && off <= (int)OBJPAGELEN - 4)
2453 *(uint32_t *)&pbPage[off] = (uint32_t)uValue;
2454 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2455 && off >= 0
2456 && off <= (int)OBJPAGELEN - 4)
2457 *(uint32_t *)&pbPage[off] = (uint32_t)(uValue - (PageAddress + off + 4));
2458 else
2459 {
2460 /* generic */
2461 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2462 if (RT_FAILURE(rc))
2463 return rc;
2464 }
2465 }
2466 else if (!(u.prlc->nr_flags & NRICHAIN))
2467 {
2468 const int16_t *poffSrc = (const int16_t *)pb;
2469 uint8_t c = u.pb[2];
2470
2471 /* common / simple */
2472 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2473 {
2474 while (c-- > 0)
2475 {
2476 int off = *poffSrc++;
2477 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2478 *(uint32_t *)&pbPage[off] = (uint32_t)uValue;
2479 else
2480 {
2481 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2482 if (RT_FAILURE(rc))
2483 return rc;
2484 }
2485 }
2486 }
2487 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2488 {
2489 while (c-- > 0)
2490 {
2491 int off = *poffSrc++;
2492 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2493 *(uint32_t *)&pbPage[off] = (uint32_t)(uValue - (PageAddress + off + 4));
2494 else
2495 {
2496 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2497 if (RT_FAILURE(rc))
2498 return rc;
2499 }
2500 }
2501 }
2502 else
2503 {
2504 while (c-- > 0)
2505 {
2506 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2507 if (RT_FAILURE(rc))
2508 return rc;
2509 }
2510 }
2511 pb = (const uint8_t *)poffSrc;
2512 }
2513 else
2514 {
2515 /* This is a pain because it will require virgin pages on a relocation. */
2516 KLDRMODLX_ASSERT(!"NRICHAIN");
2517 return VERR_LDRLX_NRICHAIN_NOT_SUPPORTED;
2518 }
2519 }
2520 }
2521 }
2522
2523 return VINF_SUCCESS;
2524}
2525
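/*
 * Illustrative sketch (not part of the original sources): the shape of an
 * import resolver (PFNRTLDRIMPORT) as driven by the fixup loop above. NRRORD
 * fixups hand it an ordinal with a NULL symbol name, NRRNAM fixups a symbol
 * name with UINT32_MAX as the ordinal. The function name and the resolved
 * address are made up for illustration.
 */
#if 0
static DECLCALLBACK(int) lxExampleGetImport(RTLDRMOD hLdrMod, const char *pszModule, const char *pszSymbol,
                                            unsigned uSymbol, PRTLDRADDR pValue, void *pvUser)
{
    RT_NOREF(hLdrMod, pvUser);
    Log(("resolving %s!%s (ordinal %#x)\n", pszModule, pszSymbol ? pszSymbol : "<by ordinal>", uSymbol));
    *pValue = 0x10000; /* pretend everything resolves to this address */
    return VINF_SUCCESS;
}
#endif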
2526
2527/**
2528 * Applies the relocation to one 'source' in a page.
2529 *
2530 * This takes care of the more esoteric cases, while the common cases
2531 * are dealt with separately.
2532 *
2533 * @returns IPRT status code.
2534 * @param pbPage The page in which to apply the fixup.
2535 * @param off Page relative offset of where to apply the fixup.
2536 * @param PageAddress The page address.
2537 * @param prlc The relocation record.
2538 * @param iSelector Selector value, -1 if flat.
2539 * @param uValue The target value.
2540 * @param fKind The target kind.
2541 */
2542static int kldrModLXDoReloc(uint8_t *pbPage, int off, RTLDRADDR PageAddress, const struct r32_rlc *prlc,
2543 int iSelector, RTLDRADDR uValue, uint32_t fKind)
2544{
2545#pragma pack(1) /* just to be sure */
2546 union
2547 {
2548 uint8_t ab[6];
2549 uint32_t off32;
2550 uint16_t off16;
2551 uint8_t off8;
2552 struct
2553 {
2554 uint16_t off;
2555 uint16_t Sel;
2556 } Far16;
2557 struct
2558 {
2559 uint32_t off;
2560 uint16_t Sel;
2561 } Far32;
2562 } uData;
2563#pragma pack()
2564 const uint8_t *pbSrc;
2565 uint8_t *pbDst;
2566 uint8_t cb;
2567
2568 RT_NOREF(fKind);
2569
2570 /*
2571 * Compose the fixup data.
2572 */
2573 switch (prlc->nr_stype & NRSRCMASK)
2574 {
2575 case NRSBYT:
2576 uData.off8 = (uint8_t)uValue;
2577 cb = 1;
2578 break;
2579 case NRSSEG:
2580 if (iSelector == -1)
2581 {
2582 /* fixme */
2583 }
2584 uData.off16 = iSelector;
2585 cb = 2;
2586 break;
2587 case NRSPTR:
2588 if (iSelector == -1)
2589 {
2590 /* fixme */
2591 }
2592 uData.Far16.off = (uint16_t)uValue;
2593 uData.Far16.Sel = iSelector;
2594 cb = 4;
2595 break;
2596 case NRSOFF:
2597 uData.off16 = (uint16_t)uValue;
2598 cb = 2;
2599 break;
2600 case NRPTR48:
2601 if (iSelector == -1)
2602 {
2603 /* fixme */
2604 }
2605 uData.Far32.off = (uint32_t)uValue;
2606 uData.Far32.Sel = iSelector;
2607 cb = 6;
2608 break;
2609 case NROFF32:
2610 uData.off32 = (uint32_t)uValue;
2611 cb = 4;
2612 break;
2613 case NRSOFF32:
2614 uData.off32 = (uint32_t)(uValue - (PageAddress + off + 4));
2615 cb = 4;
2616 break;
2617 default:
2618 return VERR_LDRLX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2619 }
2620
2621 /*
2622 * Apply it. This is sloooow...
2623 */
2624 pbSrc = &uData.ab[0];
2625 pbDst = pbPage + off;
2626 while (cb-- > 0)
2627 {
2628 if (off > (int)OBJPAGELEN)
2629 break;
2630 if (off >= 0)
2631 *pbDst = *pbSrc;
2632 pbSrc++;
2633 pbDst++; off++; /* advance off with pbDst so the bounds checks above are applied per byte */
2634 }
2635
2636 return VINF_SUCCESS;
2637}
2638
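/*
 * Worked example (not from the original sources): for a self-relative 32-bit
 * fixup (NRSOFF32) at page offset 0x10 in a page mapped at 0x20000 and
 * targeting 0x20400, the stored value is relative to the first byte after the
 * 32-bit field: 0x20400 - (0x20000 + 0x10 + 4) = 0x3ec.
 */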
2639
2640/**
2641 * @interface_method_impl{RTLDROPS,pfnEnumSegments}
2642 */
2643static DECLCALLBACK(int) rtldrLX_EnumSegments(PRTLDRMODINTERNAL pMod, PFNRTLDRENUMSEGS pfnCallback, void *pvUser)
2644{
2645 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2646 uint32_t const cSegments = pThis->cSegments;
2647 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2648 {
2649 int rc = pfnCallback(pMod, &pThis->aSegments[iSeg], pvUser);
2650 if (rc != VINF_SUCCESS)
2651 return rc;
2652 }
2653
2654 return VINF_SUCCESS;
2655}
2656
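/*
 * Illustrative sketch (not part of the original sources): a segment
 * enumeration callback as invoked by the method above, dumping the fields the
 * LX interpreter fills in. It would typically be driven through
 * RTLdrEnumSegments(hLdrMod, lxExampleEnumSegsCallback, &iSeg). The function
 * name is made up for illustration.
 */
#if 0
static DECLCALLBACK(int) lxExampleEnumSegsCallback(RTLDRMOD hLdrMod, PCRTLDRSEG pSeg, void *pvUser)
{
    uint32_t *piSeg = (uint32_t *)pvUser;
    RT_NOREF(hLdrMod);
    Log(("seg #%u: RVA=%#RX64 cb=%#RX64 cbMapped=%#RX64\n",
         (*piSeg)++, (uint64_t)pSeg->RVA, (uint64_t)pSeg->cb, (uint64_t)pSeg->cbMapped));
    return VINF_SUCCESS;
}
#endif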
2657
2658/**
2659 * @interface_method_impl{RTLDROPS,pfnLinkAddressToSegOffset}
2660 */
2661static DECLCALLBACK(int) rtldrLX_LinkAddressToSegOffset(PRTLDRMODINTERNAL pMod, RTLDRADDR LinkAddress,
2662 uint32_t *piSeg, PRTLDRADDR poffSeg)
2663{
2664 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2665 uint32_t const cSegments = pThis->cSegments;
2666 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2667 {
2668 RTLDRADDR offSeg = LinkAddress - pThis->aSegments[iSeg].LinkAddress;
2669 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2670 || offSeg < pThis->aSegments[iSeg].cb)
2671 {
2672 *piSeg = iSeg;
2673 *poffSeg = offSeg;
2674 return VINF_SUCCESS;
2675 }
2676 }
2677
2678 return VERR_LDR_INVALID_LINK_ADDRESS;
2679}
2680
2681
2682/**
2683 * @interface_method_impl{RTLDROPS,pfnLinkAddressToRva}
2684 */
2685static DECLCALLBACK(int) rtldrLX_LinkAddressToRva(PRTLDRMODINTERNAL pMod, RTLDRADDR LinkAddress, PRTLDRADDR pRva)
2686{
2687 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2688 uint32_t const cSegments = pThis->cSegments;
2689 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2690 {
2691 RTLDRADDR offSeg = LinkAddress - pThis->aSegments[iSeg].LinkAddress;
2692 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2693 || offSeg < pThis->aSegments[iSeg].cb)
2694 {
2695 *pRva = pThis->aSegments[iSeg].RVA + offSeg;
2696 return VINF_SUCCESS;
2697 }
2698 }
2699
2700 return VERR_LDR_INVALID_RVA;
2701}
2702
2703
2704/**
2705 * @interface_method_impl{RTLDROPS,pfnSegOffsetToRva}
2706 */
2707static DECLCALLBACK(int) rtldrLX_SegOffsetToRva(PRTLDRMODINTERNAL pMod, uint32_t iSeg, RTLDRADDR offSeg, PRTLDRADDR pRva)
2708{
2709 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2710
2711 if (iSeg >= pThis->cSegments)
2712 return VERR_LDR_INVALID_SEG_OFFSET;
2713 PCRTLDRSEG pSegment = &pThis->aSegments[iSeg];
2714
2715 if ( offSeg > pSegment->cbMapped
2716 && offSeg > pSegment->cb
2717 && ( pSegment->cbFile < 0
2718 || offSeg > (uint64_t)pSegment->cbFile))
2719 return VERR_LDR_INVALID_SEG_OFFSET;
2720
2721 *pRva = pSegment->RVA + offSeg;
2722 return VINF_SUCCESS;
2723}
2724
2725
2726/**
2727 * @interface_method_impl{RTLDROPS,pfnRvaToSegOffset}
2728 */
2729static DECLCALLBACK(int) rtldrLX_RvaToSegOffset(PRTLDRMODINTERNAL pMod, RTLDRADDR Rva, uint32_t *piSeg, PRTLDRADDR poffSeg)
2730{
2731 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2732 uint32_t const cSegments = pThis->cSegments;
2733 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2734 {
2735 RTLDRADDR offSeg = Rva - pThis->aSegments[iSeg].RVA;
2736 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2737 || offSeg < pThis->aSegments[iSeg].cb)
2738 {
2739 *piSeg = iSeg;
2740 *poffSeg = offSeg;
2741 return VINF_SUCCESS;
2742 }
2743 }
2744
2745 return VERR_LDR_INVALID_RVA;
2746}
2747
2748
2749/**
2750 * @interface_method_impl{RTLDROPS,pfnReadDbgInfo}
2751 */
2752static DECLCALLBACK(int) rtldrLX_ReadDbgInfo(PRTLDRMODINTERNAL pMod, uint32_t iDbgInfo, RTFOFF off, size_t cb, void *pvBuf)
2753{
2754 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2755 if (iDbgInfo == 0)
2756 return pThis->Core.pReader->pfnRead(pThis->Core.pReader, pvBuf, cb, off);
2757 return VERR_OUT_OF_RANGE;
2758}
2759
2760
2761/**
2762 * @interface_method_impl{RTLDROPS,pfnQueryProp}
2763 */
2764static DECLCALLBACK(int) rtldrLX_QueryProp(PRTLDRMODINTERNAL pMod, RTLDRPROP enmProp, void const *pvBits,
2765 void *pvBuf, size_t cbBuf, size_t *pcbRet)
2766{
2767 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2768 int rc;
2769 switch (enmProp)
2770 {
2771 case RTLDRPROP_IMPORT_COUNT:
2772 Assert(cbBuf == sizeof(uint32_t));
2773 Assert(*pcbRet == cbBuf);
2774 *(uint32_t *)pvBuf = pThis->Hdr.e32_impmodcnt;
2775 rc = VINF_SUCCESS;
2776 break;
2777
2778 case RTLDRPROP_IMPORT_MODULE:
2779 rc = kldrModLXGetImport(pThis, pvBits, *(uint32_t const *)pvBuf, (char *)pvBuf, cbBuf, pcbRet);
2780 break;
2781
2782 case RTLDRPROP_INTERNAL_NAME:
2783 *pcbRet = pThis->cchName + 1;
2784 if (cbBuf >= pThis->cchName + 1)
2785 {
2786 memcpy(pvBuf, pThis->pszName, pThis->cchName + 1);
2787 rc = VINF_SUCCESS;
2788 }
2789 else
2790 rc = VERR_BUFFER_OVERFLOW;
2791 break;
2792
2793
2794 default:
2795 rc = VERR_NOT_FOUND;
2796 break;
2797 }
2798 RT_NOREF_PV(pvBits);
2799 return rc;
2800}
2801
2802
2803/**
2804 * Operations for an LX module interpreter.
2805 */
2806static const RTLDROPS s_rtldrLXOps =
2807{
2808 "LX",
2809 rtldrLX_Close,
2810 NULL,
2811 NULL /*pfnDone*/,
2812 rtldrLX_EnumSymbols,
2813 /* ext */
2814 rtldrLX_GetImageSize,
2815 rtldrLX_GetBits,
2816 rtldrLX_RelocateBits,
2817 rtldrLX_GetSymbolEx,
2818 NULL /*pfnQueryForwarderInfo*/,
2819 rtldrLX_EnumDbgInfo,
2820 rtldrLX_EnumSegments,
2821 rtldrLX_LinkAddressToSegOffset,
2822 rtldrLX_LinkAddressToRva,
2823 rtldrLX_SegOffsetToRva,
2824 rtldrLX_RvaToSegOffset,
2825 rtldrLX_ReadDbgInfo,
2826 rtldrLX_QueryProp,
2827 NULL /*pfnVerifySignature*/,
2828 NULL /*pfnHashImage*/,
2829 NULL /*pfnUnwindFrame*/,
2830 42
2831};
2832
2833
2834/**
2835 * Handles opening LX images.
2836 */
2837DECLHIDDEN(int) rtldrLXOpen(PRTLDRREADER pReader, uint32_t fFlags, RTLDRARCH enmArch, RTFOFF offLxHdr,
2838 PRTLDRMOD phLdrMod, PRTERRINFO pErrInfo)
2839{
2840
2841 /*
2842 * Create the instance data and do a minimal header validation.
2843 */
2844 PKLDRMODLX pThis = NULL;
2845 int rc = kldrModLXDoCreate(pReader, offLxHdr, fFlags, &pThis, pErrInfo);
2846 if (RT_SUCCESS(rc))
2847 {
2848 /*
2849 * Match up against the requested CPU architecture.
2850 */
2851 if ( enmArch == RTLDRARCH_WHATEVER
2852 || pThis->Core.enmArch == enmArch)
2853 {
2854 pThis->Core.pOps = &s_rtldrLXOps;
2855 pThis->Core.u32Magic = RTLDRMOD_MAGIC;
2856 *phLdrMod = &pThis->Core;
2857 return VINF_SUCCESS;
2858 }
2859 rc = VERR_LDR_ARCH_MISMATCH;
2860 }
2861 if (pThis)
2862 RTMemFree(pThis);
2863 return rc;
2864
2865}
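/*
 * Illustrative usage sketch (not part of the original sources): LX images are
 * normally reached through the generic RTLdr API, which dispatches into the
 * s_rtldrLXOps methods above. Error handling is abbreviated, the function name
 * is made up, and pfnGetImport is any import resolver of the shape sketched
 * earlier.
 */
#if 0
static int lxExampleLoad(const char *pszFilename, PFNRTLDRIMPORT pfnGetImport, void *pvUser)
{
    RTLDRMOD hLdrMod;
    int rc = RTLdrOpen(pszFilename, 0 /*fFlags*/, RTLDRARCH_WHATEVER, &hLdrMod);
    if (RT_SUCCESS(rc))
    {
        size_t cbImage = RTLdrSize(hLdrMod);                /* rtldrLX_GetImageSize */
        void  *pvBits  = RTMemAlloc(cbImage);
        if (pvBits)
        {
            rc = RTLdrGetBits(hLdrMod, pvBits, (RTUINTPTR)(uintptr_t)pvBits,  /* rtldrLX_GetBits + relocation */
                              pfnGetImport, pvUser);
            RTMemFree(pvBits);
        }
        else
            rc = VERR_NO_MEMORY;
        RTLdrClose(hLdrMod);
    }
    return rc;
}
#endif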
2866