VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/ldr/ldrLX.cpp@ 74645

Last change on this file since 74645 was 74645, checked in by vboxsync, 6 years ago

IPRT: More adjustments to the LX and Mach-O loader code from kStuff. bugref:9232

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 96.0 KB
1/* $Id: ldrLX.cpp 74645 2018-10-06 20:03:54Z vboxsync $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#define LOG_GROUP RTLOGGROUP_LDR
36#include <iprt/ldr.h>
37#include "internal/iprt.h"
38
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/err.h>
42#include <iprt/log.h>
43#include <iprt/mem.h>
44#include <iprt/string.h>
45
46#include <iprt/formats/lx.h>
47#include "internal/ldr.h"
48
49
50/*********************************************************************************************************************************
51* Defined Constants And Macros *
52*********************************************************************************************************************************/
53/** @def KLDRMODLX_STRICT
54 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
55#define KLDRMODLX_STRICT 1
56
57/** @def KLDRMODLX_ASSERT
58 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
59 */
60#ifdef KLDRMODLX_STRICT
61# define KLDRMODLX_ASSERT(expr) Assert(expr)
62#else
63# define KLDRMODLX_ASSERT(expr) do {} while (0)
64#endif
65
66
67/*********************************************************************************************************************************
68* Structures and Typedefs *
69*********************************************************************************************************************************/
70/**
71 * Instance data for the LX module interpreter.
72 */
73typedef struct KLDRMODLX
74{
75 /** Core module structure. */
76 RTLDRMODINTERNAL Core;
77
78 /** Pointer to the user mapping. */
79 const void *pvMapping;
80 /** The size of the mapped LX image. */
81 size_t cbMapped;
82 /** Reserved flags. */
83 uint32_t f32Reserved;
84
85 /** The offset of the LX header. */
86 RTFOFF offHdr;
87 /** Copy of the LX header. */
88 struct e32_exe Hdr;
89
90 /** Pointer to the loader section.
91 * Allocated together with this structure. */
92 const uint8_t *pbLoaderSection;
93 /** Pointer to the last byte in the loader section. */
94 const uint8_t *pbLoaderSectionLast;
95 /** Pointer to the object table in the loader section. */
96 const struct o32_obj *paObjs;
97 /** Pointer to the object page map table in the loader section. */
98 const struct o32_map *paPageMappings;
99 /** Pointer to the resource table in the loader section. */
100 const struct rsrc32 *paRsrcs;
101 /** Pointer to the resident name table in the loader section. */
102 const uint8_t *pbResNameTab;
103 /** Pointer to the entry table in the loader section. */
104 const uint8_t *pbEntryTab;
105
106 /** Pointer to the non-resident name table. */
107 uint8_t *pbNonResNameTab;
108 /** Pointer to the last byte in the non-resident name table. */
109 const uint8_t *pbNonResNameTabLast;
110
111 /** Pointer to the fixup section. */
112 uint8_t *pbFixupSection;
113 /** Pointer to the last byte in the fixup section. */
114 const uint8_t *pbFixupSectionLast;
115 /** Pointer to the fixup page table within pvFixupSection. */
116 const uint32_t *paoffPageFixups;
117 /** Pointer to the fixup record table within pvFixupSection. */
118 const uint8_t *pbFixupRecs;
119 /** Pointer to the import module name table within pvFixupSection. */
120 const uint8_t *pbImportMods;
121 /** Pointer to the import procedure name table within pvFixupSection. */
122 const uint8_t *pbImportProcs;
123
124 /** Pointer to the module name (in the resident name table). */
125 const char *pszName;
126 /** The name length. */
127 size_t cchName;
128
129 /** The target CPU. */
130 RTLDRCPU enmCpu;
131 /** Number of segments in aSegments. */
132 uint32_t cSegments;
133 /** Segment info. */
134 RTLDRSEG aSegments[RT_FLEXIBLE_ARRAY];
135} KLDRMODLX, *PKLDRMODLX;
136
137
138/*********************************************************************************************************************************
139* Internal Functions *
140*********************************************************************************************************************************/
141static int kldrModLXHasDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits);
142static DECLCALLBACK(int) rtldrLX_RelocateBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR NewBaseAddress,
143 RTUINTPTR OldBaseAddress, PFNRTLDRIMPORT pfnGetImport, void *pvUser);
144static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, ssize_t cbNameTable, uint32_t iOrdinal);
145static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, size_t cchSymbol, uint32_t *piSymbol);
146static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, ssize_t cbNameTable,
147 const char *pchSymbol, size_t cchSymbol);
148static int kldrModLXGetImport(PKLDRMODLX pThis, const void *pvBits, uint32_t iImport,
149 char *pszName, size_t cchName, size_t *pcbNeeded);
150static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
151static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
152static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc);
153static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb);
154static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
155 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind);
156#if 0
157static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
158static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, uintptr_t uHandle);
159static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved);
160#endif
161static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
162static int kldrModLXDoReloc(uint8_t *pbPage, int off, RTLDRADDR PageAddress, const struct r32_rlc *prlc,
163 int iSelector, RTLDRADDR uValue, uint32_t fKind);
164
165
166/**
167 * Separate function for reading and creating the LX module instance, to
168 * simplify cleanup on failure.
169 */
170static int kldrModLXDoCreate(PRTLDRREADER pRdr, RTFOFF offNewHdr, uint32_t fFlags, PKLDRMODLX *ppModLX, PRTERRINFO pErrInfo)
171{
172 struct e32_exe Hdr;
173 PKLDRMODLX pModLX;
174 uint32_t off, offEnd;
175 uint32_t i;
176 int fCanOptimizeMapping;
177 uint32_t NextRVA;
178
179 RT_NOREF(fFlags);
180 *ppModLX = NULL;
181
182 /*
183 * Read the signature and file header.
184 */
185 int rc = pRdr->pfnRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
186 if (RT_FAILURE(rc))
187 return RTErrInfoSetF(pErrInfo, rc, "Error reading LX header at %RTfoff: %Rrc", offNewHdr, rc);
188 if ( Hdr.e32_magic[0] != E32MAGIC1
189 || Hdr.e32_magic[1] != E32MAGIC2)
190 return RTErrInfoSetF(pErrInfo, VERR_INVALID_EXE_SIGNATURE, "Not LX magic: %02x %02x", Hdr.e32_magic[0], Hdr.e32_magic[1]);
191
192 /* We're not interested in anything but x86 images. */
193 if ( Hdr.e32_level != E32LEVEL
194 || Hdr.e32_border != E32LEBO
195 || Hdr.e32_worder != E32LEWO
196 || Hdr.e32_cpu < E32CPU286
197 || Hdr.e32_cpu > E32CPU486
198 || Hdr.e32_pagesize != OBJPAGELEN
199 )
200 return VERR_LDRLX_BAD_HEADER;
201
202 /* Some rough sanity checks. */
203 offEnd = pRdr->pfnSize(pRdr) >= (RTFOFF)~(uint32_t)16 ? ~(uint32_t)16 : (uint32_t)pRdr->pfnSize(pRdr);
204 if ( Hdr.e32_itermap > offEnd
205 || Hdr.e32_datapage > offEnd
206 || Hdr.e32_nrestab > offEnd
207 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
208 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
209 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
210 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
211 return VERR_LDRLX_BAD_HEADER;
212
213 /* Verify the loader section. */
214 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
215 if (Hdr.e32_objtab < sizeof(Hdr))
216 return VERR_LDRLX_BAD_LOADER_SECTION;
217 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
218 if (off > offEnd)
219 return VERR_LDRLX_BAD_LOADER_SECTION;
220 if ( Hdr.e32_objmap
221 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
222 return VERR_LDRLX_BAD_LOADER_SECTION;
223 if ( Hdr.e32_rsrccnt
224 && ( Hdr.e32_rsrctab < off
225 || Hdr.e32_rsrctab > offEnd
226 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
227 return VERR_LDRLX_BAD_LOADER_SECTION;
228 if ( Hdr.e32_restab
229 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
230 return VERR_LDRLX_BAD_LOADER_SECTION;
231 if ( Hdr.e32_enttab
232 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
233 return VERR_LDRLX_BAD_LOADER_SECTION;
234 if ( Hdr.e32_dircnt
235 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
236 return VERR_LDRLX_BAD_LOADER_SECTION;
237
238 /* Verify the fixup section. */
239 off = offEnd;
240 offEnd = off + Hdr.e32_fixupsize;
241 if ( Hdr.e32_fpagetab
242 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
243 {
244 /*
245 * wlink mixes the fixup section and the loader section.
246 */
247 off = Hdr.e32_fpagetab;
248 offEnd = off + Hdr.e32_fixupsize;
249 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
250 }
251 if ( Hdr.e32_frectab
252 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
253 return VERR_LDRLX_BAD_FIXUP_SECTION;
254 if ( Hdr.e32_impmod
255 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
256 return VERR_LDRLX_BAD_FIXUP_SECTION;
257 if ( Hdr.e32_impproc
258 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
259 return VERR_LDRLX_BAD_FIXUP_SECTION;
260
261 /*
262 * Calc the instance size, allocate and initialize it.
263 */
264 size_t cbModLXAndSegments = RT_ALIGN_Z(RT_UOFFSETOF_DYN(KLDRMODLX, aSegments[Hdr.e32_objcnt + 1]), 8);
265 pModLX = (PKLDRMODLX)RTMemAlloc(cbModLXAndSegments + Hdr.e32_ldrsize + 2 /*for two extra zeros*/);
266 if (!pModLX)
267 return VERR_NO_MEMORY;
268 *ppModLX = pModLX;
269
270 /* Core & CPU. */
271 pModLX->Core.u32Magic = 0; /* set by caller. */
272 pModLX->Core.eState = LDR_STATE_OPENED;
273 pModLX->Core.pOps = NULL; /* set by caller. */
274 pModLX->Core.pReader = pRdr;
275 switch (Hdr.e32_cpu)
276 {
277 case E32CPU286:
278 pModLX->enmCpu = RTLDRCPU_I80286;
279 pModLX->Core.enmArch = RTLDRARCH_X86_16;
280 break;
281 case E32CPU386:
282 pModLX->enmCpu = RTLDRCPU_I386;
283 pModLX->Core.enmArch = RTLDRARCH_X86_32;
284 break;
285 case E32CPU486:
286 pModLX->enmCpu = RTLDRCPU_I486;
287 pModLX->Core.enmArch = RTLDRARCH_X86_32;
288 break;
289 }
290 pModLX->Core.enmEndian = RTLDRENDIAN_LITTLE;
291 pModLX->Core.enmFormat = RTLDRFMT_LX;
292 switch (Hdr.e32_mflags & E32MODMASK)
293 {
294 case E32MODEXE:
295 pModLX->Core.enmType = !(Hdr.e32_mflags & E32NOINTFIX)
296 ? RTLDRTYPE_EXECUTABLE_RELOCATABLE
297 : RTLDRTYPE_EXECUTABLE_FIXED;
298 break;
299
300 case E32MODDLL:
301 case E32PROTDLL:
302 case E32MODPROTDLL:
303 pModLX->Core.enmType = !(Hdr.e32_mflags & E32SYSDLL)
304 ? RTLDRTYPE_SHARED_LIBRARY_RELOCATABLE
305 : RTLDRTYPE_SHARED_LIBRARY_FIXED;
306 break;
307
308 case E32MODPDEV:
309 case E32MODVDEV:
310 pModLX->Core.enmType = RTLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
311 break;
312 }
313
314 /* KLDRMODLX */
315 pModLX->cSegments = Hdr.e32_objcnt;
316 pModLX->pszName = NULL; /* finalized further down */
317 pModLX->cchName = 0;
318 pModLX->pvMapping = 0;
319 pModLX->cbMapped = 0;
320 pModLX->f32Reserved = 0;
321
322 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
323 memcpy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
324
325 pModLX->pbLoaderSection = (uint8_t *)pModLX + cbModLXAndSegments;
326 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
327 pModLX->paObjs = NULL;
328 pModLX->paPageMappings = NULL;
329 pModLX->paRsrcs = NULL;
330 pModLX->pbResNameTab = NULL;
331 pModLX->pbEntryTab = NULL;
332
333 pModLX->pbNonResNameTab = NULL;
334 pModLX->pbNonResNameTabLast = NULL;
335
336 pModLX->pbFixupSection = NULL;
337 pModLX->pbFixupSectionLast = NULL;
338 pModLX->paoffPageFixups = NULL;
339 pModLX->pbFixupRecs = NULL;
340 pModLX->pbImportMods = NULL;
341 pModLX->pbImportProcs = NULL;
342
343 /*
344 * Read the loader data.
345 */
346 rc = pRdr->pfnRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
347 if (RT_FAILURE(rc))
348 return rc;
349 ((uint8_t *)pModLX->pbLoaderSectionLast)[1] = 0;
350 ((uint8_t *)pModLX->pbLoaderSectionLast)[2] = 0;
351 if (pModLX->Hdr.e32_objcnt)
352 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
353 if (pModLX->Hdr.e32_objmap)
354 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
355 if (pModLX->Hdr.e32_rsrccnt)
356 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
357 if (pModLX->Hdr.e32_restab)
358 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
359 if (pModLX->Hdr.e32_enttab)
360 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
361
362 /*
363 * Get the soname from the resident name table.
364 * Very convenient that it's the 0 ordinal, because then we get a
365 * free string terminator.
366 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
367 */
368 if (pModLX->pbResNameTab)
369 pModLX->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
370 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
371 0);
372 if (!pModLX->pszName)
373 return VERR_LDRLX_NO_SONAME;
374 pModLX->cchName = *(const uint8_t *)pModLX->pszName++;
375 if ( pModLX->pszName[pModLX->cchName] != '\0'
376 || pModLX->cchName != strlen(pModLX->pszName))
377 return VERR_LDRLX_BAD_SONAME;
378
379 /*
380 * Quick validation of the object table.
381 */
382 for (i = 0; i < pModLX->cSegments; i++)
383 {
384 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
385 return VERR_LDRLX_BAD_OBJECT_TABLE;
386 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
387 return VERR_LDRLX_BAD_OBJECT_TABLE;
388 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
389 return VERR_LDRLX_BAD_OBJECT_TABLE;
390 if ( pModLX->paObjs[i].o32_mapsize
391 && ( (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
392 || (uint8_t *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
393 > pModLX->pbLoaderSectionLast))
394 return VERR_LDRLX_BAD_OBJECT_TABLE;
395 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
396 {
397 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
398 return VERR_LDRLX_BAD_OBJECT_TABLE;
399 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
400 return VERR_LDRLX_BAD_OBJECT_TABLE;
401 }
402 }
403
404 /*
405 * Check if we can optimize the mapping by using a different
406 * object alignment. The linker typically uses 64KB alignment,
407 * but we can easily get away with page alignment in most cases.
408 */
409 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
410 NextRVA = 0;
411
412 /*
413 * Setup the KLDRMOD segment array.
414 */
415 for (i = 0; i < pModLX->cSegments; i++)
416 {
417 /* unused */
418 pModLX->aSegments[i].pszName = NULL;
419 pModLX->aSegments[i].offFile = -1;
420 pModLX->aSegments[i].cbFile = -1;
421 pModLX->aSegments[i].SelFlat = 0;
422 pModLX->aSegments[i].Sel16bit = 0;
423
424 /* flags */
425 pModLX->aSegments[i].fFlags = 0;
426 if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
427 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_16BIT;
428 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
429 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_ALIAS16;
430 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
431 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_CONFORM;
432 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
433 pModLX->aSegments[i].fFlags = RTLDRSEG_FLAG_OS2_IOPL;
434
435 /* size and addresses */
436 pModLX->aSegments[i].Alignment = OBJPAGELEN;
437 pModLX->aSegments[i].cb = pModLX->paObjs[i].o32_size;
438 pModLX->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
439 pModLX->aSegments[i].RVA = NextRVA;
440 if ( fCanOptimizeMapping
441 || i + 1 >= pModLX->cSegments
442 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
443 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
444 pModLX->aSegments[i].cbMapped = RT_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
445 else
446 pModLX->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
447 NextRVA += (uint32_t)pModLX->aSegments[i].cbMapped;
448
449 /* protection */
450 switch ( pModLX->paObjs[i].o32_flags
451 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
452 {
453 case 0:
454 case OBJSHARED:
455 pModLX->aSegments[i].fProt = 0;
456 break;
457 case OBJREAD:
458 case OBJREAD | OBJSHARED:
459 pModLX->aSegments[i].fProt = RTMEM_PROT_READ;
460 break;
461 case OBJWRITE:
462 case OBJWRITE | OBJREAD:
463 pModLX->aSegments[i].fProt = RTMEM_PROT_READ | RTMEM_PROT_WRITECOPY;
464 break;
465 case OBJWRITE | OBJSHARED:
466 case OBJWRITE | OBJSHARED | OBJREAD:
467 pModLX->aSegments[i].fProt = RTMEM_PROT_READ | RTMEM_PROT_WRITE;
468 break;
469 case OBJEXEC:
470 case OBJEXEC | OBJSHARED:
471 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC;
472 break;
473 case OBJEXEC | OBJREAD:
474 case OBJEXEC | OBJREAD | OBJSHARED:
475 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ;
476 break;
477 case OBJEXEC | OBJWRITE:
478 case OBJEXEC | OBJWRITE | OBJREAD:
479 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITECOPY;
480 break;
481 case OBJEXEC | OBJWRITE | OBJSHARED:
482 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
483 pModLX->aSegments[i].fProt = RTMEM_PROT_EXEC | RTMEM_PROT_READ | RTMEM_PROT_WRITE;
484 break;
485 }
486 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
487 pModLX->aSegments[i].fProt = RTMEM_PROT_READ;
488 /*pModLX->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
489 pModLX->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
490 pModLX->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
491 }
492
493 /* set the mapping size */
494 pModLX->cbMapped = NextRVA;
495
496 /*
497 * We're done.
498 */
499 *ppModLX = pModLX;
500 return VINF_SUCCESS;
501}
502
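/*
 * Illustrative sketch (not from the original sources): how the two cbMapped
 * policies used above play out for a hypothetical object table entry.  The
 * o32_base/o32_size values are made up purely for the example.
 */
#if 0
static void kldrModLXExampleMappingSize(void)
{
    /* Object 1: link base 0x10000, size 0x1234; object 2: link base 0x20000 (64KB aligned linker layout). */
    uint32_t const cbObj1    = 0x1234;
    uint32_t const uBaseObj1 = 0x10000;
    uint32_t const uBaseObj2 = 0x20000;

    /* Optimized mapping: round the object size up to the next page boundary. */
    size_t const cbMappedOptimized  = RT_ALIGN_Z(cbObj1, OBJPAGELEN);   /* 0x2000 */

    /* Unoptimized mapping: keep the distance to the next object so the link addresses stay usable. */
    size_t const cbMappedLinkLayout = uBaseObj2 - uBaseObj1;            /* 0x10000 */

    RT_NOREF(cbMappedOptimized, cbMappedLinkLayout);
}
#endif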
503
504/**
505 * @interface_method_impl{RTLDROPS,pfnClose}
506 */
507static DECLCALLBACK(int) rtldrLX_Close(PRTLDRMODINTERNAL pMod)
508{
509 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
510 KLDRMODLX_ASSERT(!pModLX->pvMapping);
511
512 int rc = VINF_SUCCESS;
513 if (pModLX->Core.pReader)
514 {
515 rc = pModLX->Core.pReader->pfnDestroy(pModLX->Core.pReader);
516 pModLX->Core.pReader = NULL;
517 }
518 if (pModLX->pbNonResNameTab)
519 {
520 RTMemFree(pModLX->pbNonResNameTab);
521 pModLX->pbNonResNameTab = NULL;
522 }
523 if (pModLX->pbFixupSection)
524 {
525 RTMemFree(pModLX->pbFixupSection);
526 pModLX->pbFixupSection = NULL;
527 }
528 pModLX->Core.u32Magic = 0;
529 pModLX->Core.pOps = NULL;
530 RTMemFree(pModLX);
531 return rc;
532}
533
534
535/**
536 * Resolves base address aliases.
537 *
538 * @param pModLX The interpreter module instance.
539 * @param pBaseAddress The base address, IN & OUT.
540 */
541static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PRTLDRADDR pBaseAddress)
542{
543 if (*pBaseAddress == RTLDR_BASEADDRESS_LINK)
544 *pBaseAddress = pModLX->aSegments[0].LinkAddress;
545}
546
547
548static int kldrModLXQuerySymbol(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, uint32_t iSymbol,
549 const char *pchSymbol, size_t cchSymbol, const char *pszVersion,
550 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind)
551{
552 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
553 uint32_t iOrdinal;
554 int rc;
555 const struct b32_bundle *pBundle;
556 RT_NOREF(pvBits);
557 RT_NOREF(pszVersion);
558
559 /*
560 * Give up at once if there is no entry table.
561 */
562 if (!pModLX->Hdr.e32_enttab)
563 return VERR_SYMBOL_NOT_FOUND;
564
565 /*
566 * Translate the symbol name into an ordinal.
567 */
568 if (pchSymbol)
569 {
570 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
571 if (RT_FAILURE(rc))
572 return rc;
573 }
574
575 /*
576 * Iterate the entry table.
577 * (The entry table is made up of bundles of similar exports.)
578 */
579 iOrdinal = 1;
580 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
581 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
582 {
583 static const size_t s_cbEntry[] = { 0, 3, 5, 5, 7 };
584
585 /*
586 * Check for a hit first.
587 */
588 iOrdinal += pBundle->b32_cnt;
589 if (iSymbol < iOrdinal)
590 {
591 uint32_t offObject;
592 const struct e32_entry *pEntry = (const struct e32_entry *)((uintptr_t)(pBundle + 1)
593 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
594 * s_cbEntry[pBundle->b32_type]);
595
596 /*
597 * Calculate the return address.
598 */
599 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
600 switch (pBundle->b32_type)
601 {
602 /* empty bundles are place holders for unused ordinal ranges. */
603 case EMPTY:
604 return VERR_SYMBOL_NOT_FOUND;
605
606 /* e32_flags + a 16-bit offset. */
607 case ENTRY16:
608 offObject = pEntry->e32_variant.e32_offset.offset16;
609 if (pfKind)
610 *pfKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_NO_TYPE;
611 break;
612
613 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
614 case GATE16:
615 offObject = pEntry->e32_variant.e32_callgate.offset;
616 if (pfKind)
617 *pfKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_CODE;
618 break;
619
620 /* e32_flags + a 32-bit offset. */
621 case ENTRY32:
622 offObject = pEntry->e32_variant.e32_offset.offset32;
623 if (pfKind)
624 *pfKind = RTLDRSYMKIND_32BIT;
625 break;
626
627 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
628 case ENTRYFWD:
629 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
630
631 default:
632 /* anyone actually using TYPEINFO will end up here. */
633 KLDRMODLX_ASSERT(!"Bad bundle type");
634 return VERR_LDRLX_BAD_BUNDLE;
635 }
636
637 /*
638 * Validate the object number and calc the return address.
639 */
640 if ( pBundle->b32_obj <= 0
641 || pBundle->b32_obj > pModLX->cSegments)
642 return VERR_LDRLX_BAD_BUNDLE;
643 if (puValue)
644 *puValue = BaseAddress
645 + offObject
646 + pModLX->aSegments[pBundle->b32_obj - 1].RVA;
647 return VINF_SUCCESS;
648 }
649
650 /*
651 * Skip the bundle.
652 */
653 if (pBundle->b32_type > ENTRYFWD)
654 {
655 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
656 return VERR_LDRLX_BAD_BUNDLE;
657 }
658 if (pBundle->b32_type == 0)
659 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + 2);
660 else
661 pBundle = (const struct b32_bundle *)((const uint8_t *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
662 }
663
664 return VERR_SYMBOL_NOT_FOUND;
665}
666
667
668/**
669 * @interface_method_impl{RTLDROPS,pfnGetSymbolEx}
670 */
671static DECLCALLBACK(int) rtldrLX_GetSymbolEx(PRTLDRMODINTERNAL pMod, const void *pvBits, RTUINTPTR BaseAddress,
672 uint32_t iOrdinal, const char *pszSymbol, RTUINTPTR *pValue)
673{
674 uint32_t fKind = RTLDRSYMKIND_REQ_FLAT;
675 return kldrModLXQuerySymbol(pMod, pvBits, BaseAddress, iOrdinal, pszSymbol, pszSymbol ? strlen(pszSymbol) : 0,
676 NULL, NULL, NULL, pValue, &fKind);
677}
678
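/*
 * Illustrative usage sketch (not from the original sources): resolving an LX
 * export through the public RTLdr API, which lands in rtldrLX_GetSymbolEx
 * above.  The module and symbol names are made up for the example.
 */
#if 0
static int kldrModLXExampleGetSymbol(void)
{
    RTLDRMOD hLdrMod;
    int rc = RTLdrOpen("DOSCALL1.DLL", 0 /*fFlags*/, RTLDRARCH_WHATEVER, &hLdrMod);
    if (RT_SUCCESS(rc))
    {
        void *pvValue;
        rc = RTLdrGetSymbol(hLdrMod, "DosOpen", &pvValue);
        if (RT_SUCCESS(rc))
            Log(("DosOpen resolved at %p\n", pvValue));
        RTLdrClose(hLdrMod);
    }
    return rc;
}
#endif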
679
680/**
681 * Do name lookup.
682 *
683 * @returns IPRT status code.
684 * @param pModLX The module to lookup the symbol in.
685 * @param pchSymbol The symbol to lookup.
686 * @param cchSymbol The symbol name length.
687 * @param piSymbol Where to store the symbol ordinal.
688 */
689static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, size_t cchSymbol, uint32_t *piSymbol)
690{
691
692 /*
693 * First do a hash table lookup.
694 */
695 /** @todo hash name table for speed. */
696
697 /*
698 * Search the name tables.
699 */
700 const uint8_t *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
701 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
702 pchSymbol, cchSymbol);
703 if (!pbName)
704 {
705 if (!pModLX->pbNonResNameTab)
706 {
707 /* lazy load it */
708 /** @todo non-resident name table. */
709 }
710 if (pModLX->pbNonResNameTab)
711 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbNonResNameTab,
712 pModLX->pbNonResNameTabLast - pModLX->pbNonResNameTab + 1,
713 pchSymbol, cchSymbol);
714 }
715 if (!pbName)
716 return VERR_SYMBOL_NOT_FOUND;
717
718 *piSymbol = *(const uint16_t *)(pbName + 1 + *pbName);
719 return VINF_SUCCESS;
720}
721
722
723/**
724 * Lookup a name table entry by name.
725 *
726 * @returns Pointer to the name table entry if found.
727 * @returns NULL if not found.
728 * @param pbNameTable Pointer to the name table that should be searched.
729 * @param cbNameTable The size of the name table.
730 * @param pchSymbol The name of the symbol we're looking for.
731 * @param cchSymbol The length of the symbol name.
732 */
733static const uint8_t *kldrModLXDoNameTableLookupByName(const uint8_t *pbNameTable, ssize_t cbNameTable,
734 const char *pchSymbol, size_t cchSymbol)
735{
736 /*
737 * Determine the name length up front so we can skip anything which doesn't match the length.
738 */
739 uint8_t cbSymbol8Bit = (uint8_t)cchSymbol;
740 if (cbSymbol8Bit != cchSymbol)
741 return NULL; /* too long. */
742
743 /*
744 * Walk the name table.
745 */
746 while (*pbNameTable != 0 && cbNameTable > 0)
747 {
748 const uint8_t cbName = *pbNameTable;
749
750 cbNameTable -= cbName + 1 + 2;
751 if (cbNameTable < 0)
752 break;
753
754 if ( cbName == cbSymbol8Bit
755 && !memcmp(pbNameTable + 1, pchSymbol, cbName))
756 return pbNameTable;
757
758 /* next entry */
759 pbNameTable += cbName + 1 + 2;
760 }
761
762 return NULL;
763}
764
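/*
 * Illustrative sketch (not from the original sources): the resident and
 * non-resident name tables are sequences of [length byte][name][16-bit ordinal]
 * entries terminated by a zero length byte.  The bytes below are made up for
 * the example.
 */
#if 0
static void kldrModLXExampleNameTable(void)
{
    static const uint8_t s_abNameTab[] =
    {
        7, 'M','Y','M','O','D','U','L',  0x00, 0x00,    /* entry for ordinal 0: the module name */
        5, 'H','E','L','L','O',          0x02, 0x00,    /* export "HELLO" with ordinal 2 */
        0                                               /* terminator */
    };
    const uint8_t *pbEntry = kldrModLXDoNameTableLookupByName(s_abNameTab, sizeof(s_abNameTab), "HELLO", 5);
    if (pbEntry)
    {
        /* Same unaligned 16-bit read as kldrModLXDoNameLookup uses; yields 2 here. */
        uint32_t iOrdinal = *(const uint16_t *)(pbEntry + 1 + *pbEntry);
        RT_NOREF(iOrdinal);
    }
}
#endif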
765
766/**
767 * Deal with a forwarder entry.
768 *
769 * @returns IPRT status code.
770 * @param pModLX The LX module interpreter instance.
771 * @param pEntry The forwarder entry.
772 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
773 * @param pvUser The user argument for the callback.
774 * @param puValue Where to put the value. (optional)
775 * @param pfKind Where to put the symbol kind. (optional)
776 */
777static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
778 PFNRTLDRIMPORT pfnGetForwarder, void *pvUser, PRTLDRADDR puValue, uint32_t *pfKind)
779{
780 if (!pfnGetForwarder)
781 return VERR_LDR_FORWARDER;
782
783 /*
784 * Validate the entry import module ordinal.
785 */
786 if ( !pEntry->e32_variant.e32_fwd.modord
787 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
788 return VERR_LDRLX_BAD_FORWARDER;
789
790 char szImpModule[256];
791 int rc = kldrModLXGetImport(pModLX, NULL, pEntry->e32_variant.e32_fwd.modord - 1, szImpModule, sizeof(szImpModule), NULL);
792 if (RT_FAILURE(rc))
793 return rc;
794
795 /*
796 * Figure out the parameters.
797 */
798 uint32_t iSymbol;
799 const char *pszSymbol;
800 char szSymbol[256];
801 if (pEntry->e32_flags & FWD_ORDINAL)
802 {
803 iSymbol = pEntry->e32_variant.e32_fwd.value;
804 pszSymbol = NULL; /* no symbol name. */
805 }
806 else
807 {
808 const uint8_t *pbName;
809
810 /* load the fixup section if necessary. */
811 if (!pModLX->pbImportProcs)
812 {
813 rc = kldrModLXDoLoadFixupSection(pModLX);
814 if (RT_FAILURE(rc))
815 return rc;
816 }
817
818 /* Make name pointer. */
819 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
820 if ( pbName >= pModLX->pbFixupSectionLast
821 || pbName < pModLX->pbFixupSection
822 || !*pbName)
823 return VERR_LDRLX_BAD_FORWARDER;
824
825
826 /* check for '#' name. */
827 if (pbName[1] == '#')
828 {
829 uint8_t cbLeft = *pbName;
830 const uint8_t *pb = pbName + 1;
831 unsigned uBase;
832
833 /* base detection */
834 uBase = 10;
835 if ( cbLeft > 1
836 && pb[1] == '0'
837 && (pb[2] == 'x' || pb[2] == 'X'))
838 {
839 uBase = 16;
840 pb += 2;
841 cbLeft -= 2;
842 }
843
844 /* ascii to integer */
845 iSymbol = 0;
846 while (cbLeft-- > 0)
847 {
848 /* convert char to digit. */
849 unsigned uDigit = *pb++;
850 if (uDigit >= '0' && uDigit <= '9')
851 uDigit -= '0';
852 else if (uDigit >= 'a' && uDigit <= 'z')
853 uDigit = uDigit - 'a' + 10;
854 else if (uDigit >= 'A' && uDigit <= 'Z')
855 uDigit = uDigit - 'A' + 10;
856 else if (!uDigit)
857 break;
858 else
859 return VERR_LDRLX_BAD_FORWARDER;
860 if (uDigit >= uBase)
861 return VERR_LDRLX_BAD_FORWARDER;
862
863 /* insert the digit */
864 iSymbol *= uBase;
865 iSymbol += uDigit;
866 }
867 if (!iSymbol)
868 return VERR_LDRLX_BAD_FORWARDER;
869
870 pszSymbol = NULL; /* no symbol name. */
871 }
872 else
873 {
874 memcpy(szSymbol, pbName + 1, *pbName);
875 szSymbol[*pbName] = '\0';
876 pszSymbol = szSymbol;
877 iSymbol = UINT32_MAX;
878 }
879 }
880
881 /*
882 * Resolve the forwarder.
883 */
884 rc = pfnGetForwarder(&pModLX->Core, szImpModule, pszSymbol, iSymbol, puValue, /*pfKind, */pvUser);
885 if (RT_SUCCESS(rc) && pfKind)
886 *pfKind |= RTLDRSYMKIND_FORWARDER;
887 return rc;
888}
889
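/*
 * Illustrative sketch (not from the original sources): a forwarder whose import
 * procedure name is "#42" forwards by ordinal 42 rather than by name.  This is
 * a simplified, decimal-only rendition of the parsing intent above; the pascal
 * string is made up for the example.
 */
#if 0
static uint32_t kldrModLXExampleParseHashOrdinal(void)
{
    static const uint8_t s_abName[] = { 3, '#', '4', '2' };     /* pascal string "#42" */
    uint32_t iOrdinal = 0;
    for (uint8_t off = 2; off <= s_abName[0]; off++)            /* skip the length byte and the '#' */
        iOrdinal = iOrdinal * 10 + (s_abName[off] - '0');
    return iOrdinal;                                            /* 42 */
}
#endif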
890
891/**
892 * Loads the fixup section from the executable image.
893 *
894 * The fixup section isn't loaded until it's accessed. It's freed again when the module instance is closed.
895 *
896 * @returns IPRT status code.
897 * @param pModLX The LX module interpreter instance.
898 */
899static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
900{
901 void *pv = RTMemAlloc(pModLX->Hdr.e32_fixupsize);
902 if (!pv)
903 return VERR_NO_MEMORY;
904
905 uint32_t off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
906 int rc = pModLX->Core.pReader->pfnRead(pModLX->Core.pReader, pv, pModLX->Hdr.e32_fixupsize,
907 off + pModLX->offHdr);
908 if (RT_SUCCESS(rc))
909 {
910 pModLX->pbFixupSection = (uint8_t *)pv;
911 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
912 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
913 if (pModLX->Hdr.e32_fpagetab)
914 pModLX->paoffPageFixups = (const uint32_t *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
915 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
916 if (pModLX->Hdr.e32_frectab)
917 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
918 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
919 if (pModLX->Hdr.e32_impmod)
920 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
921 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
922 if (pModLX->Hdr.e32_impproc)
923 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
924 }
925 else
926 RTMemFree(pv);
927 return rc;
928}
929
930
931/**
932 * @interface_method_impl{RTLDROPS,pfnEnumSymbols}
933 */
934static DECLCALLBACK(int) rtldrLX_EnumSymbols(PRTLDRMODINTERNAL pMod, unsigned fFlags, const void *pvBits,
935 RTUINTPTR BaseAddress, PFNRTLDRENUMSYMS pfnCallback, void *pvUser)
936{
937 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
938 RT_NOREF(pvBits);
939 RT_NOREF(fFlags);
940
941 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
942
943 /*
944 * Enumerate the entry table.
945 * (The entry table is made up of bundles of similar exports.)
946 */
947 int rc = VINF_SUCCESS;
948 uint32_t iOrdinal = 1;
949 const struct b32_bundle *pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
950 while (pBundle->b32_cnt && iOrdinal)
951 {
952 static const size_t s_cbEntry[] = { 0, 3, 5, 5, 7 };
953
954 /*
955 * Enum the entries in the bundle.
956 */
957 if (pBundle->b32_type != EMPTY)
958 {
959 const struct e32_entry *pEntry;
960 size_t cbEntry;
961 RTLDRADDR BundleRVA;
962 unsigned cLeft;
963
964
965 /* Validate the bundle. */
966 switch (pBundle->b32_type)
967 {
968 case ENTRY16:
969 case GATE16:
970 case ENTRY32:
971 if ( pBundle->b32_obj <= 0
972 || pBundle->b32_obj > pModLX->cSegments)
973 return VERR_LDRLX_BAD_BUNDLE;
974 BundleRVA = pModLX->aSegments[pBundle->b32_obj - 1].RVA;
975 break;
976
977 case ENTRYFWD:
978 BundleRVA = 0;
979 break;
980
981 default:
982 /* anyone actually using TYPEINFO will end up here. */
983 KLDRMODLX_ASSERT(!"Bad bundle type");
984 return VERR_LDRLX_BAD_BUNDLE;
985 }
986
987 /* iterate the bundle entries. */
988 cbEntry = s_cbEntry[pBundle->b32_type];
989 pEntry = (const struct e32_entry *)(pBundle + 1);
990 cLeft = pBundle->b32_cnt;
991 while (cLeft-- > 0)
992 {
993 RTLDRADDR uValue;
994 uint32_t fKind;
995 int fFoundName;
996 const uint8_t *pbName;
997
998 /*
999 * Calc the symbol value and kind.
1000 */
1001 switch (pBundle->b32_type)
1002 {
1003 /* e32_flags + a 16-bit offset. */
1004 case ENTRY16:
1005 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1006 fKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_NO_TYPE;
1007 break;
1008
1009 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1010 case GATE16:
1011 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1012 fKind = RTLDRSYMKIND_16BIT | RTLDRSYMKIND_CODE;
1013 break;
1014
1015 /* e32_flags + a 32-bit offset. */
1016 case ENTRY32:
1017 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1018 fKind = RTLDRSYMKIND_32BIT;
1019 break;
1020
1021 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1022 case ENTRYFWD:
1023 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1024 fKind = RTLDRSYMKIND_FORWARDER;
1025 break;
1026
1027 default: /* shut up gcc. */
1028 uValue = 0;
1029 fKind = RTLDRSYMKIND_NO_BIT | RTLDRSYMKIND_NO_TYPE;
1030 break;
1031 }
1032
1033 /*
1034 * Any symbol names?
1035 */
1036 fFoundName = 0;
1037 char szName[256];
1038
1039 /* resident name table. */
1040 pbName = pModLX->pbResNameTab;
1041 if (pbName)
1042 {
1043 do
1044 {
1045 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1046 if (!pbName)
1047 break;
1048 fFoundName = 1;
1049 memcpy(szName, (const char *)pbName + 1, *pbName);
1050 szName[*pbName] = '\0';
1051 rc = pfnCallback(pMod, szName, iOrdinal, uValue, /*fKind,*/ pvUser);
1052 if (rc != VINF_SUCCESS)
1053 return rc;
1054
1055 /* skip to the next entry */
1056 pbName += 1 + *pbName + 2;
1057 } while (pbName < pModLX->pbLoaderSectionLast);
1058 }
1059
1060 /* non-resident name table. */
1061 pbName = pModLX->pbNonResNameTab;
1062 /** @todo lazy load the non-resident name table. */
1063 if (pbName)
1064 {
1065 do
1066 {
1067 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1068 if (!pbName)
1069 break;
1070 fFoundName = 1;
1071 memcpy(szName, (const char *)pbName + 1, *pbName);
1072 szName[*pbName] = '\0';
1073 rc = pfnCallback(pMod, szName, iOrdinal, uValue, /*fKind,*/ pvUser);
1074 if (rc != VINF_SUCCESS)
1075 return rc;
1076
1077 /* skip to the next entry */
1078 pbName += 1 + *pbName + 2;
1079 } while (pbName < pModLX->pbLoaderSectionLast);
1080 }
1081
1082 /*
1083 * If no names, call once with the ordinal only.
1084 */
1085 if (!fFoundName)
1086 {
1087 RT_NOREF(fKind);
1088 rc = pfnCallback(pMod, NULL /*pszName*/, iOrdinal, uValue, /*fKind,*/ pvUser);
1089 if (rc != VINF_SUCCESS)
1090 return rc;
1091 }
1092
1093 /* next */
1094 iOrdinal++;
1095 pEntry = (const struct e32_entry *)((uintptr_t)pEntry + cbEntry);
1096 }
1097 }
1098
1099 /*
1100 * The next bundle.
1101 */
1102 if (pBundle->b32_type > ENTRYFWD)
1103 {
1104 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1105 return VERR_LDRLX_BAD_BUNDLE;
1106 }
1107 if (pBundle->b32_type == 0)
1108 pBundle = (const struct b32_bundle *)((const uint8_t *)pBundle + 2);
1109 else
1110 pBundle = (const struct b32_bundle *)((const uint8_t *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1111 }
1112
1113 return VINF_SUCCESS;
1114}
1115
1116
1117/**
1118 * Lookup a name table entry by ordinal.
1119 *
1120 * @returns Pointer to the name table entry if found.
1121 * @returns NULL if not found.
1122 * @param pbNameTable Pointer to the name table that should be searched.
1123 * @param cbNameTable The size of the name table.
1124 * @param iOrdinal The ordinal to search for.
1125 */
1126static const uint8_t *kldrModLXDoNameTableLookupByOrdinal(const uint8_t *pbNameTable, ssize_t cbNameTable, uint32_t iOrdinal)
1127{
1128 while (*pbNameTable != 0 && cbNameTable > 0)
1129 {
1130 const uint8_t cbName = *pbNameTable;
1131 uint32_t iName;
1132
1133 cbNameTable -= cbName + 1 + 2;
1134 if (cbNameTable < 0)
1135 break;
1136
1137 iName = *(pbNameTable + cbName + 1)
1138 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1139 if (iName == iOrdinal)
1140 return pbNameTable;
1141
1142 /* next entry */
1143 pbNameTable += cbName + 1 + 2;
1144 }
1145
1146 return NULL;
1147}
1148
1149
1150static int kldrModLXGetImport(PKLDRMODLX pModLX, const void *pvBits, uint32_t iImport, char *pszName, size_t cchName,
1151 size_t *pcbNeeded)
1152{
1153 const uint8_t *pb;
1154 int rc;
1155 RT_NOREF(pvBits);
1156
1157 /*
1158 * Validate
1159 */
1160 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1161 return VERR_LDRLX_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1162
1163 /*
1164 * Lazy loading the fixup section.
1165 */
1166 if (!pModLX->pbImportMods)
1167 {
1168 rc = kldrModLXDoLoadFixupSection(pModLX);
1169 if (RT_FAILURE(rc))
1170 return rc;
1171 }
1172
1173 /*
1174 * Iterate the module import table until we reach the requested import ordinal.
1175 */
1176 pb = pModLX->pbImportMods;
1177 while (iImport-- > 0)
1178 pb += *pb + 1;
1179
1180 /*
1181 * Copy out the result.
1182 */
1183 if (pcbNeeded)
1184 *pcbNeeded = *pb + 1;
1185 if (*pb < cchName)
1186 {
1187 memcpy(pszName, pb + 1, *pb);
1188 pszName[*pb] = '\0';
1189 rc = VINF_SUCCESS;
1190 }
1191 else
1192 {
1193 memcpy(pszName, pb + 1, cchName);
1194 if (cchName)
1195 pszName[cchName - 1] = '\0';
1196 rc = VERR_BUFFER_OVERFLOW;
1197 }
1198
1199 return rc;
1200}
1201
1202#if 0
1203
1204/** @copydoc kLdrModNumberOfImports */
1205static int32_t kldrModLXNumberOfImports(PRTLDRMODINTERNAL pMod, const void *pvBits)
1206{
1207 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1208 RT_NOREF(pvBits);
1209 return pModLX->Hdr.e32_impmodcnt;
1210}
1211
1212
1213/** @copydoc kLdrModGetStackInfo */
1214static int kldrModLXGetStackInfo(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1215{
1216 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1217 const uint32_t i = pModLX->Hdr.e32_stackobj;
1218 RT_NOREF(pvBits);
1219
1220 if ( i
1221 && i <= pModLX->cSegments
1222 && pModLX->Hdr.e32_esp <= pModLX->aSegments[i - 1].LinkAddress + pModLX->aSegments[i - 1].cb
1223 && pModLX->Hdr.e32_stacksize
1224 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pModLX->aSegments[i - 1].LinkAddress)
1225 {
1226
1227 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1228 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1229 pStackInfo->Address = BaseAddress
1230 + pModLX->aSegments[i - 1].RVA
1231 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pModLX->aSegments[i - 1].LinkAddress;
1232 }
1233 else
1234 {
1235 pStackInfo->Address = NIL_RTLDRADDR;
1236 pStackInfo->LinkAddress = NIL_RTLDRADDR;
1237 }
1238 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1239 pStackInfo->cbStackThread = 0;
1240
1241 return VINF_SUCCESS;
1242}
1243
1244
1245/** @copydoc kLdrModQueryMainEntrypoint */
1246static int kldrModLXQueryMainEntrypoint(PRTLDRMODINTERNAL pMod, const void *pvBits, RTLDRADDR BaseAddress, PRTLDRADDR pMainEPAddress)
1247{
1248 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1249 RT_NOREF(pvBits);
1250
1251 /*
1252 * Convert the address from the header.
1253 */
1254 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1255 *pMainEPAddress = pModLX->Hdr.e32_startobj
1256 && pModLX->Hdr.e32_startobj <= pModLX->cSegments
1257 && pModLX->Hdr.e32_eip < pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1258 ? BaseAddress + pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1259 : NIL_RTLDRADDR;
1260 return VINF_SUCCESS;
1261}
1262
1263#endif
1264
1265
1266/**
1267 * @interface_method_impl{RTLDROPS,pfnEnumDbgInfo}
1268 */
1269static DECLCALLBACK(int) rtldrLX_EnumDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits,
1270 PFNRTLDRENUMDBG pfnCallback, void *pvUser)
1271{
1272 /*PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);*/
1273 RT_NOREF(pfnCallback);
1274 RT_NOREF(pvUser);
1275
1276 /*
1277 * Quit immediately if no debug info.
1278 */
1279 if (kldrModLXHasDbgInfo(pMod, pvBits))
1280 return VINF_SUCCESS;
1281#if 0
1282 /*
1283 * Read the debug info and look for familiar magics and structures.
1284 */
1285 /** @todo */
1286#endif
1287
1288 return VINF_SUCCESS;
1289}
1290
1291
1292static int kldrModLXHasDbgInfo(PRTLDRMODINTERNAL pMod, const void *pvBits)
1293{
1294 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1295 RT_NOREF(pvBits);
1296
1297 /*
1298 * Don't currently bother with linkers which don't advertise it in the header.
1299 */
1300 if ( !pModLX->Hdr.e32_debuginfo
1301 || !pModLX->Hdr.e32_debuglen)
1302 return VERR_NOT_FOUND;
1303 return VINF_SUCCESS;
1304}
1305
1306#if 0
1307
1308/** @copydoc kLdrModMap */
1309static int kldrModLXMap(PRTLDRMODINTERNAL pMod)
1310{
1311 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1312 unsigned fFixed;
1313 void *pvBase;
1314 int rc;
1315
1316 /*
1317 * Already mapped?
1318 */
1319 if (pModLX->pvMapping)
1320 return KLDR_ERR_ALREADY_MAPPED;
1321
1322 /*
1323 * Allocate memory for it.
1324 */
1325 /* fixed image? */
1326 fFixed = pModLX->Core.enmType == RTLDRTYPE_EXECUTABLE_FIXED
1327 || pModLX->Core.enmType == RTLDRTYPE_SHARED_LIBRARY_FIXED;
1328 if (!fFixed)
1329 pvBase = NULL;
1330 else
1331 {
1332 pvBase = (void *)(uintptr_t)pModLX->aSegments[0].LinkAddress;
1333 if ((uintptr_t)pvBase != pModLX->aSegments[0].LinkAddress)
1334 return KLDR_ERR_ADDRESS_OVERFLOW;
1335 }
1336 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1337 if (RT_FAILURE(rc))
1338 return rc;
1339
1340 /*
1341 * Load the bits, apply page protection, and update the segment table.
1342 */
1343 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1344 if (RT_SUCCESS(rc))
1345 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1346 if (RT_SUCCESS(rc))
1347 {
1348 uint32_t i;
1349 for (i = 0; i < pModLX->cSegments; i++)
1350 {
1351 if (pModLX->aSegments[i].RVA != NIL_RTLDRADDR)
1352 pModLX->aSegments[i].MapAddress = (uintptr_t)pvBase + (uintptr_t)pModLX->aSegments[i].RVA;
1353 }
1354 pModLX->pvMapping = pvBase;
1355 }
1356 else
1357 kHlpPageFree(pvBase, pModLX->cbMapped);
1358 return rc;
1359}
1360
1361#endif
1362
1363/**
1364 * Loads the LX pages into the specified memory mapping.
1365 *
1366 * @returns IPRT status code.
1367 *
1368 * @param pModLX The LX module interpreter instance.
1369 * @param pvBits Where to load the bits.
1370 */
1371static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1372{
1373 const PRTLDRREADER pRdr = pModLX->Core.pReader;
1374 uint8_t *pbTmpPage = NULL;
1375 int rc = VINF_SUCCESS;
1376 uint32_t i;
1377
1378 /*
1379 * Iterate the segments.
1380 */
1381 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1382 {
1383 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1384 const uint32_t cPages = (uint32_t)(pModLX->aSegments[i].cbMapped / OBJPAGELEN);
1385 uint32_t iPage;
1386 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->aSegments[i].RVA;
1387
1388 /*
1389 * Iterate the page map pages.
1390 */
1391 for (iPage = 0; RT_SUCCESS(rc) && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1392 {
1393 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1394 switch (pMap->o32_pageflags)
1395 {
1396 case VALID:
1397 if (pMap->o32_pagesize == OBJPAGELEN)
1398 rc = pRdr->pfnRead(pRdr, pbPage, OBJPAGELEN,
1399 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1400 else if (pMap->o32_pagesize < OBJPAGELEN)
1401 {
1402 rc = pRdr->pfnRead(pRdr, pbPage, pMap->o32_pagesize,
1403 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1404 memset(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1405 }
1406 else
1407 rc = VERR_LDRLX_BAD_PAGE_MAP;
1408 break;
1409
1410 case ITERDATA:
1411 case ITERDATA2:
1412 /* make sure we've got a temp page. */
1413 if (!pbTmpPage)
1414 {
1415 pbTmpPage = (uint8_t *)RTMemAlloc(OBJPAGELEN + 256);
1416 if (!pbTmpPage)
1417 break;
1418 }
1419 /* validate the size. */
1420 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1421 {
1422 rc = VERR_LDRLX_BAD_PAGE_MAP;
1423 break;
1424 }
1425
1426 /* read it and ensure 4 extra zero bytes. */
1427 rc = pRdr->pfnRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1428 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1429 if (RT_FAILURE(rc))
1430 break;
1431 memset(pbTmpPage + pMap->o32_pagesize, 0, 4);
1432
1433 /* unpack it into the image page. */
1434 if (pMap->o32_pageflags == ITERDATA2)
1435 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1436 else
1437 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1438 break;
1439
1440 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1441 case ZEROED:
1442 memset(pbPage, 0, OBJPAGELEN);
1443 break;
1444
1445 case RANGE:
1446 KLDRMODLX_ASSERT(!"RANGE");
1447 /* Falls through. */
1448 default:
1449 rc = VERR_LDRLX_BAD_PAGE_MAP;
1450 break;
1451 }
1452 }
1453 if (RT_FAILURE(rc))
1454 break;
1455
1456 /*
1457 * Zero the remaining pages.
1458 */
1459 if (iPage < cPages)
1460 memset(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1461 }
1462
1463 if (pbTmpPage)
1464 RTMemFree(pbTmpPage);
1465 return rc;
1466}
1467
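/*
 * Illustrative sketch (not from the original sources): where a VALID page's raw
 * bytes live in the file, i.e. the offset expression used by the reads above.
 * The header and page map values are made up for the example.
 */
#if 0
static uint64_t kldrModLXExamplePageFileOffset(void)
{
    uint32_t const offDataPages = 0x600;    /* e32_datapage: start of the data pages in the file */
    uint32_t const cPageShift   = 0;        /* e32_pageshift: left shift applied to page data offsets */
    uint32_t const offPageData  = 0x2000;   /* o32_pagedataoffset from the page map entry */
    return offDataPages + ((uint64_t)offPageData << cPageShift); /* == 0x2600 */
}
#endif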
1468
1469/**
1470 * Unpacks iterdata (aka EXEPACK).
1471 *
1472 * @returns IPRT status code.
1473 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1474 * @param pbSrc The compressed source data.
1475 * @param cbSrc The file size of the compressed data. The source buffer
1476 * contains 4 additional zero bytes.
1477 */
1478static int kldrModLXDoIterDataUnpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1479{
1480 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1481 int cbDst = OBJPAGELEN;
1482
1483 /* Validate size of data. */
1484 if (cbSrc >= (int)OBJPAGELEN - 2)
1485 return VERR_LDRLX_BAD_ITERDATA;
1486
1487 /*
1488 * Expand the page.
1489 */
1490 while (cbSrc > 0 && pIter->LX_nIter)
1491 {
1492 if (pIter->LX_nBytes == 1)
1493 {
1494 /*
1495 * Special case - one databyte.
1496 */
1497 cbDst -= pIter->LX_nIter;
1498 if (cbDst < 0)
1499 return VERR_LDRLX_BAD_ITERDATA;
1500
1501 cbSrc -= 4 + 1;
1502 if (cbSrc < -4)
1503 return VERR_LDRLX_BAD_ITERDATA;
1504
1505 memset(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1506 pbDst += pIter->LX_nIter;
1507 pIter++;
1508 }
1509 else
1510 {
1511 /*
1512 * General.
1513 */
1514 int i;
1515
1516 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1517 if (cbDst < 0)
1518 return VERR_LDRLX_BAD_ITERDATA;
1519
1520 cbSrc -= 4 + pIter->LX_nBytes;
1521 if (cbSrc < -4)
1522 return VERR_LDRLX_BAD_ITERDATA;
1523
1524 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1525 memcpy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1526 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1527 }
1528 }
1529
1530 /*
1531 * Zero remainder of the page.
1532 */
1533 if (cbDst > 0)
1534 memset(pbDst, 0, cbDst);
1535
1536 return VINF_SUCCESS;
1537}
1538
1539
1540/**
1541 * Unpacks iterdata (aka EXEPACK).
1542 *
1543 * @returns IPRT status code.
1544 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1545 * @param pbSrc The compressed source data.
1546 * @param cbSrc The file size of the compressed data. The source buffer
1547 * contains 4 additional zero bytes.
1548 */
1549static int kldrModLXDoIterData2Unpacking(uint8_t *pbDst, const uint8_t *pbSrc, int cbSrc)
1550{
1551 int cbDst = OBJPAGELEN;
1552
1553 while (cbSrc > 0)
1554 {
1555 /*
1556 * Bit 0 and 1 is the encoding type.
1557 */
1558 switch (*pbSrc & 0x03)
1559 {
1560 /*
1561 *
1562 * 0 1 2 3 4 5 6 7
1563 * type | |
1564 * ----------------
1565 * cb <cb bytes of data>
1566 *
1567 * Bits 2-7 are, if not zero, the length of an uncompressed run
1568 * starting at the following byte.
1569 *
1570 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1571 * type | | | | | |
1572 * ---------------- ---------------------- -----------------------
1573 * zero cb char to multiply
1574 *
1575 * If the bits are zero, the following two bytes describe a 1 byte iteration
1576 * run. The first byte is the count, the second is the byte to copy. A count
1577 * of zero means end of data, and we simply stop. In that case the rest of
1578 * the data should be zero.
1579 */
1580 case 0:
1581 {
1582 if (*pbSrc)
1583 {
1584 const int cb = *pbSrc >> 2;
1585 cbDst -= cb;
1586 if (cbDst < 0)
1587 return VERR_LDRLX_BAD_ITERDATA2;
1588 cbSrc -= cb + 1;
1589 if (cbSrc < 0)
1590 return VERR_LDRLX_BAD_ITERDATA2;
1591 memcpy(pbDst, ++pbSrc, cb);
1592 pbDst += cb;
1593 pbSrc += cb;
1594 }
1595 else if (cbSrc < 2)
1596 return VERR_LDRLX_BAD_ITERDATA2;
1597 else
1598 {
1599 const int cb = pbSrc[1];
1600 if (!cb)
1601 goto l_endloop;
1602 cbDst -= cb;
1603 if (cbDst < 0)
1604 return VERR_LDRLX_BAD_ITERDATA2;
1605 cbSrc -= 3;
1606 if (cbSrc < 0)
1607 return VERR_LDRLX_BAD_ITERDATA2;
1608 memset(pbDst, pbSrc[2], cb);
1609 pbDst += cb;
1610 pbSrc += 3;
1611 }
1612 break;
1613 }
1614
1615
1616 /*
1617 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1618 * type | | | | | |
1619 * ---- ------- -------------------------
1620 * cb1 cb2 - 3 offset <cb1 bytes of data>
1621 *
1622 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1623 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1624 * data relative to the current position. The data is copied as you would expect it to be.
1625 */
1626 case 1:
1627 {
1628 cbSrc -= 2;
1629 if (cbSrc < 0)
1630 return VERR_LDRLX_BAD_ITERDATA2;
1631 else
1632 {
1633 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1634 const int cb1 = (*pbSrc >> 2) & 3;
1635 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1636
1637 pbSrc += 2;
1638 cbSrc -= cb1;
1639 if (cbSrc < 0)
1640 return VERR_LDRLX_BAD_ITERDATA2;
1641 cbDst -= cb1;
1642 if (cbDst < 0)
1643 return VERR_LDRLX_BAD_ITERDATA2;
1644 memcpy(pbDst, pbSrc, cb1);
1645 pbDst += cb1;
1646 pbSrc += cb1;
1647
1648 if (off > OBJPAGELEN - (unsigned)cbDst)
1649 return VERR_LDRLX_BAD_ITERDATA2;
1650 cbDst -= cb2;
1651 if (cbDst < 0)
1652 return VERR_LDRLX_BAD_ITERDATA2;
1653 memmove(pbDst, pbDst - off, cb2);
1654 pbDst += cb2;
1655 }
1656 break;
1657 }
1658
1659
1660 /*
1661 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1662 * type | | | |
1663 * ---- ----------------------------------
1664 * cb-3 offset
1665 *
1666 * Two bytes laid out as described above.
1667 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1668 * data relative to the current position.
1669 *
1670 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1671 */
1672 case 2:
1673 {
1674 cbSrc -= 2;
1675 if (cbSrc < 0)
1676 return VERR_LDRLX_BAD_ITERDATA2;
1677 else
1678 {
1679 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1680 const int cb = ((*pbSrc >> 2) & 3) + 3;
1681
1682 pbSrc += 2;
1683 if (off > OBJPAGELEN - (unsigned)cbDst)
1684 return VERR_LDRLX_BAD_ITERDATA2;
1685 cbDst -= cb;
1686 if (cbDst < 0)
1687 return VERR_LDRLX_BAD_ITERDATA2;
1688 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1689 pbDst += cb;
1690 }
1691 break;
1692 }
1693
1694
1695 /*
1696 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1697 * type | | | | | |
1698 * ---------- ---------------- ----------------------------------
1699 * cb1 cb2 offset <cb1 bytes of data>
1700 *
1701 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1702 * The cb2 and offset describe an amount of data to be copied from the expanded
1703 * data relative to the current position.
1704 *
1705 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1706 */
1707 case 3:
1708 {
1709 cbSrc -= 3;
1710 if (cbSrc < 0)
1711 return VERR_LDRLX_BAD_ITERDATA2;
1712 else
1713 {
1714 const int cb1 = (*pbSrc >> 2) & 0xf;
1715 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1716 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1717
1718 pbSrc += 3;
1719 cbSrc -= cb1;
1720 if (cbSrc < 0)
1721 return VERR_LDRLX_BAD_ITERDATA2;
1722 cbDst -= cb1;
1723 if (cbDst < 0)
1724 return VERR_LDRLX_BAD_ITERDATA2;
1725 memcpy(pbDst, pbSrc, cb1);
1726 pbDst += cb1;
1727 pbSrc += cb1;
1728
1729 if (off > OBJPAGELEN - (unsigned)cbDst)
1730 return VERR_LDRLX_BAD_ITERDATA2;
1731 cbDst -= cb2;
1732 if (cbDst < 0)
1733 return VERR_LDRLX_BAD_ITERDATA2;
1734 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1735 pbDst += cb2;
1736 }
1737 break;
1738 }
1739 } /* type switch. */
1740 } /* unpack loop */
1741
1742l_endloop:
1743
1744
1745 /*
1746 * Zero remainder of the page.
1747 */
1748 if (cbDst > 0)
1749 memset(pbDst, 0, cbDst);
1750
1751 return VINF_SUCCESS;
1752}
1753
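/*
 * Illustrative sketch (not from the original sources): a tiny iterdata2 stream
 * using only the type 0 encodings described above.  The bytes are made up for
 * the example.
 */
#if 0
static void kldrModLXExampleIterData2(void)
{
    static const uint8_t s_abSrc[] =
    {
        0x0C, 'A', 'B', 'C',    /* type 0, cb = 3: copy "ABC" verbatim */
        0x00, 0x05, 0xFF,       /* type 0, cb = 0: repeat 0xFF five times */
        0x00, 0x00              /* type 0, cb = 0, count = 0: end of data */
    };
    uint8_t abPage[OBJPAGELEN];
    int rc = kldrModLXDoIterData2Unpacking(abPage, s_abSrc, sizeof(s_abSrc));
    /* On success abPage starts with 'A','B','C' followed by five 0xFF bytes; the rest is zero. */
    RT_NOREF(rc);
}
#endif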
1754
1755/**
1756 * Special memcpy employed by the iterdata2 algorithm.
1757 *
1758 * Emulates a 16-bit memcpy (copying 16 bits at a time) and the effects this
1759 * has if the source is very close to the destination.
1760 *
1761 * @param pbDst Destination pointer.
1762 * @param pbSrc Source pointer. Will always be <= pbDst.
1763 * @param cb Amount of data to be copied.
1764 * @remark This assumes that unaligned word and dword access is fine.
1765 */
1766static void kLdrModLXMemCopyW(uint8_t *pbDst, const uint8_t *pbSrc, int cb)
1767{
1768 switch (pbDst - pbSrc)
1769 {
1770 case 0:
1771 case 1:
1772 case 2:
1773 case 3:
1774 /* 16-bit copy (unaligned) */
1775 if (cb & 1)
1776 *pbDst++ = *pbSrc++;
1777 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1778 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1779 break;
1780
1781 default:
1782 /* 32-bit copy (unaligned) */
1783 if (cb & 1)
1784 *pbDst++ = *pbSrc++;
1785 if (cb & 2)
1786 {
1787 *(uint16_t *)pbDst = *(const uint16_t *)pbSrc;
1788 pbDst += 2;
1789 pbSrc += 2;
1790 }
1791 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1792 *(uint32_t *)pbDst = *(const uint32_t *)pbSrc;
1793 break;
1794 }
1795}
1796
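/*
 * Illustrative sketch (not from the original sources): with a source/destination
 * distance of two bytes the word-wise copy above replicates a two byte pattern,
 * which is the effect the iterdata2 type 2 and 3 encodings rely on for small
 * offsets.
 */
#if 0
static void kldrModLXExampleMemCopyW(void)
{
    uint8_t abBuf[8] = { 1, 2, 0, 0, 0, 0, 0, 0 };
    kLdrModLXMemCopyW(&abBuf[2], &abBuf[0], 6);
    /* abBuf is now { 1, 2, 1, 2, 1, 2, 1, 2 }. */
    RT_NOREF(abBuf);
}
#endif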
1797#if 0
1798
1799/**
1800 * Unprotects or protects the specified image mapping.
1801 *
1802 * @returns IPRT status code.
1803 *
1804 * @param pModLX The LX module interpreter instance.
1805 * @param pvBits The mapping to protect.
1806 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1807 * protect according to the object table.
1808 */
1809static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1810{
1811 uint32_t i;
1812
1813 /*
1814 * Change object protection.
1815 */
1816 for (i = 0; i < pModLX->cSegments; i++)
1817 {
1818 int rc;
1819 void *pv;
1820 KPROT enmProt;
1821
1822 /* calc new protection. */
1823 enmProt = pModLX->aSegments[i].enmProt;
1824 if (fUnprotectOrProtect)
1825 {
1826 switch (enmProt)
1827 {
1828 case KPROT_NOACCESS:
1829 case KPROT_READONLY:
1830 case KPROT_READWRITE:
1831 case KPROT_WRITECOPY:
1832 enmProt = KPROT_READWRITE;
1833 break;
1834 case KPROT_EXECUTE:
1835 case KPROT_EXECUTE_READ:
1836 case KPROT_EXECUTE_READWRITE:
1837 case KPROT_EXECUTE_WRITECOPY:
1838 enmProt = KPROT_EXECUTE_READWRITE;
1839 break;
1840 default:
1841 KLDRMODLX_ASSERT(!"bad enmProt");
1842 return -1;
1843 }
1844 }
1845 else
1846 {
1847 /* copy on write -> normal write. */
1848 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1849 enmProt = KPROT_EXECUTE_READWRITE;
1850 else if (enmProt == KPROT_WRITECOPY)
1851 enmProt = KPROT_READWRITE;
1852 }
1853
1854
1855 /* calc the address and set page protection. */
1856 pv = (uint8_t *)pvBits + pModLX->aSegments[i].RVA;
1857
1858 rc = kHlpPageProtect(pv, pModLX->aSegments[i].cbMapped, enmProt);
1859 if (RT_FAILURE(rc))
1860 break;
1861
1862 /** @todo the gap page should be marked NOACCESS! */
1863 }
1864
1865 return VINF_SUCCESS;
1866}
1867
1868
1869/** @copydoc kLdrModUnmap */
1870static int kldrModLXUnmap(PRTLDRMODINTERNAL pMod)
1871{
1872 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1873 uint32_t i;
1874 int rc;
1875
1876 /*
1877 * Mapped?
1878 */
1879 if (!pModLX->pvMapping)
1880 return KLDR_ERR_NOT_MAPPED;
1881
1882 /*
1883 * Free the mapping and update the segments.
1884 */
1885 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1886 KLDRMODLX_ASSERT(!rc);
1887 pModLX->pvMapping = NULL;
1888
1889 for (i = 0; i < pModLX->cSegments; i++)
1890 pModLX->aSegments[i].MapAddress = 0;
1891
1892 return rc;
1893}
1894
1895
1896/** @copydoc kLdrModAllocTLS */
1897static int kldrModLXAllocTLS(PRTLDRMODINTERNAL pMod, void *pvMapping)
1898{
1899 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1900
1901 /* no tls, just do the error checking. */
1902 if ( pvMapping == KLDRMOD_INT_MAP
1903 && pModLX->pvMapping)
1904 return KLDR_ERR_NOT_MAPPED;
1905 return VINF_SUCCESS;
1906}
1907
1908
1909/** @copydoc kLdrModFreeTLS */
1910static void kldrModLXFreeTLS(PRTLDRMODINTERNAL pMod, void *pvMapping)
1911{
1912 /* no tls. */
1913 RT_NOREF(pMod);
1914 RT_NOREF(pvMapping);
1915
1916}
1917
1918
1919/** @copydoc kLdrModReload */
1920static int kldrModLXReload(PRTLDRMODINTERNAL pMod)
1921{
1922 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1923 int rc, rc2;
1924
1925 /*
1926 * Mapped?
1927 */
1928 if (!pModLX->pvMapping)
1929 return KLDR_ERR_NOT_MAPPED;
1930
1931 /*
1932 * Before doing anything we'll have to make all pages writable.
1933 */
1934 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1935 if (RT_FAILURE(rc))
1936 return rc;
1937
1938 /*
1939 * Load the bits again.
1940 */
1941 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1942
1943 /*
1944 * Restore protection.
1945 */
1946 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1947 if (RT_SUCCESS(rc) && RT_FAILURE(rc2))
1948 rc = rc2;
1949 return rc;
1950}
1951
1952
1953/** @copydoc kLdrModFixupMapping */
1954static int kldrModLXFixupMapping(PRTLDRMODINTERNAL pMod, PFNRTLDRIMPORT pfnGetImport, void *pvUser)
1955{
1956 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1957 int rc, rc2;
1958
1959 /*
1960 * Mapped?
1961 */
1962 if (!pModLX->pvMapping)
1963 return KLDR_ERR_NOT_MAPPED;
1964
1965 /*
1966 * Before doing anything we'll have to make all pages writable.
1967 */
1968 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1969 if (RT_FAILURE(rc))
1970 return rc;
1971
1972 /*
1973 * Apply fixups and resolve imports.
1974 */
1975 rc = rtldrLX_RelocateBits(pMod, (void *)pModLX->pvMapping, (uintptr_t)pModLX->pvMapping,
1976 pModLX->aSegments[0].LinkAddress, pfnGetImport, pvUser);
1977
1978 /*
1979 * Restore protection.
1980 */
1981 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1982 if (RT_SUCCESS(rc) && RT_FAILURE(rc2))
1983 rc = rc2;
1984 return rc;
1985}
1986
1987
1988/** @copydoc kLdrModCallInit */
1989static int kldrModLXCallInit(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle)
1990{
1991 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
1992 int rc;
1993
1994 /*
1995 * Mapped?
1996 */
1997 if (pvMapping == KLDRMOD_INT_MAP)
1998 {
1999 pvMapping = (void *)pModLX->pvMapping;
2000 if (!pvMapping)
2001 return KLDR_ERR_NOT_MAPPED;
2002 }
2003
2004 /*
2005 * Do TLS callbacks first and then call the init/term function if it's a DLL.
2006 */
2007 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2008 rc = kldrModLXDoCallDLL(pModLX, pvMapping, 0 /* attach */, uHandle);
2009 else
2010 rc = VINF_SUCCESS;
2011 return rc;
2012}
2013
2014
2015/**
2016 * Call the DLL entrypoint.
2017 *
2018 * @returns 0 on success.
2019 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
2020 * @param pModLX The LX module interpreter instance.
2021 * @param pvMapping The module mapping to use (resolved).
2022 * @param uOp The operation (DLL_*).
2023 * @param uHandle The module handle to present.
2024 */
2025static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, uintptr_t uHandle)
2026{
2027 int rc;
2028
2029 /*
2030     * If there is no entrypoint there isn't anything to be done.
2031 */
2032 if ( !pModLX->Hdr.e32_startobj
2033 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2034 return VINF_SUCCESS;
2035
2036 /*
2037 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2038 */
2039 rc = kldrModLXDoCall((uintptr_t)pvMapping
2040 + (uintptr_t)pModLX->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2041 + pModLX->Hdr.e32_eip,
2042 uHandle, uOp, NULL);
2043 if (rc)
2044 rc = VINF_SUCCESS;
2045 else if (uOp == 0 /* attach */)
2046 rc = KLDR_ERR_MODULE_INIT_FAILED;
2047 else /* detach: ignore failures */
2048 rc = VINF_SUCCESS;
2049 return rc;
2050}
2051
2052
2053/**
2054 * Do a 3 parameter callback.
2055 *
2056 * @returns 32-bit callback return.
2057 * @param uEntrypoint The address of the function to be called.
2058 * @param uHandle The first argument, the module handle.
2059 * @param uOp          The second argument, the reason we're calling.
2060 * @param pvReserved The third argument, reserved argument. (figure this one out)
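 * @remark As the inline assembly below shows, the arguments are pushed right
 *         to left as (uHandle, uOp, 0/reserved), the 32-bit result is taken
 *         from EAX and the stack pointer is restored by the caller afterwards.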
2061 */
2062static int32_t kldrModLXDoCall(uintptr_t uEntrypoint, uintptr_t uHandle, uint32_t uOp, void *pvReserved)
2063{
2064#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2065 int32_t rc;
2066/** @todo try/except */
2067
2068 /*
2069 * Paranoia.
2070 */
2071# ifdef __GNUC__
2072 __asm__ __volatile__(
2073 "pushl %2\n\t"
2074 "pushl %1\n\t"
2075 "pushl %0\n\t"
2076 "lea 12(%%esp), %2\n\t"
2077 "call *%3\n\t"
2078 "movl %2, %%esp\n\t"
2079 : "=a" (rc)
2080 : "d" (uOp),
2081 "S" (0),
2082 "c" (uEntrypoint),
2083 "0" (uHandle));
2084# elif defined(_MSC_VER)
2085 __asm {
2086 mov eax, [uHandle]
2087 mov edx, [uOp]
2088 mov ecx, 0
2089 mov ebx, [uEntrypoint]
2090 push edi
2091 mov edi, esp
2092 push ecx
2093 push edx
2094 push eax
2095 call ebx
2096 mov esp, edi
2097 pop edi
2098 mov [rc], eax
2099 }
2100# else
2101# error "port me!"
2102# endif
2103 RT_NOREF(pvReserved);
2104 return rc;
2105
2106#else
2107 RT_NOREF(uEntrypoint);
2108 RT_NOREF(uHandle);
2109 RT_NOREF(uOp);
2110 RT_NOREF(pvReserved);
2111 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2112#endif
2113}
2114
2115
2116/** @copydoc kLdrModCallTerm */
2117static int kldrModLXCallTerm(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle)
2118{
2119 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2120
2121 /*
2122 * Mapped?
2123 */
2124 if (pvMapping == KLDRMOD_INT_MAP)
2125 {
2126 pvMapping = (void *)pModLX->pvMapping;
2127 if (!pvMapping)
2128 return KLDR_ERR_NOT_MAPPED;
2129 }
2130
2131 /*
2132 * Do the call.
2133 */
2134 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2135 kldrModLXDoCallDLL(pModLX, pvMapping, 1 /* detach */, uHandle);
2136
2137 return VINF_SUCCESS;
2138}
2139
2140
2141/** @copydoc kLdrModCallThread */
2142static int kldrModLXCallThread(PRTLDRMODINTERNAL pMod, void *pvMapping, uintptr_t uHandle, unsigned fAttachingOrDetaching)
2143{
2144 /* no thread attach/detach callout. */
2145 RT_NOREF(pMod);
2146 RT_NOREF(pvMapping);
2147 RT_NOREF(uHandle);
2148 RT_NOREF(fAttachingOrDetaching);
2149 return VINF_SUCCESS;
2150}
2151
2152#endif
2153
2154/**
2155 * @interface_method_impl{RTLDROPS,pfnGetImageSize}
2156 */
2157static DECLCALLBACK(size_t) rtldrLX_GetImageSize(PRTLDRMODINTERNAL pMod)
2158{
2159 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2160 return pModLX->cbMapped;
2161}
2162
2163
2164/**
2165 * @interface_method_impl{RTLDROPS,pfnGetBits}
2166 */
2167static DECLCALLBACK(int) rtldrLX_GetBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR BaseAddress,
2168 PFNRTLDRIMPORT pfnGetImport, void *pvUser)
2169{
2170 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2171
2172 /*
2173 * Load the image bits.
2174 */
2175 int rc = kldrModLXDoLoadBits(pModLX, pvBits);
2176 if (RT_SUCCESS(rc))
2177 {
2178 /*
2179 * Perform relocations.
2180 */
2181 rc = rtldrLX_RelocateBits(pMod, pvBits, BaseAddress, pModLX->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2182 }
2183 return rc;
2184}
2185
2186
2187/* GCC goes bonkers if we put this inside the function. */
2188union RELOC_VISIBILITY_STUPIDITY
2189{
2190 const uint8_t *pb;
2191 const struct r32_rlc *prlc;
2192};
2193
2194/**
2195 * @interface_method_impl{RTLDROPS,pfnRelocate}
2196 */
2197static DECLCALLBACK(int) rtldrLX_RelocateBits(PRTLDRMODINTERNAL pMod, void *pvBits, RTUINTPTR NewBaseAddress,
2198 RTUINTPTR OldBaseAddress, PFNRTLDRIMPORT pfnGetImport, void *pvUser)
2199{
2200 PKLDRMODLX pModLX = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2201 uint32_t iSeg;
2202 int rc;
2203
2204 /*
2205     * Do we need to do *anything*?
2206 */
2207 if ( NewBaseAddress == OldBaseAddress
2208 && NewBaseAddress == pModLX->paObjs[0].o32_base
2209 && !pModLX->Hdr.e32_impmodcnt)
2210 return VINF_SUCCESS;
2211
2212 /*
2213 * Load the fixup section.
2214 */
2215 if (!pModLX->pbFixupSection)
2216 {
2217 rc = kldrModLXDoLoadFixupSection(pModLX);
2218 if (RT_FAILURE(rc))
2219 return rc;
2220 }
2221
2222 /*
2223 * Iterate the segments.
2224 */
2225 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2226 {
2227 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2228 RTLDRADDR PageAddress = NewBaseAddress + pModLX->aSegments[iSeg].RVA;
2229 uint32_t iPage;
2230 uint8_t *pbPage = (uint8_t *)pvBits + (uintptr_t)pModLX->aSegments[iSeg].RVA;
2231
2232 /*
2233 * Iterate the page map pages.
2234 */
2235 for (iPage = 0, rc = VINF_SUCCESS;
2236 RT_SUCCESS(rc) && iPage < pObj->o32_mapsize;
2237 iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2238 {
2239 const uint8_t * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2240 const uint8_t *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
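            /* Note: consecutive fixup page table entries bracket the fixup records
               of a page - the entry for this page gives the start offset and the
               following entry the end offset within the fixup record section. */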
2241 RTLDRADDR uValue = NIL_RTLDRADDR;
2242 uint32_t fKind = 0;
2243 int iSelector;
2244
2245 /* sanity */
2246 if (pbFixupRecEnd < pb)
2247 return VERR_LDR_BAD_FIXUP;
2248 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2249 return VERR_LDR_BAD_FIXUP;
2250 if (pb < pModLX->pbFixupSection)
2251 return VERR_LDR_BAD_FIXUP;
2252
2253 /*
2254 * Iterate the fixup record.
2255 */
2256 while (pb < pbFixupRecEnd)
2257 {
2258 union RELOC_VISIBILITY_STUPIDITY u;
2259 char szImpModule[256];
2260 u.pb = pb;
2261                 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pb at the 4th member; chained fixups have an 8-bit count instead of r32_soff. */
2262
2263 /*
2264 * Figure out the target.
2265 */
2266 switch (u.prlc->nr_flags & NRRTYP)
2267 {
2268 /*
2269 * Internal fixup.
2270 */
2271 case NRRINT:
2272 {
2273 uint16_t iTrgObject;
2274 uint32_t offTrgObject;
2275
2276 /* the object */
2277 if (u.prlc->nr_flags & NR16OBJMOD)
2278 {
2279 iTrgObject = *(const uint16_t *)pb;
2280 pb += 2;
2281 }
2282 else
2283 iTrgObject = *pb++;
2284 iTrgObject--;
2285 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2286 return VERR_LDR_BAD_FIXUP;
2287
2288 /* the target */
2289 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2290 {
2291 if (u.prlc->nr_flags & NR32BITOFF)
2292 {
2293 offTrgObject = *(const uint32_t *)pb;
2294 pb += 4;
2295 }
2296 else
2297 {
2298 offTrgObject = *(const uint16_t *)pb;
2299 pb += 2;
2300 }
2301
2302 /* calculate the symbol info. */
2303 uValue = offTrgObject + NewBaseAddress + pModLX->aSegments[iTrgObject].RVA;
2304 }
2305 else
2306 uValue = NewBaseAddress + pModLX->aSegments[iTrgObject].RVA;
2307 if ( (u.prlc->nr_stype & NRALIAS)
2308 || (pModLX->aSegments[iTrgObject].fFlags & RTLDRSEG_FLAG_16BIT))
2309 iSelector = pModLX->aSegments[iTrgObject].Sel16bit;
2310 else
2311 iSelector = pModLX->aSegments[iTrgObject].SelFlat;
2312 fKind = 0;
2313 break;
2314 }
2315
2316 /*
2317 * Import by symbol ordinal.
2318 */
2319 case NRRORD:
2320 {
2321 uint16_t iModule;
2322 uint32_t iSymbol;
2323
2324 /* the module ordinal */
2325 if (u.prlc->nr_flags & NR16OBJMOD)
2326 {
2327 iModule = *(const uint16_t *)pb;
2328 pb += 2;
2329 }
2330 else
2331 iModule = *pb++;
2332 iModule--;
2333 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2334 return VERR_LDR_BAD_FIXUP;
2335 rc = kldrModLXGetImport(pModLX, NULL, iModule, szImpModule, sizeof(szImpModule), NULL);
2336 if (RT_FAILURE(rc))
2337 return rc;
2338
2339#if 1
2340 if (u.prlc->nr_flags & NRICHAIN)
2341 return VERR_LDR_BAD_FIXUP;
2342#endif
2343
2344                     /* the symbol ordinal. */
2345 if (u.prlc->nr_flags & NR32BITOFF)
2346 {
2347 iSymbol = *(const uint32_t *)pb;
2348 pb += 4;
2349 }
2350 else if (!(u.prlc->nr_flags & NR8BITORD))
2351 {
2352 iSymbol = *(const uint16_t *)pb;
2353 pb += 2;
2354 }
2355 else
2356 iSymbol = *pb++;
2357
2358 /* resolve it. */
2359 rc = pfnGetImport(pMod, szImpModule, NULL, iSymbol, &uValue, /*&fKind,*/ pvUser);
2360 if (RT_FAILURE(rc))
2361 return rc;
2362 iSelector = -1;
2363 break;
2364 }
2365
2366 /*
2367 * Import by symbol name.
2368 */
2369 case NRRNAM:
2370 {
2371 uint32_t iModule;
2372 uint16_t offSymbol;
2373 const uint8_t *pbSymbol;
2374
2375 /* the module ordinal */
2376 if (u.prlc->nr_flags & NR16OBJMOD)
2377 {
2378 iModule = *(const uint16_t *)pb;
2379 pb += 2;
2380 }
2381 else
2382 iModule = *pb++;
2383 iModule--;
2384 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2385 return VERR_LDR_BAD_FIXUP;
2386 rc = kldrModLXGetImport(pModLX, NULL, iModule, szImpModule, sizeof(szImpModule), NULL);
2387 if (RT_FAILURE(rc))
2388 return rc;
2389#if 1
2390 if (u.prlc->nr_flags & NRICHAIN)
2391 return VERR_LDR_BAD_FIXUP;
2392#endif
2393
2394                     /* the symbol name offset (into the import procedure name table). */
2395 if (u.prlc->nr_flags & NR32BITOFF)
2396 {
2397 offSymbol = *(const uint32_t *)pb;
2398 pb += 4;
2399 }
2400 else if (!(u.prlc->nr_flags & NR8BITORD))
2401 {
2402 offSymbol = *(const uint16_t *)pb;
2403 pb += 2;
2404 }
2405 else
2406 offSymbol = *pb++;
2407 pbSymbol = pModLX->pbImportProcs + offSymbol;
2408 if ( pbSymbol < pModLX->pbImportProcs
2409 || pbSymbol > pModLX->pbFixupSectionLast)
2410 return VERR_LDR_BAD_FIXUP;
2411 char szSymbol[256];
2412 memcpy(szSymbol, pbSymbol + 1, *pbSymbol);
2413 szSymbol[*pbSymbol] = '\0';
2414
2415 /* resolve it. */
2416 rc = pfnGetImport(pMod, szImpModule, szSymbol, UINT32_MAX, &uValue, /*&fKind,*/ pvUser);
2417 if (RT_FAILURE(rc))
2418 return rc;
2419 iSelector = -1;
2420 break;
2421 }
2422
2423 case NRRENT:
2424 KLDRMODLX_ASSERT(!"NRRENT");
2425 /* Falls through. */
2426 default:
2427 iSelector = -1;
2428 break;
2429 }
2430
2431 /* addend */
2432 if (u.prlc->nr_flags & NRADD)
2433 {
2434 if (u.prlc->nr_flags & NR32BITADD)
2435 {
2436 uValue += *(const uint32_t *)pb;
2437 pb += 4;
2438 }
2439 else
2440 {
2441 uValue += *(const uint16_t *)pb;
2442 pb += 2;
2443 }
2444 }
2445
2446
2447 /*
2448 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2449 */
2450 if (!(u.prlc->nr_stype & NRCHAIN))
2451 {
2452 int off = u.prlc->r32_soff;
2453
2454 /* common / simple */
2455 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2456 && off >= 0
2457 && off <= (int)OBJPAGELEN - 4)
2458 *(uint32_t *)&pbPage[off] = (uint32_t)uValue;
2459 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2460 && off >= 0
2461 && off <= (int)OBJPAGELEN - 4)
2462 *(uint32_t *)&pbPage[off] = (uint32_t)(uValue - (PageAddress + off + 4));
2463 else
2464 {
2465 /* generic */
2466 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2467 if (RT_FAILURE(rc))
2468 return rc;
2469 }
2470 }
2471 else if (!(u.prlc->nr_flags & NRICHAIN))
2472 {
2473 const int16_t *poffSrc = (const int16_t *)pb;
2474 uint8_t c = u.pb[2];
2475
2476 /* common / simple */
2477 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2478 {
2479 while (c-- > 0)
2480 {
2481 int off = *poffSrc++;
2482 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2483 *(uint32_t *)&pbPage[off] = (uint32_t)uValue;
2484 else
2485 {
2486 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2487 if (RT_FAILURE(rc))
2488 return rc;
2489 }
2490 }
2491 }
2492 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2493 {
2494 while (c-- > 0)
2495 {
2496 int off = *poffSrc++;
2497 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2498 *(uint32_t *)&pbPage[off] = (uint32_t)(uValue - (PageAddress + off + 4));
2499 else
2500 {
2501 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2502 if (RT_FAILURE(rc))
2503 return rc;
2504 }
2505 }
2506 }
2507 else
2508 {
2509 while (c-- > 0)
2510 {
2511 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2512 if (RT_FAILURE(rc))
2513 return rc;
2514 }
2515 }
2516 pb = (const uint8_t *)poffSrc;
2517 }
2518 else
2519 {
2520 /* This is a pain because it will require virgin pages on a relocation. */
2521 KLDRMODLX_ASSERT(!"NRICHAIN");
2522 return VERR_LDRLX_NRICHAIN_NOT_SUPPORTED;
2523 }
2524 }
2525 }
2526 }
2527
2528 return VINF_SUCCESS;
2529}
2530
2531
2532/**
2533 * Applies the relocation to one 'source' in a page.
2534 *
2535 * This takes care of the more esoteric cases while the common cases
2536 * are dealt with separately.
2537 *
2538 * @returns IPRT status code.
2539 * @param pbPage The page in which to apply the fixup.
2540 * @param off           Page relative offset of where to apply the fixup.
2541 * @param PageAddress The page address.
2542 * @param prlc The relocation record.
2543 * @param iSelector Selector value, -1 if flat.
2544 * @param uValue The target value.
2545 * @param fKind The target kind.
2546 */
2547static int kldrModLXDoReloc(uint8_t *pbPage, int off, RTLDRADDR PageAddress, const struct r32_rlc *prlc,
2548 int iSelector, RTLDRADDR uValue, uint32_t fKind)
2549{
2550#pragma pack(1) /* just to be sure */
2551 union
2552 {
2553 uint8_t ab[6];
2554 uint32_t off32;
2555 uint16_t off16;
2556 uint8_t off8;
2557 struct
2558 {
2559 uint16_t off;
2560 uint16_t Sel;
2561 } Far16;
2562 struct
2563 {
2564 uint32_t off;
2565 uint16_t Sel;
2566 } Far32;
2567 } uData;
2568#pragma pack()
2569 const uint8_t *pbSrc;
2570 uint8_t *pbDst;
2571 uint8_t cb;
2572
2573 RT_NOREF(fKind);
2574
2575 /*
2576 * Compose the fixup data.
2577 */
2578 switch (prlc->nr_stype & NRSRCMASK)
2579 {
2580 case NRSBYT:
2581 uData.off8 = (uint8_t)uValue;
2582 cb = 1;
2583 break;
2584 case NRSSEG:
2585 if (iSelector == -1)
2586 {
2587 /* fixme */
2588 }
2589 uData.off16 = iSelector;
2590 cb = 2;
2591 break;
2592 case NRSPTR:
2593 if (iSelector == -1)
2594 {
2595 /* fixme */
2596 }
2597 uData.Far16.off = (uint16_t)uValue;
2598 uData.Far16.Sel = iSelector;
2599 cb = 4;
2600 break;
2601 case NRSOFF:
2602 uData.off16 = (uint16_t)uValue;
2603 cb = 2;
2604 break;
2605 case NRPTR48:
2606 if (iSelector == -1)
2607 {
2608 /* fixme */
2609 }
2610 uData.Far32.off = (uint32_t)uValue;
2611 uData.Far32.Sel = iSelector;
2612 cb = 6;
2613 break;
2614 case NROFF32:
2615 uData.off32 = (uint32_t)uValue;
2616 cb = 4;
2617 break;
2618 case NRSOFF32:
2619 uData.off32 = (uint32_t)(uValue - (PageAddress + off + 4));
2620 cb = 4;
2621 break;
2622 default:
2623 return VERR_LDRLX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2624 }
2625
2626 /*
2627 * Apply it. This is sloooow...
2628 */
2629 pbSrc = &uData.ab[0];
2630 pbDst = pbPage + off;
2631 while (cb-- > 0)
2632 {
2633 if (off > (int)OBJPAGELEN)
2634 break;
2635 if (off >= 0)
2636 *pbDst = *pbSrc;
2637 pbSrc++;
2638 pbDst++;
2639 }
2640
2641 return VINF_SUCCESS;
2642}
2643
2644
2645/**
2646 * @interface_method_impl{RTLDROPS,pfnEnumSegments}
2647 */
2648static DECLCALLBACK(int) rtldrLX_EnumSegments(PRTLDRMODINTERNAL pMod, PFNRTLDRENUMSEGS pfnCallback, void *pvUser)
2649{
2650 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2651 uint32_t const cSegments = pThis->cSegments;
2652 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2653 {
2654 int rc = pfnCallback(pMod, &pThis->aSegments[iSeg], pvUser);
2655 if (rc != VINF_SUCCESS)
2656 return rc;
2657 }
2658
2659 return VINF_SUCCESS;
2660}
2661
2662
2663/**
2664 * @interface_method_impl{RTLDROPS,pfnLinkAddressToSegOffset}
2665 */
2666static DECLCALLBACK(int) rtldrLX_LinkAddressToSegOffset(PRTLDRMODINTERNAL pMod, RTLDRADDR LinkAddress,
2667 uint32_t *piSeg, PRTLDRADDR poffSeg)
2668{
2669 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2670 uint32_t const cSegments = pThis->cSegments;
2671 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2672 {
2673 RTLDRADDR offSeg = LinkAddress - pThis->aSegments[iSeg].LinkAddress;
2674 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2675 || offSeg < pThis->aSegments[iSeg].cb)
2676 {
2677 *piSeg = iSeg;
2678 *poffSeg = offSeg;
2679 return VINF_SUCCESS;
2680 }
2681 }
2682
2683 return VERR_LDR_INVALID_LINK_ADDRESS;
2684}
2685
2686
2687/**
2688 * @interface_method_impl{RTLDROPS,pfnLinkAddressToRva}
2689 */
2690static DECLCALLBACK(int) rtldrLX_LinkAddressToRva(PRTLDRMODINTERNAL pMod, RTLDRADDR LinkAddress, PRTLDRADDR pRva)
2691{
2692 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2693 uint32_t const cSegments = pThis->cSegments;
2694 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2695 {
2696 RTLDRADDR offSeg = LinkAddress - pThis->aSegments[iSeg].LinkAddress;
2697 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2698 || offSeg < pThis->aSegments[iSeg].cb)
2699 {
2700 *pRva = pThis->aSegments[iSeg].RVA + offSeg;
2701 return VINF_SUCCESS;
2702 }
2703 }
2704
2705 return VERR_LDR_INVALID_RVA;
2706}
2707
2708
2709/**
2710 * @interface_method_impl{RTLDROPS,pfnSegOffsetToRva}
2711 */
2712static DECLCALLBACK(int) rtldrLX_SegOffsetToRva(PRTLDRMODINTERNAL pMod, uint32_t iSeg, RTLDRADDR offSeg, PRTLDRADDR pRva)
2713{
2714 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2715
2716 if (iSeg >= pThis->cSegments)
2717 return VERR_LDR_INVALID_SEG_OFFSET;
2718 PCRTLDRSEG pSegment = &pThis->aSegments[iSeg];
2719
2720 if ( offSeg > pSegment->cbMapped
2721 && offSeg > pSegment->cb
2722 && ( pSegment->cbFile < 0
2723 || offSeg > (uint64_t)pSegment->cbFile))
2724 return VERR_LDR_INVALID_SEG_OFFSET;
2725
2726 *pRva = pSegment->RVA + offSeg;
2727 return VINF_SUCCESS;
2728}
2729
2730
2731/**
2732 * @interface_method_impl{RTLDROPS,pfnRvaToSegOffset}
2733 */
2734static DECLCALLBACK(int) rtldrLX_RvaToSegOffset(PRTLDRMODINTERNAL pMod, RTLDRADDR Rva, uint32_t *piSeg, PRTLDRADDR poffSeg)
2735{
2736 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2737 uint32_t const cSegments = pThis->cSegments;
2738 for (uint32_t iSeg = 0; iSeg < cSegments; iSeg++)
2739 {
2740 RTLDRADDR offSeg = Rva - pThis->aSegments[iSeg].RVA;
2741 if ( offSeg < pThis->aSegments[iSeg].cbMapped
2742 || offSeg < pThis->aSegments[iSeg].cb)
2743 {
2744 *piSeg = iSeg;
2745 *poffSeg = offSeg;
2746 return VINF_SUCCESS;
2747 }
2748 }
2749
2750 return VERR_LDR_INVALID_RVA;
2751}
2752
2753
2754/**
2755 * @interface_method_impl{RTLDROPS,pfnReadDbgInfo}
2756 */
2757static DECLCALLBACK(int) rtldrLX_ReadDbgInfo(PRTLDRMODINTERNAL pMod, uint32_t iDbgInfo, RTFOFF off, size_t cb, void *pvBuf)
2758{
2759 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2760 if (iDbgInfo == 0)
2761 return pThis->Core.pReader->pfnRead(pThis->Core.pReader, pvBuf, cb, off);
2762 return VERR_OUT_OF_RANGE;
2763}
2764
2765
2766/**
2767 * @interface_method_impl{RTLDROPS,pfnQueryProp}
2768 */
2769static DECLCALLBACK(int) rtldrLX_QueryProp(PRTLDRMODINTERNAL pMod, RTLDRPROP enmProp, void const *pvBits,
2770 void *pvBuf, size_t cbBuf, size_t *pcbRet)
2771{
2772 PKLDRMODLX pThis = RT_FROM_MEMBER(pMod, KLDRMODLX, Core);
2773 int rc;
2774 switch (enmProp)
2775 {
2776 case RTLDRPROP_IMPORT_COUNT:
2777 Assert(cbBuf == sizeof(uint32_t));
2778 Assert(*pcbRet == cbBuf);
2779 *(uint32_t *)pvBuf = pThis->Hdr.e32_impmodcnt;
2780 rc = VINF_SUCCESS;
2781 break;
2782
2783 case RTLDRPROP_IMPORT_MODULE:
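            /* Note: the import module index is passed in at the start of pvBuf
               and is overwritten by the module name. */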
2784 rc = kldrModLXGetImport(pThis, pvBits, *(uint32_t const *)pvBuf, (char *)pvBuf, cbBuf, pcbRet);
2785 break;
2786
2787 case RTLDRPROP_INTERNAL_NAME:
2788 *pcbRet = pThis->cchName + 1;
2789 if (cbBuf >= pThis->cchName + 1)
2790 {
2791 memcpy(pvBuf, pThis->pszName, pThis->cchName + 1);
2792 rc = VINF_SUCCESS;
2793 }
2794 else
2795 rc = VERR_BUFFER_OVERFLOW;
2796 break;
2797
2798
2799 default:
2800 rc = VERR_NOT_FOUND;
2801 break;
2802 }
2803 RT_NOREF_PV(pvBits);
2804 return rc;
2805}
2806
2807
2808/**
2809 * Operations for an LX module interpreter.
2810 */
2811static const RTLDROPS s_rtldrLXOps=
2812{
2813 "LX",
2814 rtldrLX_Close,
2815 NULL,
2816 NULL /*pfnDone*/,
2817 rtldrLX_EnumSymbols,
2818 /* ext */
2819 rtldrLX_GetImageSize,
2820 rtldrLX_GetBits,
2821 rtldrLX_RelocateBits,
2822 rtldrLX_GetSymbolEx,
2823 NULL /*pfnQueryForwarderInfo*/,
2824 rtldrLX_EnumDbgInfo,
2825 rtldrLX_EnumSegments,
2826 rtldrLX_LinkAddressToSegOffset,
2827 rtldrLX_LinkAddressToRva,
2828 rtldrLX_SegOffsetToRva,
2829 rtldrLX_RvaToSegOffset,
2830 rtldrLX_ReadDbgInfo,
2831 rtldrLX_QueryProp,
2832 NULL /*pfnVerifySignature*/,
2833 NULL /*pfnHashImage*/,
2834 NULL /*pfnUnwindFrame*/,
2835 42
2836};
2837
2838
2839/**
2840 * Handles opening LX images.
2841 */
2842DECLHIDDEN(int) rtldrLXOpen(PRTLDRREADER pReader, uint32_t fFlags, RTLDRARCH enmArch, RTFOFF offLxHdr,
2843 PRTLDRMOD phLdrMod, PRTERRINFO pErrInfo)
2844{
2845
2846 /*
2847 * Create the instance data and do a minimal header validation.
2848 */
2849 PKLDRMODLX pThis = NULL;
2850 int rc = kldrModLXDoCreate(pReader, offLxHdr, fFlags, &pThis, pErrInfo);
2851 if (RT_SUCCESS(rc))
2852 {
2853 /*
2854 * Match up against the requested CPU architecture.
2855 */
2856 if ( enmArch == RTLDRARCH_WHATEVER
2857 || pThis->Core.enmArch == enmArch)
2858 {
2859 pThis->Core.pOps = &s_rtldrLXOps;
2860 pThis->Core.u32Magic = RTLDRMOD_MAGIC;
2861 *phLdrMod = &pThis->Core;
2862 return VINF_SUCCESS;
2863 }
2864 rc = VERR_LDR_ARCH_MISMATCH;
2865 }
2866 if (pThis)
2867 RTMemFree(pThis);
2868 return rc;
2869
2870}
2871