VirtualBox

source: kStuff/trunk/kLdr/kLdrModLX.c@ 96

Last change on this file since 96 was 96, checked in by bird, 7 years ago

kLdrModLX.c: warnings

1/* $Id: kLdrModLX.c 96 2017-09-15 06:10:50Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 Knut St. Osmundsen <[email protected]>
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#include <k/kLdr.h>
35#include "kLdrInternal.h"
36#include <k/kLdrFmts/lx.h>
37
38
39/*******************************************************************************
40* Defined Constants And Macros *
41*******************************************************************************/
42/** @def KLDRMODLX_STRICT
43 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
44#define KLDRMODLX_STRICT 1
45
46/** @def KLDRMODLX_ASSERT
47 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
48 */
49#ifdef KLDRMODLX_STRICT
50# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
51#else
52# define KLDRMODLX_ASSERT(expr) do {} while (0)
53#endif
54
55
56/*******************************************************************************
57* Structures and Typedefs *
58*******************************************************************************/
59/**
60 * Instance data for the LX module interpreter.
61 */
62typedef struct KLDRMODLX
63{
64 /** Pointer to the module. (Follows the section table.) */
65 PKLDRMOD pMod;
66 /** Pointer to the user mapping. */
67 const void *pvMapping;
68 /** The size of the mapped LX image. */
69 KSIZE cbMapped;
70 /** Reserved flags. */
71 KU32 f32Reserved;
72
73 /** The offset of the LX header. */
74 KLDRFOFF offHdr;
75 /** Copy of the LX header. */
76 struct e32_exe Hdr;
77
78 /** Pointer to the loader section.
79 * Allocated together with this structure. */
80 const KU8 *pbLoaderSection;
81 /** Pointer to the last byte in the loader section. */
82 const KU8 *pbLoaderSectionLast;
83 /** Pointer to the object table in the loader section. */
84 const struct o32_obj *paObjs;
85 /** Pointer to the object page map table in the loader section. */
86 const struct o32_map *paPageMappings;
87 /** Pointer to the resource table in the loader section. */
88 const struct rsrc32 *paRsrcs;
89 /** Pointer to the resident name table in the loader section. */
90 const KU8 *pbResNameTab;
91 /** Pointer to the entry table in the loader section. */
92 const KU8 *pbEntryTab;
93
94 /** Pointer to the non-resident name table. */
95 KU8 *pbNonResNameTab;
96 /** Pointer to the last byte in the non-resident name table. */
97 const KU8 *pbNonResNameTabLast;
98
99 /** Pointer to the fixup section. */
100 KU8 *pbFixupSection;
101 /** Pointer to the last byte in the fixup section. */
102 const KU8 *pbFixupSectionLast;
103 /** Pointer to the fixup page table within pvFixupSection. */
104 const KU32 *paoffPageFixups;
105 /** Pointer to the fixup record table within pvFixupSection. */
106 const KU8 *pbFixupRecs;
107 /** Pointer to the import module name table within pvFixupSection. */
108 const KU8 *pbImportMods;
109 /** Pointer to the import procedure name table within pvFixupSection. */
110 const KU8 *pbImportProcs;
111} KLDRMODLX, *PKLDRMODLX;
112
113
114/*******************************************************************************
115* Internal Functions *
116*******************************************************************************/
117static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
118static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
119 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
120static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
121static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KSSIZE cbNameTable, KU32 iOrdinal);
122static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KSIZE cchSymbol, KU32 *piSymbol);
123static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KSSIZE cbNameTable,
124 const char *pchSymbol, KSIZE cchSymbol);
125static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
126static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
127static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
128static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
129static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
130static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, KUPTR uHandle);
131static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
132 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
133static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
134static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
135static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
136 int iSelector, KLDRADDR uValue, KU32 fKind);
137
138
139/**
140 * Create a loader module instance interpreting the executable image found
141 * in the specified file provider instance.
142 *
143 * @returns 0 on success and *ppMod pointing to a module instance.
144 * On failure, a non-zero OS specific error code is returned.
145 * @param pOps Pointer to the registered method table.
146 * @param pRdr The file provider instance to use.
147 * @param fFlags Flags, MBZ.
148 * @param enmCpuArch The desired CPU architecture. KCPUARCH_UNKNOWN means
149 * anything goes, but with a preference for the current
150 * host architecture.
151 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
152 * @param ppMod Where to store the module instance pointer.
153 */
154static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KU32 fFlags, KCPUARCH enmCpuArch, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
155{
156 PKLDRMODLX pModLX;
157 int rc;
158 K_NOREF(fFlags);
159
160 /*
161 * Create the instance data and do a minimal header validation.
162 */
163 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
164 if (!rc)
165 {
166 /*
167 * Match up against the requested CPU architecture.
168 */
169 if ( enmCpuArch == KCPUARCH_UNKNOWN
170 || pModLX->pMod->enmArch == enmCpuArch)
171 {
172 pModLX->pMod->pOps = pOps;
173 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
174 *ppMod = pModLX->pMod;
175 return 0;
176 }
177 rc = KLDR_ERR_CPU_ARCH_MISMATCH;
178 }
179 kHlpFree(pModLX);
180 return rc;
181}
182
183
184/**
185 * Separate function for reading and creating the LX module instance to
186 * simplify cleanup on failure.
187 */
188static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
189{
190 struct e32_exe Hdr;
191 PKLDRMODLX pModLX;
192 PKLDRMOD pMod;
193 KSIZE cb;
194 KSIZE cchFilename;
195 KU32 off, offEnd;
196 KU32 i;
197 int rc;
198 int fCanOptimizeMapping;
199 KU32 NextRVA;
200 *ppModLX = NULL;
201
202 /*
203 * Read the signature and file header.
204 */
205 rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
206 if (rc)
207 return rc;
208 if ( Hdr.e32_magic[0] != E32MAGIC1
209 || Hdr.e32_magic[1] != E32MAGIC2)
210 return KLDR_ERR_UNKNOWN_FORMAT;
211
212 /* We're not interested in anything but x86 images. */
213 if ( Hdr.e32_level != E32LEVEL
214 || Hdr.e32_border != E32LEBO
215 || Hdr.e32_worder != E32LEWO
216 || Hdr.e32_cpu < E32CPU286
217 || Hdr.e32_cpu > E32CPU486
218 || Hdr.e32_pagesize != OBJPAGELEN
219 )
220 return KLDR_ERR_LX_BAD_HEADER;
221
222 /* Some rough sanity checks. */
223 offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
224 if ( Hdr.e32_itermap > offEnd
225 || Hdr.e32_datapage > offEnd
226 || Hdr.e32_nrestab > offEnd
227 || Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
228 || Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
229 || Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
230 || Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
231 return KLDR_ERR_LX_BAD_HEADER;
232
233 /* Verify the loader section. */
234 offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
235 if (Hdr.e32_objtab < sizeof(Hdr))
236 return KLDR_ERR_LX_BAD_LOADER_SECTION;
237 off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
238 if (off > offEnd)
239 return KLDR_ERR_LX_BAD_LOADER_SECTION;
240 if ( Hdr.e32_objmap
241 && (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
242 return KLDR_ERR_LX_BAD_LOADER_SECTION;
243 if ( Hdr.e32_rsrccnt
244 && ( Hdr.e32_rsrctab < off
245 || Hdr.e32_rsrctab > offEnd
246 || Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
247 return KLDR_ERR_LX_BAD_LOADER_SECTION;
248 if ( Hdr.e32_restab
249 && (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
250 return KLDR_ERR_LX_BAD_LOADER_SECTION;
251 if ( Hdr.e32_enttab
252 && (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
253 return KLDR_ERR_LX_BAD_LOADER_SECTION;
254 if ( Hdr.e32_dircnt
255 && (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
256 return KLDR_ERR_LX_BAD_LOADER_SECTION;
257
258 /* Verify the fixup section. */
259 off = offEnd;
260 offEnd = off + Hdr.e32_fixupsize;
261 if ( Hdr.e32_fpagetab
262 && (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
263 {
264 /*
265 * wlink mixes the fixup section and the loader section.
266 */
267 off = Hdr.e32_fpagetab;
268 offEnd = off + Hdr.e32_fixupsize;
269 Hdr.e32_ldrsize = off - Hdr.e32_objtab;
270 }
271 if ( Hdr.e32_frectab
272 && (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
273 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
274 if ( Hdr.e32_impmod
275 && (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
276 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
277 if ( Hdr.e32_impproc
278 && (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
279 return KLDR_ERR_LX_BAD_FIXUP_SECTION;
280
281 /*
282 * Calc the instance size, allocate and initialize it.
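 * Everything goes into a single allocation; the rough layout (an illustrative
 * sketch based on the size calculation below, each part 8-byte aligned) is:
 *   [KLDRMODLX] [KLDRMOD + aSegments[objcnt + 1]] [filename + '\0'] [loader section + 2 zero bytes]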
283 */
284 cchFilename = kHlpStrLen(kRdrName(pRdr));
285 cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
286 + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
287 + K_ALIGN_Z(cchFilename + 1, 8)
288 + Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
289 pModLX = (PKLDRMODLX)kHlpAlloc(cb);
290 if (!pModLX)
291 return KERR_NO_MEMORY;
292 *ppModLX = pModLX;
293
294 /* KLDRMOD */
295 pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
296 pMod->pvData = pModLX;
297 pMod->pRdr = pRdr;
298 pMod->pOps = NULL; /* set upon success. */
299 pMod->cSegments = Hdr.e32_objcnt;
300 pMod->cchFilename = (KU32)cchFilename;
301 pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
302 kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
303 pMod->pszName = NULL; /* finalized further down */
304 pMod->cchName = 0;
305 pMod->fFlags = 0;
306 switch (Hdr.e32_cpu)
307 {
308 case E32CPU286:
309 pMod->enmCpu = KCPU_I80286;
310 pMod->enmArch = KCPUARCH_X86_16;
311 break;
312 case E32CPU386:
313 pMod->enmCpu = KCPU_I386;
314 pMod->enmArch = KCPUARCH_X86_32;
315 break;
316 case E32CPU486:
317 pMod->enmCpu = KCPU_I486;
318 pMod->enmArch = KCPUARCH_X86_32;
319 break;
320 }
321 pMod->enmEndian = KLDRENDIAN_LITTLE;
322 pMod->enmFmt = KLDRFMT_LX;
323 switch (Hdr.e32_mflags & E32MODMASK)
324 {
325 case E32MODEXE:
326 pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
327 ? KLDRTYPE_EXECUTABLE_RELOCATABLE
328 : KLDRTYPE_EXECUTABLE_FIXED;
329 break;
330
331 case E32MODDLL:
332 case E32PROTDLL:
333 case E32MODPROTDLL:
334 pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
335 ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
336 : KLDRTYPE_SHARED_LIBRARY_FIXED;
337 break;
338
339 case E32MODPDEV:
340 case E32MODVDEV:
341 pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
342 break;
343 }
344 pMod->u32Magic = 0; /* set upon success. */
345
346 /* KLDRMODLX */
347 pModLX->pMod = pMod;
348 pModLX->pvMapping = 0;
349 pModLX->cbMapped = 0;
350 pModLX->f32Reserved = 0;
351
352 pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
353 kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));
354
355 pModLX->pbLoaderSection = K_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
356 pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
357 pModLX->paObjs = NULL;
358 pModLX->paPageMappings = NULL;
359 pModLX->paRsrcs = NULL;
360 pModLX->pbResNameTab = NULL;
361 pModLX->pbEntryTab = NULL;
362
363 pModLX->pbNonResNameTab = NULL;
364 pModLX->pbNonResNameTabLast = NULL;
365
366 pModLX->pbFixupSection = NULL;
367 pModLX->pbFixupSectionLast = NULL;
368 pModLX->paoffPageFixups = NULL;
369 pModLX->pbFixupRecs = NULL;
370 pModLX->pbImportMods = NULL;
371 pModLX->pbImportProcs = NULL;
372
373 /*
374 * Read the loader data.
375 */
376 rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
377 if (rc)
378 return rc;
379 ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
380 ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
381 if (pModLX->Hdr.e32_objcnt)
382 pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
383 if (pModLX->Hdr.e32_objmap)
384 pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
385 if (pModLX->Hdr.e32_rsrccnt)
386 pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
387 if (pModLX->Hdr.e32_restab)
388 pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
389 if (pModLX->Hdr.e32_enttab)
390 pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;
391
392 /*
393 * Get the soname from the resident name table.
394 * Very convenient that it's the 0 ordinal, because then we get a
395 * free string terminator.
396 * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
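 * For illustration (assuming the usual little-endian layout): a module named
 * "FOO" is stored as the bytes 03 'F' 'O' 'O' 00 00, i.e. a length prefix,
 * the name, and the 16-bit ordinal 0.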
397 */
398 if (pModLX->pbResNameTab)
399 pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
400 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
401 0);
402 if (!pMod->pszName)
403 return KLDR_ERR_LX_NO_SONAME;
404 pMod->cchName = *(const KU8 *)pMod->pszName++;
405 if (pMod->cchName != kHlpStrLen(pMod->pszName))
406 return KLDR_ERR_LX_BAD_SONAME;
407
408 /*
409 * Quick validation of the object table.
410 */
411 cb = 0;
412 for (i = 0; i < pMod->cSegments; i++)
413 {
414 if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
415 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
416 if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
417 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
418 if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
419 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
420 if ( pModLX->paObjs[i].o32_mapsize
421 && ( (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
422 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
423 > pModLX->pbLoaderSectionLast))
424 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
425 if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
426 {
427 if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
428 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
429 if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
430 return KLDR_ERR_LX_BAD_OBJECT_TABLE;
431 }
432 }
433
434 /*
435 * Check if we can optimize the mapping by using a different
436 * object alignment. The linker typically uses 64KB alignment,
437 * we can easily get away with page alignment in most cases.
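 * For illustration (assuming 4KB pages): an object of 0x1234 bytes gets a
 * cbMapped of 0x2000 when we can optimize, but keeps the full distance to the
 * next object's base (typically a 64KB multiple) when we cannot.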
438 */
439 fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
440 NextRVA = 0;
441
442 /*
443 * Setup the KLDRMOD segment array.
444 */
445 for (i = 0; i < pMod->cSegments; i++)
446 {
447 /* unused */
448 pMod->aSegments[i].pvUser = NULL;
449 pMod->aSegments[i].MapAddress = 0;
450 pMod->aSegments[i].pchName = NULL;
451 pMod->aSegments[i].cchName = 0;
452 pMod->aSegments[i].offFile = -1;
453 pMod->aSegments[i].cbFile = -1;
454 pMod->aSegments[i].SelFlat = 0;
455 pMod->aSegments[i].Sel16bit = 0;
456
457 /* flags */
458 pMod->aSegments[i].fFlags = 0;
459 if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
460 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_16BIT;
461 if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
462 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_ALIAS16;
463 if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
464 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_CONFORM;
465 if (pModLX->paObjs[i].o32_flags & OBJIOPL)
466 pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_IOPL;
467
468 /* size and addresses */
469 pMod->aSegments[i].Alignment = OBJPAGELEN;
470 pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
471 pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
472 pMod->aSegments[i].RVA = NextRVA;
473 if ( fCanOptimizeMapping
474 || i + 1 >= pMod->cSegments
475 || (pModLX->paObjs[i].o32_flags & OBJRSRC)
476 || (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
477 pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
478 else
479 pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
480 NextRVA += (KU32)pMod->aSegments[i].cbMapped;
481
482 /* protection */
483 switch ( pModLX->paObjs[i].o32_flags
484 & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
485 {
486 case 0:
487 case OBJSHARED:
488 pMod->aSegments[i].enmProt = KPROT_NOACCESS;
489 break;
490 case OBJREAD:
491 case OBJREAD | OBJSHARED:
492 pMod->aSegments[i].enmProt = KPROT_READONLY;
493 break;
494 case OBJWRITE:
495 case OBJWRITE | OBJREAD:
496 pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
497 break;
498 case OBJWRITE | OBJSHARED:
499 case OBJWRITE | OBJSHARED | OBJREAD:
500 pMod->aSegments[i].enmProt = KPROT_READWRITE;
501 break;
502 case OBJEXEC:
503 case OBJEXEC | OBJSHARED:
504 pMod->aSegments[i].enmProt = KPROT_EXECUTE;
505 break;
506 case OBJEXEC | OBJREAD:
507 case OBJEXEC | OBJREAD | OBJSHARED:
508 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
509 break;
510 case OBJEXEC | OBJWRITE:
511 case OBJEXEC | OBJWRITE | OBJREAD:
512 pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
513 break;
514 case OBJEXEC | OBJWRITE | OBJSHARED:
515 case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
516 pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
517 break;
518 }
519 if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
520 pMod->aSegments[i].enmProt = KPROT_READONLY;
521 /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
522 pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
523 pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
524 }
525
526 /* set the mapping size */
527 pModLX->cbMapped = NextRVA;
528
529 /*
530 * We're done.
531 */
532 *ppModLX = pModLX;
533 return 0;
534}
535
536
537/** @copydoc KLDRMODOPS::pfnDestroy */
538static int kldrModLXDestroy(PKLDRMOD pMod)
539{
540 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
541 int rc = 0;
542 KLDRMODLX_ASSERT(!pModLX->pvMapping);
543
544 if (pMod->pRdr)
545 {
546 rc = kRdrClose(pMod->pRdr);
547 pMod->pRdr = NULL;
548 }
549 if (pModLX->pbNonResNameTab)
550 {
551 kHlpFree(pModLX->pbNonResNameTab);
552 pModLX->pbNonResNameTab = NULL;
553 }
554 if (pModLX->pbFixupSection)
555 {
556 kHlpFree(pModLX->pbFixupSection);
557 pModLX->pbFixupSection = NULL;
558 }
559 pMod->u32Magic = 0;
560 pMod->pOps = NULL;
561 kHlpFree(pModLX);
562 return rc;
563}
564
565
566/**
567 * Resolves base address aliases.
568 *
569 * @param pModLX The interpreter module instance
570 * @param pBaseAddress The base address, IN & OUT.
571 */
572static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
573{
574 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
575 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
576 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
577 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
578}
579
580
581/** @copydoc kLdrModQuerySymbol */
582static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
583 const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
584 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
585{
586 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
587 KU32 iOrdinal;
588 int rc;
589 const struct b32_bundle *pBundle;
590 K_NOREF(pvBits);
591 K_NOREF(pszVersion);
592
593 /*
594 * Give up at once if there is no entry table.
595 */
596 if (!pModLX->Hdr.e32_enttab)
597 return KLDR_ERR_SYMBOL_NOT_FOUND;
598
599 /*
600 * Translate the symbol name into an ordinal.
601 */
602 if (pchSymbol)
603 {
604 rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
605 if (rc)
606 return rc;
607 }
608
609 /*
610 * Iterate the entry table.
611 * (The entry table is made up of bundles of similar exports.)
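 * (Judging from the fields used below, a bundle starts with an 8-bit entry
 * count and an 8-bit type; non-empty bundles add a 16-bit object number and
 * are followed by b32_cnt packed entries whose size depends on the type,
 * see s_cbEntry.)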
612 */
613 iOrdinal = 1;
614 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
615 while (pBundle->b32_cnt && iOrdinal <= iSymbol)
616 {
617 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
618
619 /*
620 * Check for a hit first.
621 */
622 iOrdinal += pBundle->b32_cnt;
623 if (iSymbol < iOrdinal)
624 {
625 KU32 offObject;
626 const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
627 + (iSymbol - (iOrdinal - pBundle->b32_cnt))
628 * s_cbEntry[pBundle->b32_type]);
629
630 /*
631 * Calculate the return address.
632 */
633 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
634 switch (pBundle->b32_type)
635 {
636 /* empty bundles are placeholders for unused ordinal ranges. */
637 case EMPTY:
638 return KLDR_ERR_SYMBOL_NOT_FOUND;
639
640 /* e32_flags + a 16-bit offset. */
641 case ENTRY16:
642 offObject = pEntry->e32_variant.e32_offset.offset16;
643 if (pfKind)
644 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
645 break;
646
647 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
648 case GATE16:
649 offObject = pEntry->e32_variant.e32_callgate.offset;
650 if (pfKind)
651 *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
652 break;
653
654 /* e32_flags + a 32-bit offset. */
655 case ENTRY32:
656 offObject = pEntry->e32_variant.e32_offset.offset32;
657 if (pfKind)
658 *pfKind = KLDRSYMKIND_32BIT;
659 break;
660
661 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
662 case ENTRYFWD:
663 return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);
664
665 default:
666 /* anyone actually using TYPEINFO will end up here. */
667 KLDRMODLX_ASSERT(!"Bad bundle type");
668 return KLDR_ERR_LX_BAD_BUNDLE;
669 }
670
671 /*
672 * Validate the object number and calc the return address.
673 */
674 if ( pBundle->b32_obj <= 0
675 || pBundle->b32_obj > pMod->cSegments)
676 return KLDR_ERR_LX_BAD_BUNDLE;
677 if (puValue)
678 *puValue = BaseAddress
679 + offObject
680 + pMod->aSegments[pBundle->b32_obj - 1].RVA;
681 return 0;
682 }
683
684 /*
685 * Skip the bundle.
686 */
687 if (pBundle->b32_type > ENTRYFWD)
688 {
689 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
690 return KLDR_ERR_LX_BAD_BUNDLE;
691 }
692 if (pBundle->b32_type == 0)
693 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
694 else
695 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
696 }
697
698 return KLDR_ERR_SYMBOL_NOT_FOUND;
699}
700
701
702/**
703 * Do name lookup.
704 *
705 * @returns See kLdrModQuerySymbol.
706 * @param pModLX The module to lookup the symbol in.
707 * @param pchSymbol The symbol to lookup.
708 * @param cchSymbol The symbol name length.
709 * @param piSymbol Where to store the symbol ordinal.
710 */
711static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KSIZE cchSymbol, KU32 *piSymbol)
712{
713
714 /*
715 * First do a hash table lookup.
716 */
717 /** @todo hash name table for speed. */
718
719 /*
720 * Search the name tables.
721 */
722 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
723 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
724 pchSymbol, cchSymbol);
725 if (!pbName)
726 {
727 if (!pModLX->pbNonResNameTab)
728 {
729 /* lazy load it */
730 /** @todo non-resident name table. */
731 }
732 if (pModLX->pbNonResNameTab)
733 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbNonResNameTab,
734 pModLX->pbNonResNameTabLast - pModLX->pbNonResNameTab + 1,
735 pchSymbol, cchSymbol);
736 }
737 if (!pbName)
738 return KLDR_ERR_SYMBOL_NOT_FOUND;
739
740 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
741 return 0;
742}
743
744
745#if 0
746/**
747 * Hash a symbol using the algorithm from sdbm.
748 *
749 * The following is the documentation of the original sdbm functions:
750 *
751 * This algorithm was created for sdbm (a public-domain reimplementation of
752 * ndbm) database library. it was found to do well in scrambling bits,
753 * causing better distribution of the keys and fewer splits. it also happens
754 * to be a good general hashing function with good distribution. the actual
755 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
756 * is the faster version used in gawk. [there is even a faster, duff-device
757 * version] the magic constant 65599 was picked out of thin air while
758 * experimenting with different constants, and turns out to be a prime.
759 * this is one of the algorithms used in berkeley db (see sleepycat) and
760 * elsewhere.
761 */
762static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
763{
764 KU32 hash = 0;
765 int ch;
766
767 while ( cchSymbol-- > 0
768 && (ch = *(unsigned const char *)pchSymbol++))
769 hash = ch + (hash << 6) + (hash << 16) - hash;
770
771 return hash;
772}
773#endif
774
775
776/**
777 * Lookup a name table entry by name.
778 *
779 * @returns Pointer to the name table entry if found.
780 * @returns NULL if not found.
781 * @param pbNameTable Pointer to the name table that should be searched.
782 * @param cbNameTable The size of the name table.
783 * @param pchSymbol The name of the symbol we're looking for.
784 * @param cchSymbol The length of the symbol name.
785 */
786static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KSSIZE cbNameTable,
787 const char *pchSymbol, KSIZE cchSymbol)
788{
789 /*
790 * Determine the name length up front so we can skip anything which doesn't match the length.
791 */
792 KU8 cbSymbol8Bit = (KU8)cchSymbol;
793 if (cbSymbol8Bit != cchSymbol)
794 return NULL; /* too long. */
795
796 /*
797 * Walk the name table.
798 */
799 while (*pbNameTable != 0 && cbNameTable > 0)
800 {
801 const KU8 cbName = *pbNameTable;
802
803 cbNameTable -= cbName + 1 + 2;
804 if (cbNameTable < 0)
805 break;
806
807 if ( cbName == cbSymbol8Bit
808 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
809 return pbNameTable;
810
811 /* next entry */
812 pbNameTable += cbName + 1 + 2;
813 }
814
815 return NULL;
816}
817
818
819/**
820 * Deal with a forwarder entry.
821 *
822 * @returns See kLdrModQuerySymbol.
823 * @param pModLX The LX module interpreter instance.
824 * @param pEntry The forwarder entry.
825 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
826 * @param pvUser The user argument for the callback.
827 * @param puValue Where to put the value. (optional)
828 * @param pfKind Where to put the symbol kind. (optional)
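 * @remark A forwarder names its target either by procedure name or, when the
 * name starts with '#' (e.g. "#42", or a 0x-prefixed hex number), by
 * import ordinal; the parsing below reflects this convention.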
829 */
830static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
831 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
832{
833 int rc;
834 KU32 iSymbol;
835 const char *pchSymbol;
836 KU8 cchSymbol;
837
838 if (!pfnGetForwarder)
839 return KLDR_ERR_FORWARDER_SYMBOL;
840
841 /*
842 * Validate the entry import module ordinal.
843 */
844 if ( !pEntry->e32_variant.e32_fwd.modord
845 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
846 return KLDR_ERR_LX_BAD_FORWARDER;
847
848 /*
849 * Figure out the parameters.
850 */
851 if (pEntry->e32_flags & FWD_ORDINAL)
852 {
853 iSymbol = pEntry->e32_variant.e32_fwd.value;
854 pchSymbol = NULL; /* no symbol name. */
855 cchSymbol = 0;
856 }
857 else
858 {
859 const KU8 *pbName;
860
861 /* load the fixup section if necessary. */
862 if (!pModLX->pbImportProcs)
863 {
864 rc = kldrModLXDoLoadFixupSection(pModLX);
865 if (rc)
866 return rc;
867 }
868
869 /* Make name pointer. */
870 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
871 if ( pbName >= pModLX->pbFixupSectionLast
872 || pbName < pModLX->pbFixupSection
873 || !*pbName)
874 return KLDR_ERR_LX_BAD_FORWARDER;
875
876
877 /* check for '#' name. */
878 if (pbName[1] == '#')
879 {
880 KU8 cbLeft = *pbName;
881 const KU8 *pb = pbName + 1;
882 unsigned uBase;
883
884 /* base detection */
885 uBase = 10;
886 if ( cbLeft > 1
887 && pb[1] == '0'
888 && (pb[2] == 'x' || pb[2] == 'X'))
889 {
890 uBase = 16;
891 pb += 2;
892 cbLeft -= 2;
893 }
894
895 /* ascii to integer */
896 iSymbol = 0;
897 while (cbLeft-- > 0)
898 {
899 /* convert char to digit. */
900 unsigned uDigit = *pb++;
901 if (uDigit >= '0' && uDigit <= '9')
902 uDigit -= '0';
903 else if (uDigit >= 'a' && uDigit <= 'z')
904 uDigit -= 'a' + 10;
905 else if (uDigit >= 'A' && uDigit <= 'Z')
906 uDigit -= 'A' + 10;
907 else if (!uDigit)
908 break;
909 else
910 return KLDR_ERR_LX_BAD_FORWARDER;
911 if (uDigit >= uBase)
912 return KLDR_ERR_LX_BAD_FORWARDER;
913
914 /* insert the digit */
915 iSymbol *= uBase;
916 iSymbol += uDigit;
917 }
918 if (!iSymbol)
919 return KLDR_ERR_LX_BAD_FORWARDER;
920
921 pchSymbol = NULL; /* no symbol name. */
922 cchSymbol = 0;
923 }
924 else
925 {
926 pchSymbol = (char *)pbName + 1;
927 cchSymbol = *pbName;
928 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
929 }
930 }
931
932 /*
933 * Resolve the forwarder.
934 */
935 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
936 if (!rc && pfKind)
937 *pfKind |= KLDRSYMKIND_FORWARDER;
938 return rc;
939}
940
941
942/**
943 * Loads the fixup section from the executable image.
944 *
945 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
946 *
947 * @returns 0 on success, non-zero kLdr or native status code on failure.
948 * @param pModLX The LX module interpreter instance.
949 */
950static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
951{
952 int rc;
953 KU32 off;
954 void *pv;
955
956 pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
957 if (!pv)
958 return KERR_NO_MEMORY;
959
960 off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
961 rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
962 off + pModLX->offHdr);
963 if (!rc)
964 {
965 pModLX->pbFixupSection = pv;
966 pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
967 KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
968 if (pModLX->Hdr.e32_fpagetab)
969 pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
970 KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
971 if (pModLX->Hdr.e32_frectab)
972 pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
973 KLDRMODLX_ASSERT(!pModLX->pbImportMods);
974 if (pModLX->Hdr.e32_impmod)
975 pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
976 KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
977 if (pModLX->Hdr.e32_impproc)
978 pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
979 }
980 else
981 kHlpFree(pv);
982 return rc;
983}
984
985
986/** @copydoc kLdrModEnumSymbols */
987static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
988 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
989{
990 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
991 const struct b32_bundle *pBundle;
992 KU32 iOrdinal;
993 int rc = 0;
994 K_NOREF(pvBits);
995 K_NOREF(fFlags);
996
997 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
998
999 /*
1000 * Enumerate the entry table.
1001 * (The entry table is made up of bundles of similar exports.)
1002 */
1003 iOrdinal = 1;
1004 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
1005 while (pBundle->b32_cnt && iOrdinal)
1006 {
1007 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
1008
1009 /*
1010 * Enum the entries in the bundle.
1011 */
1012 if (pBundle->b32_type != EMPTY)
1013 {
1014 const struct e32_entry *pEntry;
1015 KSIZE cbEntry;
1016 KLDRADDR BundleRVA;
1017 unsigned cLeft;
1018
1019
1020 /* Validate the bundle. */
1021 switch (pBundle->b32_type)
1022 {
1023 case ENTRY16:
1024 case GATE16:
1025 case ENTRY32:
1026 if ( pBundle->b32_obj <= 0
1027 || pBundle->b32_obj > pMod->cSegments)
1028 return KLDR_ERR_LX_BAD_BUNDLE;
1029 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1030 break;
1031
1032 case ENTRYFWD:
1033 BundleRVA = 0;
1034 break;
1035
1036 default:
1037 /* anyone actually using TYPEINFO will end up here. */
1038 KLDRMODLX_ASSERT(!"Bad bundle type");
1039 return KLDR_ERR_LX_BAD_BUNDLE;
1040 }
1041
1042 /* iterate the bundle entries. */
1043 cbEntry = s_cbEntry[pBundle->b32_type];
1044 pEntry = (const struct e32_entry *)(pBundle + 1);
1045 cLeft = pBundle->b32_cnt;
1046 while (cLeft-- > 0)
1047 {
1048 KLDRADDR uValue;
1049 KU32 fKind;
1050 int fFoundName;
1051 const KU8 *pbName;
1052
1053 /*
1054 * Calc the symbol value and kind.
1055 */
1056 switch (pBundle->b32_type)
1057 {
1058 /* e32_flags + a 16-bit offset. */
1059 case ENTRY16:
1060 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1061 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1062 break;
1063
1064 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1065 case GATE16:
1066 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1067 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1068 break;
1069
1070 /* e32_flags + a 32-bit offset. */
1071 case ENTRY32:
1072 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1073 fKind = KLDRSYMKIND_32BIT;
1074 break;
1075
1076 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1077 case ENTRYFWD:
1078 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1079 fKind = KLDRSYMKIND_FORWARDER;
1080 break;
1081
1082 default: /* shut up gcc. */
1083 uValue = 0;
1084 fKind = KLDRSYMKIND_NO_BIT | KLDRSYMKIND_NO_TYPE;
1085 break;
1086 }
1087
1088 /*
1089 * Any symbol names?
1090 */
1091 fFoundName = 0;
1092
1093 /* resident name table. */
1094 pbName = pModLX->pbResNameTab;
1095 if (pbName)
1096 {
1097 do
1098 {
1099 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1100 if (!pbName)
1101 break;
1102 fFoundName = 1;
1103 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1104 if (rc)
1105 return rc;
1106
1107 /* skip to the next entry */
1108 pbName += 1 + *pbName + 2;
1109 } while (pbName < pModLX->pbLoaderSectionLast);
1110 }
1111
1112 /* non-resident name table. */
1113 pbName = pModLX->pbNonResNameTab;
1114 /** @todo lazy load the non-resident name table. */
1115 if (pbName)
1116 {
1117 do
1118 {
1119 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1120 if (!pbName)
1121 break;
1122 fFoundName = 1;
1123 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1124 if (rc)
1125 return rc;
1126
1127 /* skip to the next entry */
1128 pbName += 1 + *pbName + 2;
1129 } while (pbName < pModLX->pbNonResNameTabLast);
1130 }
1131
1132 /*
1133 * If no names, call once with the ordinal only.
1134 */
1135 if (!fFoundName)
1136 {
1137 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1138 if (rc)
1139 return rc;
1140 }
1141
1142 /* next */
1143 iOrdinal++;
1144 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1145 }
1146 }
1147
1148 /*
1149 * The next bundle.
1150 */
1151 if (pBundle->b32_type > ENTRYFWD)
1152 {
1153 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1154 return KLDR_ERR_LX_BAD_BUNDLE;
1155 }
1156 if (pBundle->b32_type == 0)
1157 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1158 else
1159 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1160 }
1161
1162 return 0;
1163}
1164
1165
1166/**
1167 * Lookup a name table entry by ordinal.
1168 *
1169 * @returns Pointer to the name table entry if found.
1170 * @returns NULL if not found.
1171 * @param pbNameTable Pointer to the name table that should be searched.
1172 * @param cbNameTable The size of the name table.
1173 * @param iOrdinal The ordinal to search for.
1174 */
1175static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KSSIZE cbNameTable, KU32 iOrdinal)
1176{
1177 while (*pbNameTable != 0 && cbNameTable > 0)
1178 {
1179 const KU8 cbName = *pbNameTable;
1180 KU32 iName;
1181
1182 cbNameTable -= cbName + 1 + 2;
1183 if (cbNameTable < 0)
1184 break;
1185
1186 iName = *(pbNameTable + cbName + 1)
1187 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1188 if (iName == iOrdinal)
1189 return pbNameTable;
1190
1191 /* next entry */
1192 pbNameTable += cbName + 1 + 2;
1193 }
1194
1195 return NULL;
1196}
1197
1198
1199/** @copydoc kLdrModGetImport */
1200static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
1201{
1202 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1203 const KU8 *pb;
1204 int rc;
1205 K_NOREF(pvBits);
1206
1207 /*
1208 * Validate
1209 */
1210 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1211 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1212
1213 /*
1214 * Lazily load the fixup section.
1215 */
1216 if (!pModLX->pbImportMods)
1217 {
1218 rc = kldrModLXDoLoadFixupSection(pModLX);
1219 if (rc)
1220 return rc;
1221 }
1222
1223 /*
1224 * Iterate the module import table until we reach the requested import ordinal.
1225 */
1226 pb = pModLX->pbImportMods;
1227 while (iImport-- > 0)
1228 pb += *pb + 1;
1229
1230 /*
1231 * Copy out the result.
1232 */
1233 if (*pb < cchName)
1234 {
1235 kHlpMemCopy(pszName, pb + 1, *pb);
1236 pszName[*pb] = '\0';
1237 rc = 0;
1238 }
1239 else
1240 {
1241 kHlpMemCopy(pszName, pb + 1, cchName);
1242 if (cchName)
1243 pszName[cchName - 1] = '\0';
1244 rc = KERR_BUFFER_OVERFLOW;
1245 }
1246
1247 return rc;
1248}
1249
1250
1251/** @copydoc kLdrModNumberOfImports */
1252static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1253{
1254 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1255 K_NOREF(pvBits);
1256 return pModLX->Hdr.e32_impmodcnt;
1257}
1258
1259
1260/** @copydoc kLdrModGetStackInfo */
1261static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
1262{
1263 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1264 const KU32 i = pModLX->Hdr.e32_stackobj;
1265 K_NOREF(pvBits);
1266
1267 if ( i
1268 && i <= pMod->cSegments
1269 && pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
1270 && pModLX->Hdr.e32_stacksize
1271 && pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
1272 {
1273
1274 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1275 pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
1276 pStackInfo->Address = BaseAddress
1277 + pMod->aSegments[i - 1].RVA
1278 + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
1279 }
1280 else
1281 {
1282 pStackInfo->Address = NIL_KLDRADDR;
1283 pStackInfo->LinkAddress = NIL_KLDRADDR;
1284 }
1285 pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
1286 pStackInfo->cbStackThread = 0;
1287
1288 return 0;
1289}
1290
1291
1292/** @copydoc kLdrModQueryMainEntrypoint */
1293static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1294{
1295 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1296 K_NOREF(pvBits);
1297
1298 /*
1299 * Convert the address from the header.
1300 */
1301 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1302 *pMainEPAddress = pModLX->Hdr.e32_startobj
1303 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1304 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1305 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1306 : NIL_KLDRADDR;
1307 return 0;
1308}
1309
1310
1311/** @copydoc kLdrModEnumDbgInfo */
1312static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1313{
1314 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1315 K_NOREF(pfnCallback);
1316 K_NOREF(pvUser);
1317
1318 /*
1319 * Quit immediately if no debug info.
1320 */
1321 if (kldrModLXHasDbgInfo(pMod, pvBits))
1322 return 0;
1323#if 0
1324 /*
1325 * Read the debug info and look for familiar magics and structures.
1326 */
1327 /** @todo */
1328#endif
1329
1330 return 0;
1331}
1332
1333
1334/** @copydoc kLdrModHasDbgInfo */
1335static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1336{
1337 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1338 K_NOREF(pvBits);
1339
1340 /*
1341 * Don't currently bother with linkers which don't advertise it in the header.
1342 */
1343 if ( !pModLX->Hdr.e32_debuginfo
1344 || !pModLX->Hdr.e32_debuglen)
1345 return KLDR_ERR_NO_DEBUG_INFO;
1346 return 0;
1347}
1348
1349
1350/** @copydoc kLdrModMap */
1351static int kldrModLXMap(PKLDRMOD pMod)
1352{
1353 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1354 unsigned fFixed;
1355 void *pvBase;
1356 int rc;
1357
1358 /*
1359 * Already mapped?
1360 */
1361 if (pModLX->pvMapping)
1362 return KLDR_ERR_ALREADY_MAPPED;
1363
1364 /*
1365 * Allocate memory for it.
1366 */
1367 /* fixed image? */
1368 fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
1369 || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
1370 if (!fFixed)
1371 pvBase = NULL;
1372 else
1373 {
1374 pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
1375 if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
1376 return KLDR_ERR_ADDRESS_OVERFLOW;
1377 }
1378 rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
1379 if (rc)
1380 return rc;
1381
1382 /*
1383 * Load the bits, apply page protection, and update the segment table.
1384 */
1385 rc = kldrModLXDoLoadBits(pModLX, pvBase);
1386 if (!rc)
1387 rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
1388 if (!rc)
1389 {
1390 KU32 i;
1391 for (i = 0; i < pMod->cSegments; i++)
1392 {
1393 if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
1394 pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
1395 }
1396 pModLX->pvMapping = pvBase;
1397 }
1398 else
1399 kHlpPageFree(pvBase, pModLX->cbMapped);
1400 return rc;
1401}
1402
1403
1404/**
1405 * Loads the LX pages into the specified memory mapping.
1406 *
1407 * @returns 0 on success.
1408 * @returns non-zero kLdr or OS status code on failure.
1409 *
1410 * @param pModLX The LX module interpreter instance.
1411 * @param pvBits Where to load the bits.
1412 */
1413static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1414{
1415 const PKRDR pRdr = pModLX->pMod->pRdr;
1416 KU8 *pbTmpPage = NULL;
1417 int rc = 0;
1418 KU32 i;
1419
1420 /*
1421 * Iterate the segments.
1422 */
1423 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1424 {
1425 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1426 const KU32 cPages = (KU32)(pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN);
1427 KU32 iPage;
1428 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1429
1430 /*
1431 * Iterate the page map pages.
1432 */
1433 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1434 {
1435 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1436 switch (pMap->o32_pageflags)
1437 {
1438 case VALID:
1439 if (pMap->o32_pagesize == OBJPAGELEN)
1440 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1441 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1442 else if (pMap->o32_pagesize < OBJPAGELEN)
1443 {
1444 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1445 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1446 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1447 }
1448 else
1449 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1450 break;
1451
1452 case ITERDATA:
1453 case ITERDATA2:
1454 /* make sure we've got a temp page. */
1455 if (!pbTmpPage)
1456 {
1457 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1458 if (!pbTmpPage)
1459 break;
1460 }
1461 /* validate the size. */
1462 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1463 {
1464 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1465 break;
1466 }
1467
1468 /* read it and ensure 4 extra zero bytes. */
1469 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1470 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1471 if (rc)
1472 break;
1473 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1474
1475 /* unpack it into the image page. */
1476 if (pMap->o32_pageflags == ITERDATA2)
1477 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1478 else
1479 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1480 break;
1481
1482 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1483 case ZEROED:
1484 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1485 break;
1486
1487 case RANGE:
1488 KLDRMODLX_ASSERT(!"RANGE");
1489 default:
1490 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1491 break;
1492 }
1493 }
1494 if (rc)
1495 break;
1496
1497 /*
1498 * Zero the remaining pages.
1499 */
1500 if (iPage < cPages)
1501 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1502 }
1503
1504 if (pbTmpPage)
1505 kHlpFree(pbTmpPage);
1506 return rc;
1507}
1508
1509
1510/**
1511 * Unpacks iterdata (aka EXEPACK).
1512 *
1513 * @returns 0 on success, non-zero kLdr status code on failure.
1514 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1515 * @param pbSrc The compressed source data.
1516 * @param cbSrc The file size of the compressed data. The source buffer
1517 * contains 4 additional zero bytes.
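 * @remark Going by the code below, each iteration record appears to be a 4 byte
 * header (a 16-bit repeat count LX_nIter and a 16-bit data size LX_nBytes)
 * followed by LX_nBytes of data that is to be repeated LX_nIter times.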
1518 */
1519static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1520{
1521 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1522 int cbDst = OBJPAGELEN;
1523
1524 /* Validate size of data. */
1525 if (cbSrc >= (int)OBJPAGELEN - 2)
1526 return KLDR_ERR_LX_BAD_ITERDATA;
1527
1528 /*
1529 * Expand the page.
1530 */
1531 while (cbSrc > 0 && pIter->LX_nIter)
1532 {
1533 if (pIter->LX_nBytes == 1)
1534 {
1535 /*
1536 * Special case - one databyte.
1537 */
1538 cbDst -= pIter->LX_nIter;
1539 if (cbDst < 0)
1540 return KLDR_ERR_LX_BAD_ITERDATA;
1541
1542 cbSrc -= 4 + 1;
1543 if (cbSrc < -4)
1544 return KLDR_ERR_LX_BAD_ITERDATA;
1545
1546 kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1547 pbDst += pIter->LX_nIter;
1548 pIter++;
1549 }
1550 else
1551 {
1552 /*
1553 * General.
1554 */
1555 int i;
1556
1557 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1558 if (cbDst < 0)
1559 return KLDR_ERR_LX_BAD_ITERDATA;
1560
1561 cbSrc -= 4 + pIter->LX_nBytes;
1562 if (cbSrc < -4)
1563 return KLDR_ERR_LX_BAD_ITERDATA;
1564
1565 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1566 kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1567 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1568 }
1569 }
1570
1571 /*
1572 * Zero remainder of the page.
1573 */
1574 if (cbDst > 0)
1575 kHlpMemSet(pbDst, 0, cbDst);
1576
1577 return 0;
1578}
1579
1580
1581/**
1582 * Unpacks iterdata (aka EXEPACK).
1583 *
1584 * @returns 0 on success, non-zero kLdr status code on failure.
1585 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1586 * @param pbSrc The compressed source data.
1587 * @param cbSrc The file size of the compressed data. The source buffer
1588 * contains 4 additional zero bytes.
1589 */
1590static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1591{
1592 int cbDst = OBJPAGELEN;
1593
1594 while (cbSrc > 0)
1595 {
1596 /*
1597 * Bit 0 and 1 is the encoding type.
1598 */
1599 switch (*pbSrc & 0x03)
1600 {
1601 /*
1602 *
1603 * 0 1 2 3 4 5 6 7
1604 * type | |
1605 * ----------------
1606 * cb <cb bytes of data>
1607 *
1608 * Bits 2-7 is, if not zero, the length of an uncompressed run
1609 * starting at the following byte.
1610 *
1611 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1612 * type | | | | | |
1613 * ---------------- ---------------------- -----------------------
1614 * zero cb char to multiply
1615 *
1616 * If the bits are zero, the following two bytes describe a 1-byte iteration
1617 * run. The first byte is the count, the second is the byte to copy. A count
1618 * of zero means end of data, and we simply stop. In that case the rest of
1619 * the data should be zero.
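 * A couple of illustrative encodings: the byte 0x0C (type 0, cb = 3) says
 * "copy the next 3 bytes verbatim", while the sequence 00 05 41 expands to
 * five 'A' bytes.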
1620 */
1621 case 0:
1622 {
1623 if (*pbSrc)
1624 {
1625 const int cb = *pbSrc >> 2;
1626 cbDst -= cb;
1627 if (cbDst < 0)
1628 return KLDR_ERR_LX_BAD_ITERDATA2;
1629 cbSrc -= cb + 1;
1630 if (cbSrc < 0)
1631 return KLDR_ERR_LX_BAD_ITERDATA2;
1632 kHlpMemCopy(pbDst, ++pbSrc, cb);
1633 pbDst += cb;
1634 pbSrc += cb;
1635 }
1636 else if (cbSrc < 2)
1637 return KLDR_ERR_LX_BAD_ITERDATA2;
1638 else
1639 {
1640 const int cb = pbSrc[1];
1641 if (!cb)
1642 goto l_endloop;
1643 cbDst -= cb;
1644 if (cbDst < 0)
1645 return KLDR_ERR_LX_BAD_ITERDATA2;
1646 cbSrc -= 3;
1647 if (cbSrc < 0)
1648 return KLDR_ERR_LX_BAD_ITERDATA2;
1649 kHlpMemSet(pbDst, pbSrc[2], cb);
1650 pbDst += cb;
1651 pbSrc += 3;
1652 }
1653 break;
1654 }
1655
1656
1657 /*
1658 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1659 * type | | | | | |
1660 * ---- ------- -------------------------
1661 * cb1 cb2 - 3 offset <cb1 bytes of data>
1662 *
1663 * Two bytes laid out as described above, followed by cb1 bytes of data to be copied.
1664 * The cb2(+3) and offset describe an amount of data to be copied from the expanded
1665 * data relative to the current position. The data is copied as you would expect it to be.
1666 */
1667 case 1:
1668 {
1669 cbSrc -= 2;
1670 if (cbSrc < 0)
1671 return KLDR_ERR_LX_BAD_ITERDATA2;
1672 else
1673 {
1674 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1675 const int cb1 = (*pbSrc >> 2) & 3;
1676 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1677
1678 pbSrc += 2;
1679 cbSrc -= cb1;
1680 if (cbSrc < 0)
1681 return KLDR_ERR_LX_BAD_ITERDATA2;
1682 cbDst -= cb1;
1683 if (cbDst < 0)
1684 return KLDR_ERR_LX_BAD_ITERDATA2;
1685 kHlpMemCopy(pbDst, pbSrc, cb1);
1686 pbDst += cb1;
1687 pbSrc += cb1;
1688
1689 if (off > OBJPAGELEN - (unsigned)cbDst)
1690 return KLDR_ERR_LX_BAD_ITERDATA2;
1691 cbDst -= cb2;
1692 if (cbDst < 0)
1693 return KLDR_ERR_LX_BAD_ITERDATA2;
1694 kHlpMemMove(pbDst, pbDst - off, cb2);
1695 pbDst += cb2;
1696 }
1697 break;
1698 }
1699
1700
1701 /*
1702 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1703 * type | | | |
1704 * ---- ----------------------------------
1705 * cb-3 offset
1706 *
1707 * Two bytes laid out as described above.
1708 * The cb(+3) and offset describe an amount of data to be copied from the expanded
1709 * data relative to the current position.
1710 *
1711 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1712 */
1713 case 2:
1714 {
1715 cbSrc -= 2;
1716 if (cbSrc < 0)
1717 return KLDR_ERR_LX_BAD_ITERDATA2;
1718 else
1719 {
1720 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1721 const int cb = ((*pbSrc >> 2) & 3) + 3;
1722
1723 pbSrc += 2;
1724 if (off > OBJPAGELEN - (unsigned)cbDst)
1725 return KLDR_ERR_LX_BAD_ITERDATA2;
1726 cbDst -= cb;
1727 if (cbDst < 0)
1728 return KLDR_ERR_LX_BAD_ITERDATA2;
1729 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1730 pbDst += cb;
1731 }
1732 break;
1733 }
1734
1735
1736 /*
1737 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1738 * type | | | | | |
1739 * ---------- ---------------- ----------------------------------
1740 * cb1 cb2 offset <cb1 bytes of data>
1741 *
1742 * Three bytes laid out as described above, followed by cb1 bytes of data to be copied.
1743 * The cb2 and offset describe an amount of data to be copied from the expanded
1744 * data relative to the current position.
1745 *
1746 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1747 */
1748 case 3:
1749 {
1750 cbSrc -= 3;
1751 if (cbSrc < 0)
1752 return KLDR_ERR_LX_BAD_ITERDATA2;
1753 else
1754 {
1755 const int cb1 = (*pbSrc >> 2) & 0xf;
1756 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1757 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1758
1759 pbSrc += 3;
1760 cbSrc -= cb1;
1761 if (cbSrc < 0)
1762 return KLDR_ERR_LX_BAD_ITERDATA2;
1763 cbDst -= cb1;
1764 if (cbDst < 0)
1765 return KLDR_ERR_LX_BAD_ITERDATA2;
1766 kHlpMemCopy(pbDst, pbSrc, cb1);
1767 pbDst += cb1;
1768 pbSrc += cb1;
1769
1770 if (off > OBJPAGELEN - (unsigned)cbDst)
1771 return KLDR_ERR_LX_BAD_ITERDATA2;
1772 cbDst -= cb2;
1773 if (cbDst < 0)
1774 return KLDR_ERR_LX_BAD_ITERDATA2;
1775 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1776 pbDst += cb2;
1777 }
1778 break;
1779 }
1780 } /* type switch. */
1781 } /* unpack loop */
1782
1783l_endloop:
1784
1785
1786 /*
1787 * Zero remainder of the page.
1788 */
1789 if (cbDst > 0)
1790 kHlpMemSet(pbDst, 0, cbDst);
1791
1792 return 0;
1793}
1794
1795
1796/**
1797 * Special memcpy employed by the iterdata2 algorithm.
1798 *
1799 * Emulates a 16-bit memcpy (copying 16 bits at a time) and the effects this
1800 * has when the source is very close to the destination.
1801 *
1802 * @param pbDst Destination pointer.
1803 * @param pbSrc Source pointer. Will always be <= pbDst.
1804 * @param cb Amount of data to be copied.
1805 * @remark This assumes that unaligned word and dword access is fine.
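 * @remark Illustration: with pbSrc == pbDst - 2 the word-wise copy replicates
 * the two preceding bytes, so "AB" expands into "ABABAB..." - a result
 * a plain memmove of the source range would not give.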
1806 */
1807static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
1808{
1809 switch (pbDst - pbSrc)
1810 {
1811 case 0:
1812 case 1:
1813 case 2:
1814 case 3:
1815 /* 16-bit copy (unaligned) */
1816 if (cb & 1)
1817 *pbDst++ = *pbSrc++;
1818 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1819 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1820 break;
1821
1822 default:
1823 /* 32-bit copy (unaligned) */
1824 if (cb & 1)
1825 *pbDst++ = *pbSrc++;
1826 if (cb & 2)
1827 {
1828 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1829 pbDst += 2;
1830 pbSrc += 2;
1831 }
1832 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1833 *(KU32 *)pbDst = *(const KU32 *)pbSrc;
1834 break;
1835 }
1836}
1837
1838
1839/**
1840 * Unprotects or protects the specified image mapping.
1841 *
1842 * @returns 0 on success.
1843 * @returns non-zero kLdr or OS status code on failure.
1844 *
1845 * @param pModLX The LX module interpreter instance.
1846 * @param pvBits The mapping to protect.
1847 * @param fUnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1848 * protect according to the object table.
1849 */
1850static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1851{
1852 KU32 i;
1853 PKLDRMOD pMod = pModLX->pMod;
1854
1855 /*
1856 * Change object protection.
1857 */
1858 for (i = 0; i < pMod->cSegments; i++)
1859 {
1860 int rc;
1861 void *pv;
1862 KPROT enmProt;
1863
1864 /* calc new protection. */
1865 enmProt = pMod->aSegments[i].enmProt;
1866 if (fUnprotectOrProtect)
1867 {
1868 switch (enmProt)
1869 {
1870 case KPROT_NOACCESS:
1871 case KPROT_READONLY:
1872 case KPROT_READWRITE:
1873 case KPROT_WRITECOPY:
1874 enmProt = KPROT_READWRITE;
1875 break;
1876 case KPROT_EXECUTE:
1877 case KPROT_EXECUTE_READ:
1878 case KPROT_EXECUTE_READWRITE:
1879 case KPROT_EXECUTE_WRITECOPY:
1880 enmProt = KPROT_EXECUTE_READWRITE;
1881 break;
1882 default:
1883 KLDRMODLX_ASSERT(!"bad enmProt");
1884 return -1;
1885 }
1886 }
1887 else
1888 {
1889 /* copy on write -> normal write. */
1890 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1891 enmProt = KPROT_EXECUTE_READWRITE;
1892 else if (enmProt == KPROT_WRITECOPY)
1893 enmProt = KPROT_READWRITE;
1894 }
1895
1896
1897 /* calc the address and set page protection. */
1898 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1899
1900 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1901 if (rc)
1902            return rc;
1903
1904 /** @todo the gap page should be marked NOACCESS! */
1905 }
1906
1907 return 0;
1908}
1909
1910
1911/** @copydoc kLdrModUnmap */
1912static int kldrModLXUnmap(PKLDRMOD pMod)
1913{
1914 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1915 KU32 i;
1916 int rc;
1917
1918 /*
1919 * Mapped?
1920 */
1921 if (!pModLX->pvMapping)
1922 return KLDR_ERR_NOT_MAPPED;
1923
1924 /*
1925 * Free the mapping and update the segments.
1926 */
1927 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1928 KLDRMODLX_ASSERT(!rc);
1929 pModLX->pvMapping = NULL;
1930
1931 for (i = 0; i < pMod->cSegments; i++)
1932 pMod->aSegments[i].MapAddress = 0;
1933
1934 return rc;
1935}
1936
1937
1938/** @copydoc kLdrModAllocTLS */
1939static int kldrModLXAllocTLS(PKLDRMOD pMod, void *pvMapping)
1940{
1941 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1942
1943 /* no tls, just do the error checking. */
1944 if ( pvMapping == KLDRMOD_INT_MAP
1945        && !pModLX->pvMapping)
1946 return KLDR_ERR_NOT_MAPPED;
1947 return 0;
1948}
1949
1950
1951/** @copydoc kLdrModFreeTLS */
1952static void kldrModLXFreeTLS(PKLDRMOD pMod, void *pvMapping)
1953{
1954 /* no tls. */
1955 K_NOREF(pMod);
1956 K_NOREF(pvMapping);
1957
1958}
1959
1960
1961/** @copydoc kLdrModReload */
1962static int kldrModLXReload(PKLDRMOD pMod)
1963{
1964 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1965 int rc, rc2;
1966
1967 /*
1968 * Mapped?
1969 */
1970 if (!pModLX->pvMapping)
1971 return KLDR_ERR_NOT_MAPPED;
1972
1973 /*
1974 * Before doing anything we'll have to make all pages writable.
1975 */
1976 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1977 if (rc)
1978 return rc;
1979
1980 /*
1981 * Load the bits again.
1982 */
1983 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1984
1985 /*
1986 * Restore protection.
1987 */
1988 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1989 if (!rc && rc2)
1990 rc = rc2;
1991 return rc;
1992}
1993
1994
1995/** @copydoc kLdrModFixupMapping */
1996static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1997{
1998 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1999 int rc, rc2;
2000
2001 /*
2002 * Mapped?
2003 */
2004 if (!pModLX->pvMapping)
2005 return KLDR_ERR_NOT_MAPPED;
2006
2007 /*
2008 * Before doing anything we'll have to make all pages writable.
2009 */
2010 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
2011 if (rc)
2012 return rc;
2013
2014 /*
2015 * Apply fixups and resolve imports.
2016 */
2017 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
2018 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2019
2020 /*
2021 * Restore protection.
2022 */
2023 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
2024 if (!rc && rc2)
2025 rc = rc2;
2026 return rc;
2027}
2028
2029
2030/** @copydoc kLdrModCallInit */
2031static int kldrModLXCallInit(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle)
2032{
2033 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2034 int rc;
2035
2036 /*
2037 * Mapped?
2038 */
2039 if (pvMapping == KLDRMOD_INT_MAP)
2040 {
2041 pvMapping = (void *)pModLX->pvMapping;
2042 if (!pvMapping)
2043 return KLDR_ERR_NOT_MAPPED;
2044 }
2045
2046 /*
2047     * No TLS callbacks to do for LX; just call the init/term function if it's a DLL.
2048 */
2049 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2050 rc = kldrModLXDoCallDLL(pModLX, pvMapping, 0 /* attach */, uHandle);
2051 else
2052 rc = 0;
2053 return rc;
2054}
2055
2056
2057/**
2058 * Call the DLL entrypoint.
2059 *
2060 * @returns 0 on success.
2061 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
2062 * @param pModLX The LX module interpreter instance.
2063 * @param pvMapping The module mapping to use (resolved).
2064 * @param uOp The operation (DLL_*).
2065 * @param uHandle The module handle to present.
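 *
 * @remark The entrypoint is assumed to follow the OS/2 convention of returning
 *         non-zero on success; a zero return on attach is therefore translated
 *         into KLDR_ERR_MODULE_INIT_FAILED, while failures on detach are ignored.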
2066 */
2067static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, void *pvMapping, unsigned uOp, KUPTR uHandle)
2068{
2069 int rc;
2070
2071 /*
2072     * If there is no entrypoint, there isn't anything to be done.
2073 */
2074 if ( !pModLX->Hdr.e32_startobj
2075 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2076 return 0;
2077
2078 /*
2079 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2080 */
2081 rc = kldrModLXDoCall((KUPTR)pvMapping
2082 + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2083 + pModLX->Hdr.e32_eip,
2084 uHandle, uOp, NULL);
2085 if (rc)
2086 rc = 0;
2087 else if (uOp == 0 /* attach */)
2088 rc = KLDR_ERR_MODULE_INIT_FAILED;
2089 else /* detach: ignore failures */
2090 rc = 0;
2091 return rc;
2092}
2093
2094
2095/**
2096 * Do a 3 parameter callback.
2097 *
2098 * @returns 32-bit callback return.
2099 * @param uEntrypoint The address of the function to be called.
2100 * @param uHandle The first argument, the module handle.
2101 * @param uOp The second argument, the reason we're calling.
2102 * @param pvReserved The third argument, a reserved argument (figure this one out).
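 *
 * @remark The arguments are pushed right to left, so the entrypoint effectively sees
 *         (uHandle, uOp, 0) on the stack; ESP is saved and restored around the call,
 *         so it doesn't matter whether the callee pops its own arguments.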
2103 */
2104static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
2105{
2106#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2107 KI32 rc;
2108/** @todo try/except */
2109
2110 /*
2111     * Paranoia: save and restore ESP around the call so the callee's stack cleanup convention doesn't matter.
2112 */
2113# ifdef __GNUC__
2114 __asm__ __volatile__(
2115 "pushl %2\n\t"
2116 "pushl %1\n\t"
2117 "pushl %0\n\t"
2118 "lea 12(%%esp), %2\n\t"
2119 "call *%3\n\t"
2120 "movl %2, %%esp\n\t"
2121 : "=a" (rc)
2122 : "d" (uOp),
2123 "S" (0),
2124 "c" (uEntrypoint),
2125 "0" (uHandle));
2126# elif defined(_MSC_VER)
2127 __asm {
2128 mov eax, [uHandle]
2129 mov edx, [uOp]
2130 mov ecx, 0
2131 mov ebx, [uEntrypoint]
2132 push edi
2133 mov edi, esp
2134 push ecx
2135 push edx
2136 push eax
2137 call ebx
2138 mov esp, edi
2139 pop edi
2140 mov [rc], eax
2141 }
2142# else
2143# error "port me!"
2144# endif
2145 K_NOREF(pvReserved);
2146 return rc;
2147
2148#else
2149 K_NOREF(uEntrypoint);
2150 K_NOREF(uHandle);
2151 K_NOREF(uOp);
2152 K_NOREF(pvReserved);
2153 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2154#endif
2155}
2156
2157
2158/** @copydoc kLdrModCallTerm */
2159static int kldrModLXCallTerm(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle)
2160{
2161 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2162
2163 /*
2164 * Mapped?
2165 */
2166 if (pvMapping == KLDRMOD_INT_MAP)
2167 {
2168 pvMapping = (void *)pModLX->pvMapping;
2169 if (!pvMapping)
2170 return KLDR_ERR_NOT_MAPPED;
2171 }
2172
2173 /*
2174 * Do the call.
2175 */
2176 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2177 kldrModLXDoCallDLL(pModLX, pvMapping, 1 /* detach */, uHandle);
2178
2179 return 0;
2180}
2181
2182
2183/** @copydoc kLdrModCallThread */
2184static int kldrModLXCallThread(PKLDRMOD pMod, void *pvMapping, KUPTR uHandle, unsigned fAttachingOrDetaching)
2185{
2186 /* no thread attach/detach callout. */
2187 K_NOREF(pMod);
2188 K_NOREF(pvMapping);
2189 K_NOREF(uHandle);
2190 K_NOREF(fAttachingOrDetaching);
2191 return 0;
2192}
2193
2194
2195/** @copydoc kLdrModSize */
2196static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2197{
2198 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2199 return pModLX->cbMapped;
2200}
2201
2202
2203/** @copydoc kLdrModGetBits */
2204static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2205{
2206 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2207 int rc;
2208
2209 /*
2210 * Load the image bits.
2211 */
2212 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2213 if (rc)
2214 return rc;
2215
2216 /*
2217 * Perform relocations.
2218 */
2219 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2220
2221}
2222
2223
2224/** @copydoc kLdrModRelocateBits */
2225static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2226 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2227{
2228 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2229 KU32 iSeg;
2230 int rc;
2231
2232 /*
2233     * Do we need to do *anything*?
2234 */
2235 if ( NewBaseAddress == OldBaseAddress
2236 && NewBaseAddress == pModLX->paObjs[0].o32_base
2237 && !pModLX->Hdr.e32_impmodcnt)
2238 return 0;
2239
2240 /*
2241 * Load the fixup section.
2242 */
2243 if (!pModLX->pbFixupSection)
2244 {
2245 rc = kldrModLXDoLoadFixupSection(pModLX);
2246 if (rc)
2247 return rc;
2248 }
2249
2250 /*
2251 * Iterate the segments.
2252 */
2253 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2254 {
2255 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2256 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2257 KU32 iPage;
2258 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2259
2260 /*
2261 * Iterate the page map pages.
2262 */
2263 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2264 {
2265 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2266 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2267 KLDRADDR uValue = NIL_KLDRADDR;
2268 KU32 fKind = 0;
2269 int iSelector;
2270
2271 /* sanity */
2272 if (pbFixupRecEnd < pb)
2273 return KLDR_ERR_BAD_FIXUP;
2274 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2275 return KLDR_ERR_BAD_FIXUP;
2276 if (pb < pModLX->pbFixupSection)
2277 return KLDR_ERR_BAD_FIXUP;
2278
2279 /*
2280 * Iterate the fixup record.
2281 */
2282 while (pb < pbFixupRecEnd)
2283 {
2284 union _rel
2285 {
2286 const KU8 * pb;
2287 const struct r32_rlc *prlc;
2288 } u;
2289
2290 u.pb = pb;
2291                pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pb at the 4th member, i.e. the target data (past stype, flags and the chain count or 16-bit source offset). */
2292
2293 /*
2294 * Figure out the target.
2295 */
2296 switch (u.prlc->nr_flags & NRRTYP)
2297 {
2298 /*
2299 * Internal fixup.
2300 */
2301 case NRRINT:
2302 {
2303 KU16 iTrgObject;
2304 KU32 offTrgObject;
2305
2306 /* the object */
2307 if (u.prlc->nr_flags & NR16OBJMOD)
2308 {
2309 iTrgObject = *(const KU16 *)pb;
2310 pb += 2;
2311 }
2312 else
2313 iTrgObject = *pb++;
2314 iTrgObject--;
2315 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2316 return KLDR_ERR_BAD_FIXUP;
2317
2318 /* the target */
2319 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2320 {
2321 if (u.prlc->nr_flags & NR32BITOFF)
2322 {
2323 offTrgObject = *(const KU32 *)pb;
2324 pb += 4;
2325 }
2326 else
2327 {
2328 offTrgObject = *(const KU16 *)pb;
2329 pb += 2;
2330 }
2331
2332 /* calculate the symbol info. */
2333 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2334 }
2335 else
2336 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2337 if ( (u.prlc->nr_stype & NRALIAS)
2338 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2339 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2340 else
2341 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2342 fKind = 0;
2343 break;
2344 }
2345
2346 /*
2347 * Import by symbol ordinal.
2348 */
2349 case NRRORD:
2350 {
2351 KU16 iModule;
2352 KU32 iSymbol;
2353
2354 /* the module ordinal */
2355 if (u.prlc->nr_flags & NR16OBJMOD)
2356 {
2357 iModule = *(const KU16 *)pb;
2358 pb += 2;
2359 }
2360 else
2361 iModule = *pb++;
2362 iModule--;
2363 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2364 return KLDR_ERR_BAD_FIXUP;
2365#if 1
2366 if (u.prlc->nr_flags & NRICHAIN)
2367 return KLDR_ERR_BAD_FIXUP;
2368#endif
2369
2370                            /* the symbol ordinal. */
2371 if (u.prlc->nr_flags & NR32BITOFF)
2372 {
2373 iSymbol = *(const KU32 *)pb;
2374 pb += 4;
2375 }
2376 else if (!(u.prlc->nr_flags & NR8BITORD))
2377 {
2378 iSymbol = *(const KU16 *)pb;
2379 pb += 2;
2380 }
2381 else
2382 iSymbol = *pb++;
2383
2384 /* resolve it. */
2385 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2386 if (rc)
2387 return rc;
2388 iSelector = -1;
2389 break;
2390 }
2391
2392 /*
2393 * Import by symbol name.
2394 */
2395 case NRRNAM:
2396 {
2397 KU32 iModule;
2398                    KU32            offSymbol;
2399 const KU8 *pbSymbol;
2400
2401 /* the module ordinal */
2402 if (u.prlc->nr_flags & NR16OBJMOD)
2403 {
2404 iModule = *(const KU16 *)pb;
2405 pb += 2;
2406 }
2407 else
2408 iModule = *pb++;
2409 iModule--;
2410 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2411 return KLDR_ERR_BAD_FIXUP;
2412#if 1
2413 if (u.prlc->nr_flags & NRICHAIN)
2414 return KLDR_ERR_BAD_FIXUP;
2415#endif
2416
2417                            /* the symbol name offset (into the import procedure name table). */
2418 if (u.prlc->nr_flags & NR32BITOFF)
2419 {
2420 offSymbol = *(const KU32 *)pb;
2421 pb += 4;
2422 }
2423 else if (!(u.prlc->nr_flags & NR8BITORD))
2424 {
2425 offSymbol = *(const KU16 *)pb;
2426 pb += 2;
2427 }
2428 else
2429 offSymbol = *pb++;
2430 pbSymbol = pModLX->pbImportProcs + offSymbol;
2431 if ( pbSymbol < pModLX->pbImportProcs
2432 || pbSymbol > pModLX->pbFixupSectionLast)
2433 return KLDR_ERR_BAD_FIXUP;
2434
2435 /* resolve it. */
2436 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2437 &uValue, &fKind, pvUser);
2438 if (rc)
2439 return rc;
2440 iSelector = -1;
2441 break;
2442 }
2443
2444 case NRRENT:
2445 KLDRMODLX_ASSERT(!"NRRENT");
2446 default:
2447 iSelector = -1;
2448 break;
2449 }
2450
2451 /* addend */
2452 if (u.prlc->nr_flags & NRADD)
2453 {
2454 if (u.prlc->nr_flags & NR32BITADD)
2455 {
2456 uValue += *(const KU32 *)pb;
2457 pb += 4;
2458 }
2459 else
2460 {
2461 uValue += *(const KU16 *)pb;
2462 pb += 2;
2463 }
2464 }
2465
2466
2467 /*
2468 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2469 */
2470 if (!(u.prlc->nr_stype & NRCHAIN))
2471 {
2472 int off = u.prlc->r32_soff;
2473
2474                        /* common / simple: 32-bit absolute (NROFF32) or 32-bit self-relative (NRSOFF32) within the page. */
2475 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2476 && off >= 0
2477 && off <= (int)OBJPAGELEN - 4)
2478 *(KU32 *)&pbPage[off] = (KU32)uValue;
2479 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2480 && off >= 0
2481 && off <= (int)OBJPAGELEN - 4)
2482 *(KU32 *)&pbPage[off] = (KU32)(uValue - (PageAddress + off + 4));
2483 else
2484 {
2485 /* generic */
2486 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2487 if (rc)
2488 return rc;
2489 }
2490 }
2491 else if (!(u.prlc->nr_flags & NRICHAIN))
2492 {
2493 const KI16 *poffSrc = (const KI16 *)pb;
2494 KU8 c = u.pb[2];
2495
2496 /* common / simple */
2497 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2498 {
2499 while (c-- > 0)
2500 {
2501 int off = *poffSrc++;
2502 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2503 *(KU32 *)&pbPage[off] = (KU32)uValue;
2504 else
2505 {
2506 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2507 if (rc)
2508 return rc;
2509 }
2510 }
2511 }
2512 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2513 {
2514 while (c-- > 0)
2515 {
2516 int off = *poffSrc++;
2517 if (off >= 0 && off <= (int)OBJPAGELEN - 4)
2518 *(KU32 *)&pbPage[off] = (KU32)(uValue - (PageAddress + off + 4));
2519 else
2520 {
2521 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2522 if (rc)
2523 return rc;
2524 }
2525 }
2526 }
2527 else
2528 {
2529 while (c-- > 0)
2530 {
2531 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2532 if (rc)
2533 return rc;
2534 }
2535 }
2536 pb = (const KU8 *)poffSrc;
2537 }
2538 else
2539 {
2540 /* This is a pain because it will require virgin pages on a relocation. */
2541 KLDRMODLX_ASSERT(!"NRICHAIN");
2542 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2543 }
2544 }
2545 }
2546 }
2547
2548 return 0;
2549}
2550
2551
2552/**
2553 * Applies the relocation to one 'source' in a page.
2554 *
2555 * This takes care of the more esoteric cases, while the common cases
2556 * are dealt with separately.
2557 *
2558 * @returns 0 on success, non-zero kLdr status code on failure.
2559 * @param pbPage The page in which to apply the fixup.
2560 * @param off Page relative offset of where to apply the fixup.
2561 * @param uValue The target value.
2562 * @param fKind The target kind.
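 * @param PageAddress The linear address of the page (used for self-relative fixups).
 * @param prlc Pointer to the fixup record being applied.
 * @param iSelector The selector for 16:16 and 16:32 targets, -1 when not applicable.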
2563 */
2564static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2565 int iSelector, KLDRADDR uValue, KU32 fKind)
2566{
2567#pragma pack(1) /* just to be sure */
2568 union
2569 {
2570 KU8 ab[6];
2571 KU32 off32;
2572 KU16 off16;
2573 KU8 off8;
2574 struct
2575 {
2576 KU16 off;
2577 KU16 Sel;
2578 } Far16;
2579 struct
2580 {
2581 KU32 off;
2582 KU16 Sel;
2583 } Far32;
2584 } uData;
2585#pragma pack()
2586 const KU8 *pbSrc;
2587 KU8 *pbDst;
2588 KU8 cb;
2589
2590 K_NOREF(fKind);
2591
2592 /*
2593 * Compose the fixup data.
2594 */
2595 switch (prlc->nr_stype & NRSRCMASK)
2596 {
2597 case NRSBYT:
2598 uData.off8 = (KU8)uValue;
2599 cb = 1;
2600 break;
2601 case NRSSEG:
2602 if (iSelector == -1)
2603 {
2604 /* fixme */
2605 }
2606 uData.off16 = iSelector;
2607 cb = 2;
2608 break;
2609 case NRSPTR:
2610 if (iSelector == -1)
2611 {
2612 /* fixme */
2613 }
2614 uData.Far16.off = (KU16)uValue;
2615 uData.Far16.Sel = iSelector;
2616 cb = 4;
2617 break;
2618 case NRSOFF:
2619 uData.off16 = (KU16)uValue;
2620 cb = 2;
2621 break;
2622 case NRPTR48:
2623 if (iSelector == -1)
2624 {
2625 /* fixme */
2626 }
2627 uData.Far32.off = (KU32)uValue;
2628 uData.Far32.Sel = iSelector;
2629 cb = 6;
2630 break;
2631 case NROFF32:
2632 uData.off32 = (KU32)uValue;
2633 cb = 4;
2634 break;
2635 case NRSOFF32:
2636 uData.off32 = (KU32)(uValue - (PageAddress + off + 4));
2637 cb = 4;
2638 break;
2639 default:
2640 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2641 }
2642
2643 /*
2644 * Apply it. This is sloooow...
2645 */
2646 pbSrc = &uData.ab[0];
2647 pbDst = pbPage + off;
2648 while (cb-- > 0)
2649 {
2650        if (off >= (int)OBJPAGELEN)
2651            break;
2652        if (off >= 0)
2653            *pbDst = *pbSrc;
2654        pbSrc++;
2655        pbDst++;
        off++; /* keep the page clipping in step with the advancing pointers; fixups may hang over either page boundary. */
2656    }
2657
2658 return 0;
2659}
2660
2661
2662/**
2663 * The LX module interpreter method table.
2664 */
2665KLDRMODOPS g_kLdrModLXOps =
2666{
2667 "LX",
2668 NULL,
2669 kldrModLXCreate,
2670 kldrModLXDestroy,
2671 kldrModLXQuerySymbol,
2672 kldrModLXEnumSymbols,
2673 kldrModLXGetImport,
2674 kldrModLXNumberOfImports,
2675    NULL /* pfnCanExecuteOn is optional */,
2676 kldrModLXGetStackInfo,
2677 kldrModLXQueryMainEntrypoint,
2678 NULL /* pfnQueryImageUuid */,
2679 NULL /* fixme */,
2680 NULL /* fixme */,
2681 kldrModLXEnumDbgInfo,
2682 kldrModLXHasDbgInfo,
2683 kldrModLXMap,
2684 kldrModLXUnmap,
2685 kldrModLXAllocTLS,
2686 kldrModLXFreeTLS,
2687 kldrModLXReload,
2688 kldrModLXFixupMapping,
2689 kldrModLXCallInit,
2690 kldrModLXCallTerm,
2691 kldrModLXCallThread,
2692 kldrModLXSize,
2693 kldrModLXGetBits,
2694 kldrModLXRelocateBits,
2695 NULL /* fixme: pfnMostlyDone */,
2696 42 /* the end */
2697};
2698