VirtualBox

source: kStuff/trunk/kLdr/kLdrModLX.c@ 24

Last change on this file since 24 was 24, checked in by bird, 16 years ago

darwin porting.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 86.3 KB
Line 
1/* $Id: kLdrModLX.c 24 2009-02-08 13:58:54Z bird $ */
2/** @file
3 * kLdr - The Module Interpreter for the Linear eXecutable (LX) Format.
4 */
5
6/*
7 * Copyright (c) 2006-2007 knut st. osmundsen <[email protected]>
8 *
9 * This file is part of kStuff.
10 *
11 * kStuff is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * In addition to the permissions in the GNU Lesser General Public
17 * License, you are granted unlimited permission to link the compiled
18 * version of this file into combinations with other programs, and to
19 * distribute those combinations without any restriction coming from
20 * the use of this file.
21 *
22 * kStuff is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 * Lesser General Public License for more details.
26 *
27 * You should have received a copy of the GNU Lesser General Public
28 * License along with kStuff; if not, write to the Free Software
29 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
30 * 02110-1301, USA
31 */
32
33/*******************************************************************************
34* Header Files *
35*******************************************************************************/
36#include <k/kLdr.h>
37#include "kLdrInternal.h"
38#include <k/kLdrFmts/lx.h>
39
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
/** @def KLDRMODLX_STRICT
 * Define KLDRMODLX_STRICT to enable strict checks in KLDRMODLX. */
#define KLDRMODLX_STRICT 1

/** @def KLDRMODLX_ASSERT
 * Assert that an expression is true when KLDRMODLX_STRICT is defined.
 * Compiles to a no-op otherwise.
 */
#ifdef KLDRMODLX_STRICT
# define KLDRMODLX_ASSERT(expr) kHlpAssert(expr)
#else
# define KLDRMODLX_ASSERT(expr) do {} while (0)
#endif
56
57
58/*******************************************************************************
59* Structures and Typedefs *
60*******************************************************************************/
/**
 * Instance data for the LX module interpreter.
 */
typedef struct KLDRMODLX
{
    /** Pointer to the module. (Follows the section table.) */
    PKLDRMOD pMod;
    /** Pointer to the user mapping. */
    const void *pvMapping;
    /** The size of the mapped LX image. */
    KSIZE cbMapped;
    /** Reserved flags. */
    KU32 f32Reserved;

    /** The offset of the LX header. */
    KLDRFOFF offHdr;
    /** Copy of the LX header. */
    struct e32_exe Hdr;

    /** Pointer to the loader section.
     * Allocated together with this structure. */
    const KU8 *pbLoaderSection;
    /** Pointer to the last byte in the loader section. */
    const KU8 *pbLoaderSectionLast;
    /** Pointer to the object table in the loader section. */
    const struct o32_obj *paObjs;
    /** Pointer to the object page map table in the loader section. */
    const struct o32_map *paPageMappings;
    /** Pointer to the resource table in the loader section. */
    const struct rsrc32 *paRsrcs;
    /** Pointer to the resident name table in the loader section. */
    const KU8 *pbResNameTab;
    /** Pointer to the entry table in the loader section. */
    const KU8 *pbEntryTab;

    /** Pointer to the non-resident name table. Loaded on demand. */
    KU8 *pbNonResNameTab;
    /** Pointer to the last byte in the non-resident name table. */
    const KU8 *pbNonResNameTabLast;

    /** Pointer to the fixup section. Loaded on demand by
     * kldrModLXDoLoadFixupSection(). */
    KU8 *pbFixupSection;
    /** Pointer to the last byte in the fixup section. */
    const KU8 *pbFixupSectionLast;
    /** Pointer to the fixup page table within pbFixupSection. */
    const KU32 *paoffPageFixups;
    /** Pointer to the fixup record table within pbFixupSection. */
    const KU8 *pbFixupRecs;
    /** Pointer to the import module name table within pbFixupSection. */
    const KU8 *pbImportMods;
    /** Pointer to the import procedure name table within pbFixupSection. */
    const KU8 *pbImportProcs;
} KLDRMODLX, *PKLDRMODLX;
114
115
116/*******************************************************************************
117* Internal Functions *
118*******************************************************************************/
119static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits);
120static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
121 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser);
122static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX);
123static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal);
124static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol);
125static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
126 const char *pchSymbol, KSIZE cchSymbol);
127static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits);
128static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
129static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc);
130static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb);
131static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect);
132static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle);
133static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
134 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind);
135static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX);
136static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved);
137static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
138 int iSelector, KLDRADDR uValue, KU32 fKind);
139
140
141/**
142 * Create a loader module instance interpreting the executable image found
143 * in the specified file provider instance.
144 *
145 * @returns 0 on success and *ppMod pointing to a module instance.
146 * On failure, a non-zero OS specific error code is returned.
147 * @param pOps Pointer to the registered method table.
148 * @param pRdr The file provider instance to use.
149 * @param offNewHdr The offset of the new header in MZ files. -1 if not found.
150 * @param ppMod Where to store the module instance pointer.
151 */
152static int kldrModLXCreate(PCKLDRMODOPS pOps, PKRDR pRdr, KLDRFOFF offNewHdr, PPKLDRMOD ppMod)
153{
154 PKLDRMODLX pModLX;
155 int rc;
156
157 /*
158 * Create the instance data and do a minimal header validation.
159 */
160 rc = kldrModLXDoCreate(pRdr, offNewHdr, &pModLX);
161 if (!rc)
162 {
163 pModLX->pMod->pOps = pOps;
164 pModLX->pMod->u32Magic = KLDRMOD_MAGIC;
165 *ppMod = pModLX->pMod;
166 return 0;
167 }
168 kHlpFree(pModLX);
169 return rc;
170}
171
172
/**
 * Worker for kldrModLXCreate that reads and validates the headers and creates
 * the LX module instance. Separated out to simplify cleanup on failure.
 *
 * On success *ppModLX points to the new instance; on the early error paths it
 * is left NULL, on later ones it points at the partially initialized instance
 * (the caller frees it either way).
 */
static int kldrModLXDoCreate(PKRDR pRdr, KLDRFOFF offNewHdr, PKLDRMODLX *ppModLX)
{
    struct e32_exe Hdr;
    PKLDRMODLX pModLX;
    PKLDRMOD pMod;
    KSIZE cb;
    KSIZE cchFilename;
    KU32 off, offEnd;
    KU32 i;
    int rc;
    int fCanOptimizeMapping;
    KU32 NextRVA;
    *ppModLX = NULL;

    /*
     * Read the signature and file header.
     */
    rc = kRdrRead(pRdr, &Hdr, sizeof(Hdr), offNewHdr > 0 ? offNewHdr : 0);
    if (rc)
        return rc;
    if (    Hdr.e32_magic[0] != E32MAGIC1
        ||  Hdr.e32_magic[1] != E32MAGIC2)
        return KLDR_ERR_UNKNOWN_FORMAT;

    /* We're not interested in anything but x86 images. */
    if (    Hdr.e32_level != E32LEVEL
        ||  Hdr.e32_border != E32LEBO
        ||  Hdr.e32_worder != E32LEWO
        ||  Hdr.e32_cpu < E32CPU286
        ||  Hdr.e32_cpu > E32CPU486
        ||  Hdr.e32_pagesize != OBJPAGELEN
        )
        return KLDR_ERR_LX_BAD_HEADER;

    /* Some rough sanity checks.
       offEnd is the file size clamped to a KU32 with a little headroom.
       NOTE(review): offNewHdr can be -1 ("not found") per the caller contract;
       the subtractions below assume a non-negative value - confirm callers. */
    offEnd = kRdrSize(pRdr) >= (KLDRFOFF)~(KU32)16 ? ~(KU32)16 : (KU32)kRdrSize(pRdr);
    if (    Hdr.e32_itermap > offEnd
        ||  Hdr.e32_datapage > offEnd
        ||  Hdr.e32_nrestab > offEnd
        ||  Hdr.e32_nrestab + Hdr.e32_cbnrestab > offEnd
        ||  Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr)
        ||  Hdr.e32_fixupsize > offEnd - offNewHdr - sizeof(Hdr)
        ||  Hdr.e32_fixupsize + Hdr.e32_ldrsize > offEnd - offNewHdr - sizeof(Hdr))
        return KLDR_ERR_LX_BAD_HEADER;

    /* Verify the loader section: every table offset must fall inside
       [objtab, objtab + ldrsize) and after the object table itself. */
    offEnd = Hdr.e32_objtab + Hdr.e32_ldrsize;
    if (Hdr.e32_objtab < sizeof(Hdr))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    off = Hdr.e32_objtab + sizeof(struct o32_obj) * Hdr.e32_objcnt;
    if (off > offEnd)
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_objmap
        &&  (Hdr.e32_objmap < off || Hdr.e32_objmap > offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_rsrccnt
        && (    Hdr.e32_rsrctab < off
            ||  Hdr.e32_rsrctab > offEnd
            ||  Hdr.e32_rsrctab + sizeof(struct rsrc32) * Hdr.e32_rsrccnt > offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_restab
        &&  (Hdr.e32_restab < off || Hdr.e32_restab > offEnd - 2))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_enttab
        &&  (Hdr.e32_enttab < off || Hdr.e32_enttab >= offEnd))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;
    if (    Hdr.e32_dircnt
        &&  (Hdr.e32_dirtab < off || Hdr.e32_dirtab > offEnd - 2))
        return KLDR_ERR_LX_BAD_LOADER_SECTION;

    /* Verify the fixup section. */
    off = offEnd;
    offEnd = off + Hdr.e32_fixupsize;
    if (    Hdr.e32_fpagetab
        &&  (Hdr.e32_fpagetab < off || Hdr.e32_fpagetab > offEnd))
    {
        /*
         * wlink mixes the fixup section and the loader section.
         * Rebase the fixup window on the fixup page table and shrink the
         * loader section accordingly (the header copy is adjusted in place).
         */
        off = Hdr.e32_fpagetab;
        offEnd = off + Hdr.e32_fixupsize;
        Hdr.e32_ldrsize = off - Hdr.e32_objtab;
    }
    if (    Hdr.e32_frectab
        &&  (Hdr.e32_frectab < off || Hdr.e32_frectab > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;
    if (    Hdr.e32_impmod
        &&  (Hdr.e32_impmod < off || Hdr.e32_impmod > offEnd || Hdr.e32_impmod + Hdr.e32_impmodcnt > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;
    if (    Hdr.e32_impproc
        &&  (Hdr.e32_impproc < off || Hdr.e32_impproc > offEnd))
        return KLDR_ERR_LX_BAD_FIXUP_SECTION;

    /*
     * Calc the instance size, allocate and initialize it.
     * Layout of the single allocation: KLDRMODLX, KLDRMOD + segment array,
     * filename copy, then the loader section.
     */
    cchFilename = kHlpStrLen(kRdrName(pRdr));
    cb = K_ALIGN_Z(sizeof(KLDRMODLX), 8)
       + K_ALIGN_Z(K_OFFSETOF(KLDRMOD, aSegments[Hdr.e32_objcnt + 1]), 8)
       + K_ALIGN_Z(cchFilename + 1, 8)
       + Hdr.e32_ldrsize + 2; /* +2 for two extra zeros. */
    pModLX = (PKLDRMODLX)kHlpAlloc(cb);
    if (!pModLX)
        return KERR_NO_MEMORY;
    *ppModLX = pModLX;

    /* KLDRMOD */
    pMod = (PKLDRMOD)((KU8 *)pModLX + K_ALIGN_Z(sizeof(KLDRMODLX), 8));
    pMod->pvData = pModLX;
    pMod->pRdr = pRdr;
    pMod->pOps = NULL;      /* set upon success. */
    pMod->cSegments = Hdr.e32_objcnt;
    pMod->cchFilename = cchFilename;
    pMod->pszFilename = (char *)K_ALIGN_P(&pMod->aSegments[pMod->cSegments], 8);
    kHlpMemCopy((char *)pMod->pszFilename, kRdrName(pRdr), cchFilename + 1);
    pMod->pszName = NULL; /* finalized further down */
    pMod->cchName = 0;
    switch (Hdr.e32_cpu)
    {
        case E32CPU286:
            pMod->enmCpu = KCPU_I80286;
            pMod->enmArch = KCPUARCH_X86_16;
            break;
        case E32CPU386:
            pMod->enmCpu = KCPU_I386;
            pMod->enmArch = KCPUARCH_X86_32;
            break;
        case E32CPU486:
            pMod->enmCpu = KCPU_I486;
            pMod->enmArch = KCPUARCH_X86_32;
            break;
    }
    pMod->enmEndian = KLDRENDIAN_LITTLE;
    pMod->enmFmt = KLDRFMT_LX;
    switch (Hdr.e32_mflags & E32MODMASK)
    {
        case E32MODEXE:
            pMod->enmType = !(Hdr.e32_mflags & E32NOINTFIX)
                ? KLDRTYPE_EXECUTABLE_RELOCATABLE
                : KLDRTYPE_EXECUTABLE_FIXED;
            break;

        case E32MODDLL:
        case E32PROTDLL:
        case E32MODPROTDLL:
            pMod->enmType = !(Hdr.e32_mflags & E32SYSDLL)
                ? KLDRTYPE_SHARED_LIBRARY_RELOCATABLE
                : KLDRTYPE_SHARED_LIBRARY_FIXED;
            break;

        case E32MODPDEV:
        case E32MODVDEV:
            pMod->enmType = KLDRTYPE_SHARED_LIBRARY_RELOCATABLE;
            break;
    }
    pMod->u32Magic = 0;     /* set upon success. */

    /* KLDRMODLX */
    pModLX->pMod = pMod;
    pModLX->pvMapping = 0;
    pModLX->cbMapped = 0;
    pModLX->f32Reserved = 0;

    pModLX->offHdr = offNewHdr >= 0 ? offNewHdr : 0;
    kHlpMemCopy(&pModLX->Hdr, &Hdr, sizeof(Hdr));

    pModLX->pbLoaderSection = K_ALIGN_P(pMod->pszFilename + pMod->cchFilename + 1, 16);
    pModLX->pbLoaderSectionLast = pModLX->pbLoaderSection + pModLX->Hdr.e32_ldrsize - 1;
    pModLX->paObjs = NULL;
    pModLX->paPageMappings = NULL;
    pModLX->paRsrcs = NULL;
    pModLX->pbResNameTab = NULL;
    pModLX->pbEntryTab = NULL;

    pModLX->pbNonResNameTab = NULL;
    pModLX->pbNonResNameTabLast = NULL;

    pModLX->pbFixupSection = NULL;
    pModLX->pbFixupSectionLast = NULL;
    pModLX->paoffPageFixups = NULL;
    pModLX->pbFixupRecs = NULL;
    pModLX->pbImportMods = NULL;
    pModLX->pbImportProcs = NULL;

    /*
     * Read the loader data.
     */
    rc = kRdrRead(pRdr, (void *)pModLX->pbLoaderSection, pModLX->Hdr.e32_ldrsize, pModLX->Hdr.e32_objtab + pModLX->offHdr);
    if (rc)
        return rc;
    /* Write the two extra zero bytes reserved in the allocation above; they
       terminate the (pascal-string + ordinal) name tables. */
    ((KU8 *)pModLX->pbLoaderSectionLast)[1] = 0;
    ((KU8 *)pModLX->pbLoaderSectionLast)[2] = 0;
    if (pModLX->Hdr.e32_objcnt)
        pModLX->paObjs = (const struct o32_obj *)pModLX->pbLoaderSection;
    if (pModLX->Hdr.e32_objmap)
        pModLX->paPageMappings = (const struct o32_map *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_objmap - pModLX->Hdr.e32_objtab);
    if (pModLX->Hdr.e32_rsrccnt)
        pModLX->paRsrcs = (const struct rsrc32 *)(pModLX->pbLoaderSection + pModLX->Hdr.e32_rsrctab - pModLX->Hdr.e32_objtab);
    if (pModLX->Hdr.e32_restab)
        pModLX->pbResNameTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_restab - pModLX->Hdr.e32_objtab;
    if (pModLX->Hdr.e32_enttab)
        pModLX->pbEntryTab = pModLX->pbLoaderSection + pModLX->Hdr.e32_enttab - pModLX->Hdr.e32_objtab;

    /*
     * Get the soname from the resident name table.
     * Very convenient that it's the 0 ordinal, because then we get a
     * free string terminator.
     * (The table entry consists of a pascal string followed by a 16-bit ordinal.)
     */
    if (pModLX->pbResNameTab)
        pMod->pszName = (const char *)kldrModLXDoNameTableLookupByOrdinal(pModLX->pbResNameTab,
                                                                          pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
                                                                          0);
    if (!pMod->pszName)
        return KLDR_ERR_LX_NO_SONAME;
    pMod->cchName = *(const KU8 *)pMod->pszName++;
    if (pMod->cchName != kHlpStrLen(pMod->pszName))
        return KLDR_ERR_LX_BAD_SONAME;

    /*
     * Quick validation of the object table: page-aligned bases, no size
     * wraparound, page map entries inside the loader section, and ascending
     * non-overlapping bases (resource objects excepted).
     */
    cb = 0;
    for (i = 0; i < pMod->cSegments; i++)
    {
        if (pModLX->paObjs[i].o32_base & (OBJPAGELEN - 1))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (pModLX->paObjs[i].o32_base + pModLX->paObjs[i].o32_size <= pModLX->paObjs[i].o32_base)
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (pModLX->paObjs[i].o32_mapsize > (pModLX->paObjs[i].o32_size + (OBJPAGELEN - 1)))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (    pModLX->paObjs[i].o32_mapsize
            &&  (   (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap] > pModLX->pbLoaderSectionLast
                 || (KU8 *)&pModLX->paPageMappings[pModLX->paObjs[i].o32_pagemap + pModLX->paObjs[i].o32_mapsize]
                    > pModLX->pbLoaderSectionLast))
            return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        if (i > 0 && !(pModLX->paObjs[i].o32_flags & OBJRSRC))
        {
            if (pModLX->paObjs[i].o32_base <= pModLX->paObjs[i - 1].o32_base)
                return KLDR_ERR_LX_BAD_OBJECT_TABLE;
            if (pModLX->paObjs[i].o32_base < pModLX->paObjs[i - 1].o32_base + pModLX->paObjs[i - 1].o32_mapsize)
                return KLDR_ERR_LX_BAD_OBJECT_TABLE;
        }
    }

    /*
     * Check if we can optimize the mapping by using a different
     * object alignment. The linker typically uses 64KB alignment,
     * we can easily get away with page alignment in most cases.
     */
    fCanOptimizeMapping = !(Hdr.e32_mflags & (E32NOINTFIX | E32SYSDLL));
    NextRVA = 0;

    /*
     * Setup the KLDRMOD segment array.
     */
    for (i = 0; i < pMod->cSegments; i++)
    {
        /* unused */
        pMod->aSegments[i].pvUser = NULL;
        pMod->aSegments[i].MapAddress = 0;
        pMod->aSegments[i].pchName = NULL;
        pMod->aSegments[i].cchName = 0;
        pMod->aSegments[i].offFile = -1;
        pMod->aSegments[i].cbFile = -1;
        pMod->aSegments[i].SelFlat = 0;
        pMod->aSegments[i].Sel16bit = 0;

        /* flags
           NOTE(review): each assignment below overwrites fFlags rather than
           OR-ing, so only the last matching OBJ* flag survives - confirm
           whether these flags are mutually exclusive in practice. */
        pMod->aSegments[i].fFlags = 0;
        if (pModLX->paObjs[i].o32_flags & OBJBIGDEF)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_16BIT;
        if (pModLX->paObjs[i].o32_flags & OBJALIAS16)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_ALIAS16;
        if (pModLX->paObjs[i].o32_flags & OBJCONFORM)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_CONFORM;
        if (pModLX->paObjs[i].o32_flags & OBJIOPL)
            pMod->aSegments[i].fFlags = KLDRSEG_FLAG_OS2_IOPL;

        /* size and addresses */
        pMod->aSegments[i].Alignment = OBJPAGELEN;
        pMod->aSegments[i].cb = pModLX->paObjs[i].o32_size;
        pMod->aSegments[i].LinkAddress = pModLX->paObjs[i].o32_base;
        pMod->aSegments[i].RVA = NextRVA;
        if (    fCanOptimizeMapping
            ||  i + 1 >= pMod->cSegments
            ||  (pModLX->paObjs[i].o32_flags & OBJRSRC)
            ||  (pModLX->paObjs[i + 1].o32_flags & OBJRSRC))
            pMod->aSegments[i].cbMapped = K_ALIGN_Z(pModLX->paObjs[i].o32_size, OBJPAGELEN);
        else
            pMod->aSegments[i].cbMapped = pModLX->paObjs[i + 1].o32_base - pModLX->paObjs[i].o32_base;
        NextRVA += pMod->aSegments[i].cbMapped;

        /* protection */
        switch (  pModLX->paObjs[i].o32_flags
                & (OBJSHARED | OBJREAD | OBJWRITE | OBJEXEC))
        {
            case 0:
            case OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_NOACCESS;
                break;
            case OBJREAD:
            case OBJREAD | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_READONLY;
                break;
            case OBJWRITE:
            case OBJWRITE | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_WRITECOPY;
                break;
            case OBJWRITE | OBJSHARED:
            case OBJWRITE | OBJSHARED | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_READWRITE;
                break;
            case OBJEXEC:
            case OBJEXEC | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE;
                break;
            case OBJEXEC | OBJREAD:
            case OBJEXEC | OBJREAD | OBJSHARED:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_READ;
                break;
            case OBJEXEC | OBJWRITE:
            case OBJEXEC | OBJWRITE | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_WRITECOPY;
                break;
            case OBJEXEC | OBJWRITE | OBJSHARED:
            case OBJEXEC | OBJWRITE | OBJSHARED | OBJREAD:
                pMod->aSegments[i].enmProt = KPROT_EXECUTE_READWRITE;
                break;
        }
        if ((pModLX->paObjs[i].o32_flags & (OBJREAD | OBJWRITE | OBJEXEC | OBJRSRC)) == OBJRSRC)
            pMod->aSegments[i].enmProt = KPROT_READONLY;
        /*pMod->aSegments[i].f16bit = !(pModLX->paObjs[i].o32_flags & OBJBIGDEF)
        pMod->aSegments[i].fIOPL = !(pModLX->paObjs[i].o32_flags & OBJIOPL)
        pMod->aSegments[i].fConforming = !(pModLX->paObjs[i].o32_flags & OBJCONFORM) */
    }

    /* set the mapping size */
    pModLX->cbMapped = NextRVA;

    /*
     * We're done.
     */
    *ppModLX = pModLX;
    return 0;
}
523
524
525/** @copydoc KLDRMODOPS::pfnDestroy */
526static int kldrModLXDestroy(PKLDRMOD pMod)
527{
528 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
529 int rc = 0;
530 KLDRMODLX_ASSERT(!pModLX->pvMapping);
531
532 if (pMod->pRdr)
533 {
534 rc = kRdrClose(pMod->pRdr);
535 pMod->pRdr = NULL;
536 }
537 if (pModLX->pbNonResNameTab)
538 {
539 kHlpFree(pModLX->pbNonResNameTab);
540 pModLX->pbNonResNameTab = NULL;
541 }
542 if (pModLX->pbFixupSection)
543 {
544 kHlpFree(pModLX->pbFixupSection);
545 pModLX->pbFixupSection = NULL;
546 }
547 pMod->u32Magic = 0;
548 pMod->pOps = NULL;
549 kHlpFree(pModLX);
550 return rc;
551}
552
553
554/**
555 * Resolved base address aliases.
556 *
557 * @param pModLX The interpreter module instance
558 * @param pBaseAddress The base address, IN & OUT.
559 */
560static void kldrModLXResolveBaseAddress(PKLDRMODLX pModLX, PKLDRADDR pBaseAddress)
561{
562 if (*pBaseAddress == KLDRMOD_BASEADDRESS_MAP)
563 *pBaseAddress = pModLX->pMod->aSegments[0].MapAddress;
564 else if (*pBaseAddress == KLDRMOD_BASEADDRESS_LINK)
565 *pBaseAddress = pModLX->pMod->aSegments[0].LinkAddress;
566}
567
568
/** @copydoc kLdrModQuerySymbol
 * Looks an export up by ordinal (iSymbol) or by name (pchSymbol/cchSymbol,
 * which is translated to an ordinal first). pvBits and pszVersion are not
 * used by the LX interpreter. */
static int kldrModLXQuerySymbol(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, KU32 iSymbol,
                                const char *pchSymbol, KSIZE cchSymbol, const char *pszVersion,
                                PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    KU32 iOrdinal;
    int rc;
    const struct b32_bundle *pBundle;


    /*
     * Give up at once if there is no entry table.
     */
    if (!pModLX->Hdr.e32_enttab)
        return KLDR_ERR_SYMBOL_NOT_FOUND;

    /*
     * Translate the symbol name into an ordinal.
     */
    if (pchSymbol)
    {
        rc = kldrModLXDoNameLookup(pModLX, pchSymbol, cchSymbol, &iSymbol);
        if (rc)
            return rc;
    }

    /*
     * Iterate the entry table.
     * (The entry table is made up of bundles of similar exports.)
     * iOrdinal tracks the first ordinal of the current bundle; LX export
     * ordinals start at 1.
     */
    iOrdinal = 1;
    pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
    while (pBundle->b32_cnt && iOrdinal <= iSymbol)
    {
        /* Per-entry sizes indexed by bundle type (EMPTY..ENTRYFWD). */
        static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };

        /*
         * Check for a hit first.
         */
        iOrdinal += pBundle->b32_cnt;
        if (iSymbol < iOrdinal)
        {
            KU32 offObject;
            /* Index into the entry array that follows the bundle header. */
            const struct e32_entry *pEntry = (const struct e32_entry *)((KUPTR)(pBundle + 1)
                                                                        + (iSymbol - (iOrdinal - pBundle->b32_cnt))
                                                                        * s_cbEntry[pBundle->b32_type]);

            /*
             * Calculate the return address.
             */
            kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
            switch (pBundle->b32_type)
            {
                /* empty bundles are place holders unused ordinal ranges. */
                case EMPTY:
                    return KLDR_ERR_SYMBOL_NOT_FOUND;

                /* e32_flags + a 16-bit offset. */
                case ENTRY16:
                    offObject = pEntry->e32_variant.e32_offset.offset16;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
                    break;

                /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
                case GATE16:
                    offObject = pEntry->e32_variant.e32_callgate.offset;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
                    break;

                /* e32_flags + a 32-bit offset. */
                case ENTRY32:
                    offObject = pEntry->e32_variant.e32_offset.offset32;
                    if (pfKind)
                        *pfKind = KLDRSYMKIND_32BIT;
                    break;

                /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
                case ENTRYFWD:
                    return kldrModLXDoForwarderQuery(pModLX, pEntry, pfnGetForwarder, pvUser, puValue, pfKind);

                default:
                    /* anyone actually using TYPEINFO will end up here. */
                    KLDRMODLX_ASSERT(!"Bad bundle type");
                    return KLDR_ERR_LX_BAD_BUNDLE;
            }

            /*
             * Validate the object number and calc the return address.
             */
            if (    pBundle->b32_obj <= 0
                ||  pBundle->b32_obj > pMod->cSegments)
                return KLDR_ERR_LX_BAD_BUNDLE;
            if (puValue)
                *puValue = BaseAddress
                         + offObject
                         + pMod->aSegments[pBundle->b32_obj - 1].RVA;
            return 0;
        }

        /*
         * Skip the bundle.
         */
        if (pBundle->b32_type > ENTRYFWD)
        {
            KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
            return KLDR_ERR_LX_BAD_BUNDLE;
        }
        /* EMPTY bundles have no b32_obj field; only the first two bytes
           (count + type) are present. */
        if (pBundle->b32_type == 0)
            pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
        else
            pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
    }

    return KLDR_ERR_SYMBOL_NOT_FOUND;
}
687
688
689/**
690 * Do name lookup.
691 *
692 * @returns See kLdrModQuerySymbol.
693 * @param pModLX The module to lookup the symbol in.
694 * @param pchSymbol The symbol to lookup.
695 * @param cchSymbol The symbol name length.
696 * @param piSymbol Where to store the symbol ordinal.
697 */
698static int kldrModLXDoNameLookup(PKLDRMODLX pModLX, const char *pchSymbol, KU32 cchSymbol, KU32 *piSymbol)
699{
700
701 /*
702 * First do a hash table lookup.
703 */
704 /** @todo hash name table for speed. */
705
706 /*
707 * Search the name tables.
708 */
709 const KU8 *pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
710 pModLX->pbLoaderSectionLast - pModLX->pbResNameTab + 1,
711 pchSymbol, cchSymbol);
712 if (!pbName)
713 {
714 if (!pModLX->pbNonResNameTab)
715 {
716 /* lazy load it */
717 /** @todo non-resident name table. */
718 }
719 if (pModLX->pbNonResNameTab)
720 pbName = kldrModLXDoNameTableLookupByName(pModLX->pbResNameTab,
721 pModLX->pbNonResNameTabLast - pModLX->pbResNameTab + 1,
722 pchSymbol, cchSymbol);
723 }
724 if (!pbName)
725 return KLDR_ERR_SYMBOL_NOT_FOUND;
726
727 *piSymbol = *(const KU16 *)(pbName + 1 + *pbName);
728 return 0;
729}
730
731
732#if 0
733/**
734 * Hash a symbol using the algorithm from sdbm.
735 *
736 * The following was is the documenation of the orignal sdbm functions:
737 *
738 * This algorithm was created for sdbm (a public-domain reimplementation of
739 * ndbm) database library. it was found to do well in scrambling bits,
740 * causing better distribution of the keys and fewer splits. it also happens
741 * to be a good general hashing function with good distribution. the actual
742 * function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
743 * is the faster version used in gawk. [there is even a faster, duff-device
744 * version] the magic constant 65599 was picked out of thin air while
745 * experimenting with different constants, and turns out to be a prime.
746 * this is one of the algorithms used in berkeley db (see sleepycat) and
747 * elsewhere.
748 */
749static KU32 kldrModLXDoHash(const char *pchSymbol, KU8 cchSymbol)
750{
751 KU32 hash = 0;
752 int ch;
753
754 while ( cchSymbol-- > 0
755 && (ch = *(unsigned const char *)pchSymbol++))
756 hash = ch + (hash << 6) + (hash << 16) - hash;
757
758 return hash;
759}
760#endif
761
762
763/**
764 * Lookup a name table entry by name.
765 *
766 * @returns Pointer to the name table entry if found.
767 * @returns NULL if not found.
768 * @param pbNameTable Pointer to the name table that should be searched.
769 * @param cbNameTable The size of the name table.
770 * @param pchSymbol The name of the symbol we're looking for.
771 * @param cchSymbol The length of the symbol name.
772 */
773static const KU8 *kldrModLXDoNameTableLookupByName(const KU8 *pbNameTable, KI32 cbNameTable,
774 const char *pchSymbol, KSIZE cchSymbol)
775{
776 /*
777 * Determin the namelength up front so we can skip anything which doesn't matches the length.
778 */
779 KU8 cbSymbol8Bit = (KU8)cchSymbol;
780 if (cbSymbol8Bit != cchSymbol)
781 return NULL; /* too long. */
782
783 /*
784 * Walk the name table.
785 */
786 while (*pbNameTable != 0 && cbNameTable > 0)
787 {
788 const KU8 cbName = *pbNameTable;
789
790 cbNameTable -= cbName + 1 + 2;
791 if (cbNameTable < 0)
792 break;
793
794 if ( cbName == cbSymbol8Bit
795 && !kHlpMemComp(pbNameTable + 1, pchSymbol, cbName))
796 return pbNameTable;
797
798 /* next entry */
799 pbNameTable += cbName + 1 + 2;
800 }
801
802 return NULL;
803}
804
805
806/**
807 * Deal with a forwarder entry.
808 *
809 * @returns See kLdrModQuerySymbol.
810 * @param pModLX The PE module interpreter instance.
811 * @param pEntry The forwarder entry.
812 * @param pfnGetForwarder The callback for resolving forwarder symbols. (optional)
813 * @param pvUser The user argument for the callback.
814 * @param puValue Where to put the value. (optional)
815 * @param pfKind Where to put the symbol kind. (optional)
816 */
817static int kldrModLXDoForwarderQuery(PKLDRMODLX pModLX, const struct e32_entry *pEntry,
818 PFNKLDRMODGETIMPORT pfnGetForwarder, void *pvUser, PKLDRADDR puValue, KU32 *pfKind)
819{
820 int rc;
821 KU32 iSymbol;
822 const char *pchSymbol;
823 KU8 cchSymbol;
824
825 if (!pfnGetForwarder)
826 return KLDR_ERR_FORWARDER_SYMBOL;
827
828 /*
829 * Validate the entry import module ordinal.
830 */
831 if ( !pEntry->e32_variant.e32_fwd.modord
832 || pEntry->e32_variant.e32_fwd.modord > pModLX->Hdr.e32_impmodcnt)
833 return KLDR_ERR_LX_BAD_FORWARDER;
834
835 /*
836 * Figure out the parameters.
837 */
838 if (pEntry->e32_flags & FWD_ORDINAL)
839 {
840 iSymbol = pEntry->e32_variant.e32_fwd.value;
841 pchSymbol = NULL; /* no symbol name. */
842 cchSymbol = 0;
843 }
844 else
845 {
846 const KU8 *pbName;
847
848 /* load the fixup section if necessary. */
849 if (!pModLX->pbImportProcs)
850 {
851 rc = kldrModLXDoLoadFixupSection(pModLX);
852 if (rc)
853 return rc;
854 }
855
856 /* Make name pointer. */
857 pbName = pModLX->pbImportProcs + pEntry->e32_variant.e32_fwd.value;
858 if ( pbName >= pModLX->pbFixupSectionLast
859 || pbName < pModLX->pbFixupSection
860 || !*pbName)
861 return KLDR_ERR_LX_BAD_FORWARDER;
862
863
864 /* check for '#' name. */
865 if (pbName[1] == '#')
866 {
867 KU8 cbLeft = *pbName;
868 const KU8 *pb = pbName + 1;
869 unsigned uBase;
870
871 /* base detection */
872 uBase = 10;
873 if ( cbLeft > 1
874 && pb[1] == '0'
875 && (pb[2] == 'x' || pb[2] == 'X'))
876 {
877 uBase = 16;
878 pb += 2;
879 cbLeft -= 2;
880 }
881
882 /* ascii to integer */
883 iSymbol = 0;
884 while (cbLeft-- > 0)
885 {
886 /* convert char to digit. */
887 unsigned uDigit = *pb++;
888 if (uDigit >= '0' && uDigit <= '9')
889 uDigit -= '0';
890 else if (uDigit >= 'a' && uDigit <= 'z')
891 uDigit -= 'a' + 10;
892 else if (uDigit >= 'A' && uDigit <= 'Z')
893 uDigit -= 'A' + 10;
894 else if (!uDigit)
895 break;
896 else
897 return KLDR_ERR_LX_BAD_FORWARDER;
898 if (uDigit >= uBase)
899 return KLDR_ERR_LX_BAD_FORWARDER;
900
901 /* insert the digit */
902 iSymbol *= uBase;
903 iSymbol += uDigit;
904 }
905 if (!iSymbol)
906 return KLDR_ERR_LX_BAD_FORWARDER;
907
908 pchSymbol = NULL; /* no symbol name. */
909 cchSymbol = 0;
910 }
911 else
912 {
913 pchSymbol = (char *)pbName + 1;
914 cchSymbol = *pbName;
915 iSymbol = NIL_KLDRMOD_SYM_ORDINAL;
916 }
917 }
918
919 /*
920 * Resolve the forwarder.
921 */
922 rc = pfnGetForwarder(pModLX->pMod, pEntry->e32_variant.e32_fwd.modord - 1, iSymbol, pchSymbol, cchSymbol, NULL, puValue, pfKind, pvUser);
923 if (!rc && pfKind)
924 *pfKind |= KLDRSYMKIND_FORWARDER;
925 return rc;
926}
927
928
/**
 * Loads the fixup section from the executable image.
 *
 * The fixup section isn't loaded until it's accessed. It's also freed by kLdrModDone().
 *
 * @returns 0 on success, non-zero kLdr or native status code on failure.
 * @param pModLX The LX module interpreter instance.
 */
static int kldrModLXDoLoadFixupSection(PKLDRMODLX pModLX)
{
    int rc;
    KU32 off;
    void *pv;

    /* NOTE(review): e32_fixupsize may be zero, making this a zero-sized
       allocation - confirm kHlpAlloc(0) semantics. */
    pv = kHlpAlloc(pModLX->Hdr.e32_fixupsize);
    if (!pv)
        return KERR_NO_MEMORY;

    /* The fixup section follows the loader section in the file. */
    off = pModLX->Hdr.e32_objtab + pModLX->Hdr.e32_ldrsize;
    rc = kRdrRead(pModLX->pMod->pRdr, pv, pModLX->Hdr.e32_fixupsize,
                  off + pModLX->offHdr);
    if (!rc)
    {
        pModLX->pbFixupSection = pv;
        /* NOTE(review): this points one byte PAST the end of the section,
           unlike pbLoaderSectionLast (which is +size-1). The consumers in
           this file treat it as an end pointer - keep that in mind and
           confirm against the struct comment. */
        pModLX->pbFixupSectionLast = pModLX->pbFixupSection + pModLX->Hdr.e32_fixupsize;
        /* Resolve the sub-table pointers; each is relative to the fixup
           section start (off) per the header offsets. */
        KLDRMODLX_ASSERT(!pModLX->paoffPageFixups);
        if (pModLX->Hdr.e32_fpagetab)
            pModLX->paoffPageFixups = (const KU32 *)(pModLX->pbFixupSection + pModLX->Hdr.e32_fpagetab - off);
        KLDRMODLX_ASSERT(!pModLX->pbFixupRecs);
        if (pModLX->Hdr.e32_frectab)
            pModLX->pbFixupRecs = pModLX->pbFixupSection + pModLX->Hdr.e32_frectab - off;
        KLDRMODLX_ASSERT(!pModLX->pbImportMods);
        if (pModLX->Hdr.e32_impmod)
            pModLX->pbImportMods = pModLX->pbFixupSection + pModLX->Hdr.e32_impmod - off;
        KLDRMODLX_ASSERT(!pModLX->pbImportProcs);
        if (pModLX->Hdr.e32_impproc)
            pModLX->pbImportProcs = pModLX->pbFixupSection + pModLX->Hdr.e32_impproc - off;
    }
    else
        kHlpFree(pv);
    return rc;
}
971
972
973/** @copydoc kLdrModEnumSymbols */
974static int kldrModLXEnumSymbols(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress,
975 KU32 fFlags, PFNKLDRMODENUMSYMS pfnCallback, void *pvUser)
976{
977 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
978 const struct b32_bundle *pBundle;
979 KU32 iOrdinal;
980 int rc = 0;
981
982 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
983
984 /*
985 * Enumerate the entry table.
986 * (The entry table is made up of bundles of similar exports.)
987 */
988 iOrdinal = 1;
989 pBundle = (const struct b32_bundle *)pModLX->pbEntryTab;
990 while (pBundle->b32_cnt && iOrdinal)
991 {
992 static const KSIZE s_cbEntry[] = { 0, 3, 5, 5, 7 };
993
994 /*
995 * Enum the entries in the bundle.
996 */
997 if (pBundle->b32_type != EMPTY)
998 {
999 const struct e32_entry *pEntry;
1000 KSIZE cbEntry;
1001 KLDRADDR BundleRVA;
1002 unsigned cLeft;
1003
1004
1005 /* Validate the bundle. */
1006 switch (pBundle->b32_type)
1007 {
1008 case ENTRY16:
1009 case GATE16:
1010 case ENTRY32:
1011 if ( pBundle->b32_obj <= 0
1012 || pBundle->b32_obj > pMod->cSegments)
1013 return KLDR_ERR_LX_BAD_BUNDLE;
1014 BundleRVA = pMod->aSegments[pBundle->b32_obj - 1].RVA;
1015 break;
1016
1017 case ENTRYFWD:
1018 BundleRVA = 0;
1019 break;
1020
1021 default:
1022 /* anyone actually using TYPEINFO will end up here. */
1023 KLDRMODLX_ASSERT(!"Bad bundle type");
1024 return KLDR_ERR_LX_BAD_BUNDLE;
1025 }
1026
1027 /* iterate the bundle entries. */
1028 cbEntry = s_cbEntry[pBundle->b32_type];
1029 pEntry = (const struct e32_entry *)(pBundle + 1);
1030 cLeft = pBundle->b32_cnt;
1031 while (cLeft-- > 0)
1032 {
1033 KLDRADDR uValue;
1034 KU32 fKind;
1035 int fFoundName;
1036 const KU8 *pbName;
1037
1038 /*
1039 * Calc the symbol value and kind.
1040 */
1041 switch (pBundle->b32_type)
1042 {
1043 /* e32_flags + a 16-bit offset. */
1044 case ENTRY16:
1045 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset16;
1046 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_NO_TYPE;
1047 break;
1048
1049 /* e32_flags + a 16-bit offset + a 16-bit callgate selector. */
1050 case GATE16:
1051 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_callgate.offset;
1052 fKind = KLDRSYMKIND_16BIT | KLDRSYMKIND_CODE;
1053 break;
1054
1055 /* e32_flags + a 32-bit offset. */
1056 case ENTRY32:
1057 uValue = BaseAddress + BundleRVA + pEntry->e32_variant.e32_offset.offset32;
1058 fKind = KLDRSYMKIND_32BIT;
1059 break;
1060
1061 /* e32_flags + 16-bit import module ordinal + a 32-bit procname or ordinal. */
1062 case ENTRYFWD:
1063 uValue = 0; /** @todo implement enumeration of forwarders properly. */
1064 fKind = KLDRSYMKIND_FORWARDER;
1065 break;
1066 }
1067
1068 /*
1069 * Any symbol names?
1070 */
1071 fFoundName = 0;
1072
1073 /* resident name table. */
1074 pbName = pModLX->pbResNameTab;
1075 if (pbName)
1076 {
1077 do
1078 {
1079 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbLoaderSectionLast - pbName + 1, iOrdinal);
1080 if (!pbName)
1081 break;
1082 fFoundName = 1;
1083 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1084 if (rc)
1085 return rc;
1086
1087 /* skip to the next entry */
1088 pbName += 1 + *pbName + 2;
1089 } while (pbName < pModLX->pbLoaderSectionLast);
1090 }
1091
1092 /* resident name table. */
1093 pbName = pModLX->pbNonResNameTab;
1094 /** @todo lazy load the non-resident name table. */
1095 if (pbName)
1096 {
1097 do
1098 {
1099 pbName = kldrModLXDoNameTableLookupByOrdinal(pbName, pModLX->pbNonResNameTabLast - pbName + 1, iOrdinal);
1100 if (!pbName)
1101 break;
1102 fFoundName = 1;
1103 rc = pfnCallback(pMod, iOrdinal, (const char *)pbName + 1, *pbName, NULL, uValue, fKind, pvUser);
1104 if (rc)
1105 return rc;
1106
1107 /* skip to the next entry */
1108 pbName += 1 + *pbName + 2;
1109 } while (pbName < pModLX->pbLoaderSectionLast);
1110 }
1111
1112 /*
1113 * If no names, call once with the ordinal only.
1114 */
1115 if (!fFoundName)
1116 {
1117 rc = pfnCallback(pMod, iOrdinal, NULL, 0, NULL, uValue, fKind, pvUser);
1118 if (rc)
1119 return rc;
1120 }
1121
1122 /* next */
1123 iOrdinal++;
1124 pEntry = (const struct e32_entry *)((KUPTR)pEntry + cbEntry);
1125 }
1126 }
1127
1128 /*
1129 * The next bundle.
1130 */
1131 if (pBundle->b32_type > ENTRYFWD)
1132 {
1133 KLDRMODLX_ASSERT(!"Bad type"); /** @todo figure out TYPEINFO. */
1134 return KLDR_ERR_LX_BAD_BUNDLE;
1135 }
1136 if (pBundle->b32_type == 0)
1137 pBundle = (const struct b32_bundle *)((const KU8 *)pBundle + 2);
1138 else
1139 pBundle = (const struct b32_bundle *)((const KU8 *)(pBundle + 1) + s_cbEntry[pBundle->b32_type] * pBundle->b32_cnt);
1140 }
1141
1142 return 0;
1143}
1144
1145
1146/**
1147 * Lookup a name table entry by ordinal.
1148 *
1149 * @returns Pointer to the name table entry if found.
1150 * @returns NULL if not found.
1151 * @param pbNameTable Pointer to the name table that should be searched.
1152 * @param cbNameTable The size of the name table.
1153 * @param iOrdinal The ordinal to search for.
1154 */
1155static const KU8 *kldrModLXDoNameTableLookupByOrdinal(const KU8 *pbNameTable, KI32 cbNameTable, KU32 iOrdinal)
1156{
1157 while (*pbNameTable != 0 && cbNameTable > 0)
1158 {
1159 const KU8 cbName = *pbNameTable;
1160 KU32 iName;
1161
1162 cbNameTable -= cbName + 1 + 2;
1163 if (cbNameTable < 0)
1164 break;
1165
1166 iName = *(pbNameTable + cbName + 1)
1167 | ((unsigned)*(pbNameTable + cbName + 2) << 8);
1168 if (iName == iOrdinal)
1169 return pbNameTable;
1170
1171 /* next entry */
1172 pbNameTable += cbName + 1 + 2;
1173 }
1174
1175 return NULL;
1176}
1177
1178
1179/** @copydoc kLdrModGetImport */
1180static int kldrModLXGetImport(PKLDRMOD pMod, const void *pvBits, KU32 iImport, char *pszName, KSIZE cchName)
1181{
1182 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1183 const KU8 *pb;
1184 int rc;
1185
1186 /*
1187 * Validate
1188 */
1189 if (iImport >= pModLX->Hdr.e32_impmodcnt)
1190 return KLDR_ERR_IMPORT_ORDINAL_OUT_OF_BOUNDS;
1191
1192 /*
1193 * Lazy loading the fixup section.
1194 */
1195 if (!pModLX->pbImportMods)
1196 {
1197 rc = kldrModLXDoLoadFixupSection(pModLX);
1198 if (rc)
1199 return rc;
1200 }
1201
1202 /*
1203 * Iterate the module import table until we reach the requested import ordinal.
1204 */
1205 pb = pModLX->pbImportMods;
1206 while (iImport-- > 0)
1207 pb += *pb + 1;
1208
1209 /*
1210 * Copy out the result.
1211 */
1212 if (*pb < cchName)
1213 {
1214 kHlpMemCopy(pszName, pb + 1, *pb);
1215 pszName[*pb] = '\0';
1216 rc = 0;
1217 }
1218 else
1219 {
1220 kHlpMemCopy(pszName, pb + 1, cchName);
1221 if (cchName)
1222 pszName[cchName - 1] = '\0';
1223 rc = KERR_BUFFER_OVERFLOW;
1224 }
1225
1226 return rc;
1227}
1228
1229
1230/** @copydoc kLdrModNumberOfImports */
1231static KI32 kldrModLXNumberOfImports(PKLDRMOD pMod, const void *pvBits)
1232{
1233 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1234 return pModLX->Hdr.e32_impmodcnt;
1235}
1236
1237
/** @copydoc kLdrModGetStackInfo
 *
 * The LX header gives the initial stack as an object index (e32_stackobj)
 * plus an ESP value (e32_esp); the stack occupies the e32_stacksize bytes
 * below ESP.  The addresses are only reported when that range lies entirely
 * within the named object, otherwise NIL is returned for both.
 */
static int kldrModLXGetStackInfo(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRSTACKINFO pStackInfo)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    const KU32 i = pModLX->Hdr.e32_stackobj;

    /* Validate: object index in range, ESP within the object, non-zero size,
       and the bottom of the stack not underflowing the object start. */
    if (    i
        &&  i <= pMod->cSegments
        &&  pModLX->Hdr.e32_esp <= pMod->aSegments[i - 1].LinkAddress + pMod->aSegments[i - 1].cb
        &&  pModLX->Hdr.e32_stacksize
        &&  pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize >= pMod->aSegments[i - 1].LinkAddress)
    {

        kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
        /* Report the bottom (lowest address) of the stack range. */
        pStackInfo->LinkAddress = pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize;
        pStackInfo->Address = BaseAddress
                            + pMod->aSegments[i - 1].RVA
                            + pModLX->Hdr.e32_esp - pModLX->Hdr.e32_stacksize - pMod->aSegments[i - 1].LinkAddress;
    }
    else
    {
        pStackInfo->Address = NIL_KLDRADDR;
        pStackInfo->LinkAddress = NIL_KLDRADDR;
    }
    /* The size is reported even when the addresses couldn't be validated. */
    pStackInfo->cbStack = pModLX->Hdr.e32_stacksize;
    pStackInfo->cbStackThread = 0;

    return 0;
}
1267
1268
1269/** @copydoc kLdrModQueryMainEntrypoint */
1270static int kldrModLXQueryMainEntrypoint(PKLDRMOD pMod, const void *pvBits, KLDRADDR BaseAddress, PKLDRADDR pMainEPAddress)
1271{
1272 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1273
1274 /*
1275 * Convert the address from the header.
1276 */
1277 kldrModLXResolveBaseAddress(pModLX, &BaseAddress);
1278 *pMainEPAddress = pModLX->Hdr.e32_startobj
1279 && pModLX->Hdr.e32_startobj <= pMod->cSegments
1280 && pModLX->Hdr.e32_eip < pMod->aSegments[pModLX->Hdr.e32_startobj - 1].cb
1281 ? BaseAddress + pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA + pModLX->Hdr.e32_eip
1282 : NIL_KLDRADDR;
1283 return 0;
1284}
1285
1286
1287/** @copydoc kLdrModEnumDbgInfo */
1288static int kldrModLXEnumDbgInfo(PKLDRMOD pMod, const void *pvBits, PFNKLDRENUMDBG pfnCallback, void *pvUser)
1289{
1290 /*PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;*/
1291
1292 /*
1293 * Quit immediately if no debug info.
1294 */
1295 if (kldrModLXHasDbgInfo(pMod, pvBits))
1296 return 0;
1297#if 0
1298 /*
1299 * Read the debug info and look for familiar magics and structures.
1300 */
1301 /** @todo */
1302#endif
1303
1304 return 0;
1305}
1306
1307
1308/** @copydoc kLdrModHasDbgInfo */
1309static int kldrModLXHasDbgInfo(PKLDRMOD pMod, const void *pvBits)
1310{
1311 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1312
1313 /*
1314 * Don't curretnly bother with linkers which doesn't advertise it in the header.
1315 */
1316 if ( !pModLX->Hdr.e32_debuginfo
1317 || !pModLX->Hdr.e32_debuglen)
1318 return KLDR_ERR_NO_DEBUG_INFO;
1319 return 0;
1320}
1321
1322
/** @copydoc kLdrModMap
 *
 * Allocates memory for the image (at the link address for fixed images),
 * loads the pages, applies the per-object page protections and records the
 * mapping in the segment table.
 */
static int kldrModLXMap(PKLDRMOD pMod)
{
    PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
    unsigned fFixed;
    void *pvBase;
    int rc;

    /*
     * Already mapped?
     */
    if (pModLX->pvMapping)
        return KLDR_ERR_ALREADY_MAPPED;

    /*
     * Allocate memory for it.
     */
    /* fixed image? */
    fFixed = pMod->enmType == KLDRTYPE_EXECUTABLE_FIXED
          || pMod->enmType == KLDRTYPE_SHARED_LIBRARY_FIXED;
    if (!fFixed)
        pvBase = NULL;
    else
    {
        /* A fixed image must be mapped at its link address; fail rather than
           truncate when that address doesn't fit in a host pointer. */
        pvBase = (void *)(KUPTR)pMod->aSegments[0].LinkAddress;
        if ((KUPTR)pvBase != pMod->aSegments[0].LinkAddress)
            return KLDR_ERR_ADDRESS_OVERFLOW;
    }
    /* Allocated RW+X initially; proper protections are applied below. */
    rc = kHlpPageAlloc(&pvBase, pModLX->cbMapped, KPROT_EXECUTE_READWRITE, fFixed);
    if (rc)
        return rc;

    /*
     * Load the bits, apply page protection, and update the segment table.
     */
    rc = kldrModLXDoLoadBits(pModLX, pvBase);
    if (!rc)
        rc = kldrModLXDoProtect(pModLX, pvBase, 0 /* protect */);
    if (!rc)
    {
        KU32 i;
        for (i = 0; i < pMod->cSegments; i++)
        {
            if (pMod->aSegments[i].RVA != NIL_KLDRADDR)
                pMod->aSegments[i].MapAddress = (KUPTR)pvBase + (KUPTR)pMod->aSegments[i].RVA;
        }
        pModLX->pvMapping = pvBase;
    }
    else
        /* Failure: release the allocation so we don't leak the mapping. */
        kHlpPageFree(pvBase, pModLX->cbMapped);
    return rc;
}
1375
1376
1377/**
1378 * Loads the LX pages into the specified memory mapping.
1379 *
1380 * @returns 0 on success.
1381 * @returns non-zero kLdr or OS status code on failure.
1382 *
1383 * @param pModLX The LX module interpreter instance.
1384 * @param pvBits Where to load the bits.
1385 */
1386static int kldrModLXDoLoadBits(PKLDRMODLX pModLX, void *pvBits)
1387{
1388 const PKRDR pRdr = pModLX->pMod->pRdr;
1389 KU8 *pbTmpPage = NULL;
1390 int rc = 0;
1391 KU32 i;
1392
1393 /*
1394 * Iterate the segments.
1395 */
1396 for (i = 0; i < pModLX->Hdr.e32_objcnt; i++)
1397 {
1398 const struct o32_obj * const pObj = &pModLX->paObjs[i];
1399 const KU32 cPages = pModLX->pMod->aSegments[i].cbMapped / OBJPAGELEN;
1400 KU32 iPage;
1401 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[i].RVA;
1402
1403 /*
1404 * Iterate the page map pages.
1405 */
1406 for (iPage = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN)
1407 {
1408 const struct o32_map *pMap = &pModLX->paPageMappings[iPage + pObj->o32_pagemap - 1];
1409 switch (pMap->o32_pageflags)
1410 {
1411 case VALID:
1412 if (pMap->o32_pagesize == OBJPAGELEN)
1413 rc = kRdrRead(pRdr, pbPage, OBJPAGELEN,
1414 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1415 else if (pMap->o32_pagesize < OBJPAGELEN)
1416 {
1417 rc = kRdrRead(pRdr, pbPage, pMap->o32_pagesize,
1418 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1419 kHlpMemSet(pbPage + pMap->o32_pagesize, 0, OBJPAGELEN - pMap->o32_pagesize);
1420 }
1421 else
1422 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1423 break;
1424
1425 case ITERDATA:
1426 case ITERDATA2:
1427 /* make sure we've got a temp page .*/
1428 if (!pbTmpPage)
1429 {
1430 pbTmpPage = kHlpAlloc(OBJPAGELEN + 256);
1431 if (!pbTmpPage)
1432 break;
1433 }
1434 /* validate the size. */
1435 if (pMap->o32_pagesize > OBJPAGELEN + 252)
1436 {
1437 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1438 break;
1439 }
1440
1441 /* read it and ensure 4 extra zero bytes. */
1442 rc = kRdrRead(pRdr, pbTmpPage, pMap->o32_pagesize,
1443 pModLX->Hdr.e32_datapage + (pMap->o32_pagedataoffset << pModLX->Hdr.e32_pageshift));
1444 if (rc)
1445 break;
1446 kHlpMemSet(pbTmpPage + pMap->o32_pagesize, 0, 4);
1447
1448 /* unpack it into the image page. */
1449 if (pMap->o32_pageflags == ITERDATA2)
1450 rc = kldrModLXDoIterData2Unpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1451 else
1452 rc = kldrModLXDoIterDataUnpacking(pbPage, pbTmpPage, pMap->o32_pagesize);
1453 break;
1454
1455 case INVALID: /* we're probably not dealing correctly with INVALID pages... */
1456 case ZEROED:
1457 kHlpMemSet(pbPage, 0, OBJPAGELEN);
1458 break;
1459
1460 case RANGE:
1461 KLDRMODLX_ASSERT(!"RANGE");
1462 default:
1463 rc = KLDR_ERR_LX_BAD_PAGE_MAP;
1464 break;
1465 }
1466 }
1467 if (rc)
1468 break;
1469
1470 /*
1471 * Zero the remaining pages.
1472 */
1473 if (iPage < cPages)
1474 kHlpMemSet(pbPage, 0, (cPages - iPage) * OBJPAGELEN);
1475 }
1476
1477 if (pbTmpPage)
1478 kHlpFree(pbTmpPage);
1479 return rc;
1480}
1481
1482
1483/**
1484 * Unpacks iterdata (aka EXEPACK).
1485 *
1486 * @returns 0 on success, non-zero kLdr status code on failure.
1487 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1488 * @param pbSrc The compressed source data.
1489 * @param cbSrc The file size of the compressed data. The source buffer
1490 * contains 4 additional zero bytes.
1491 */
1492static int kldrModLXDoIterDataUnpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1493{
1494 const struct LX_Iter *pIter = (const struct LX_Iter *)pbSrc;
1495 int cbDst = OBJPAGELEN;
1496
1497 /* Validate size of data. */
1498 if (cbSrc >= OBJPAGELEN - 2)
1499 return KLDR_ERR_LX_BAD_ITERDATA;
1500
1501 /*
1502 * Expand the page.
1503 */
1504 while (cbSrc > 0 && pIter->LX_nIter)
1505 {
1506 if (pIter->LX_nBytes == 1)
1507 {
1508 /*
1509 * Special case - one databyte.
1510 */
1511 cbDst -= pIter->LX_nIter;
1512 if (cbDst < 0)
1513 return KLDR_ERR_LX_BAD_ITERDATA;
1514
1515 cbSrc -= 4 + 1;
1516 if (cbSrc < -4)
1517 return KLDR_ERR_LX_BAD_ITERDATA;
1518
1519 kHlpMemSet(pbDst, pIter->LX_Iterdata, pIter->LX_nIter);
1520 pbDst += pIter->LX_nIter;
1521 pIter++;
1522 }
1523 else
1524 {
1525 /*
1526 * General.
1527 */
1528 int i;
1529
1530 cbDst -= pIter->LX_nIter * pIter->LX_nBytes;
1531 if (cbDst < 0)
1532 return KLDR_ERR_LX_BAD_ITERDATA;
1533
1534 cbSrc -= 4 + pIter->LX_nBytes;
1535 if (cbSrc < -4)
1536 return KLDR_ERR_LX_BAD_ITERDATA;
1537
1538 for (i = pIter->LX_nIter; i > 0; i--, pbDst += pIter->LX_nBytes)
1539 kHlpMemCopy(pbDst, &pIter->LX_Iterdata, pIter->LX_nBytes);
1540 pIter = (struct LX_Iter *)((char*)pIter + 4 + pIter->LX_nBytes);
1541 }
1542 }
1543
1544 /*
1545 * Zero remainder of the page.
1546 */
1547 if (cbDst > 0)
1548 kHlpMemSet(pbDst, 0, cbDst);
1549
1550 return 0;
1551}
1552
1553
1554/**
1555 * Unpacks iterdata (aka EXEPACK).
1556 *
1557 * @returns 0 on success, non-zero kLdr status code on failure.
1558 * @param pbDst Where to put the uncompressed data. (Assumes OBJPAGELEN size.)
1559 * @param pbSrc The compressed source data.
1560 * @param cbSrc The file size of the compressed data. The source buffer
1561 * contains 4 additional zero bytes.
1562 */
1563static int kldrModLXDoIterData2Unpacking(KU8 *pbDst, const KU8 *pbSrc, int cbSrc)
1564{
1565 int cbDst = OBJPAGELEN;
1566
1567 while (cbSrc > 0)
1568 {
1569 /*
1570 * Bit 0 and 1 is the encoding type.
1571 */
1572 switch (*pbSrc & 0x03)
1573 {
1574 /*
1575 *
1576 * 0 1 2 3 4 5 6 7
1577 * type | |
1578 * ----------------
1579 * cb <cb bytes of data>
1580 *
1581 * Bits 2-7 is, if not zero, the length of an uncompressed run
1582 * starting at the following byte.
1583 *
1584 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1585 * type | | | | | |
1586 * ---------------- ---------------------- -----------------------
1587 * zero cb char to multiply
1588 *
1589 * If the bits are zero, the following two bytes describes a 1 byte interation
1590 * run. First byte is count, second is the byte to copy. A count of zero is
1591 * means end of data, and we simply stops. In that case the rest of the data
1592 * should be zero.
1593 */
1594 case 0:
1595 {
1596 if (*pbSrc)
1597 {
1598 const int cb = *pbSrc >> 2;
1599 cbDst -= cb;
1600 if (cbDst < 0)
1601 return KLDR_ERR_LX_BAD_ITERDATA2;
1602 cbSrc -= cb + 1;
1603 if (cbSrc < 0)
1604 return KLDR_ERR_LX_BAD_ITERDATA2;
1605 kHlpMemCopy(pbDst, ++pbSrc, cb);
1606 pbDst += cb;
1607 pbSrc += cb;
1608 }
1609 else if (cbSrc < 2)
1610 return KLDR_ERR_LX_BAD_ITERDATA2;
1611 else
1612 {
1613 const int cb = pbSrc[1];
1614 if (!cb)
1615 goto l_endloop;
1616 cbDst -= cb;
1617 if (cbDst < 0)
1618 return KLDR_ERR_LX_BAD_ITERDATA2;
1619 cbSrc -= 3;
1620 if (cbSrc < 0)
1621 return KLDR_ERR_LX_BAD_ITERDATA2;
1622 kHlpMemSet(pbDst, pbSrc[2], cb);
1623 pbDst += cb;
1624 pbSrc += 3;
1625 }
1626 break;
1627 }
1628
1629
1630 /*
1631 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1632 * type | | | | | |
1633 * ---- ------- -------------------------
1634 * cb1 cb2 - 3 offset <cb1 bytes of data>
1635 *
1636 * Two bytes layed out as described above, followed by cb1 bytes of data to be copied.
1637 * The cb2(+3) and offset describes an amount of data to be copied from the expanded
1638 * data relative to the current position. The data copied as you would expect it to be.
1639 */
1640 case 1:
1641 {
1642 cbSrc -= 2;
1643 if (cbSrc < 0)
1644 return KLDR_ERR_LX_BAD_ITERDATA2;
1645 else
1646 {
1647 const unsigned off = ((unsigned)pbSrc[1] << 1) | (*pbSrc >> 7);
1648 const int cb1 = (*pbSrc >> 2) & 3;
1649 const int cb2 = ((*pbSrc >> 4) & 7) + 3;
1650
1651 pbSrc += 2;
1652 cbSrc -= cb1;
1653 if (cbSrc < 0)
1654 return KLDR_ERR_LX_BAD_ITERDATA2;
1655 cbDst -= cb1;
1656 if (cbDst < 0)
1657 return KLDR_ERR_LX_BAD_ITERDATA2;
1658 kHlpMemCopy(pbDst, pbSrc, cb1);
1659 pbDst += cb1;
1660 pbSrc += cb1;
1661
1662 if (off > OBJPAGELEN - cbDst)
1663 return KLDR_ERR_LX_BAD_ITERDATA2;
1664 cbDst -= cb2;
1665 if (cbDst < 0)
1666 return KLDR_ERR_LX_BAD_ITERDATA2;
1667 kHlpMemMove(pbDst, pbDst - off, cb2);
1668 pbDst += cb2;
1669 }
1670 break;
1671 }
1672
1673
1674 /*
1675 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
1676 * type | | | |
1677 * ---- ----------------------------------
1678 * cb-3 offset
1679 *
1680 * Two bytes layed out as described above.
1681 * The cb(+3) and offset describes an amount of data to be copied from the expanded
1682 * data relative to the current position.
1683 *
1684 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1685 */
1686 case 2:
1687 {
1688 cbSrc -= 2;
1689 if (cbSrc < 0)
1690 return KLDR_ERR_LX_BAD_ITERDATA2;
1691 else
1692 {
1693 const unsigned off = ((unsigned)pbSrc[1] << 4) | (*pbSrc >> 4);
1694 const int cb = ((*pbSrc >> 2) & 3) + 3;
1695
1696 pbSrc += 2;
1697 if (off > OBJPAGELEN - cbDst)
1698 return KLDR_ERR_LX_BAD_ITERDATA2;
1699 cbDst -= cb;
1700 if (cbDst < 0)
1701 return KLDR_ERR_LX_BAD_ITERDATA2;
1702 kLdrModLXMemCopyW(pbDst, pbDst - off, cb);
1703 pbDst += cb;
1704 }
1705 break;
1706 }
1707
1708
1709 /*
1710 * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
1711 * type | | | | | |
1712 * ---------- ---------------- ----------------------------------
1713 * cb1 cb2 offset <cb1 bytes of data>
1714 *
1715 * Three bytes layed out as described above, followed by cb1 bytes of data to be copied.
1716 * The cb2 and offset describes an amount of data to be copied from the expanded
1717 * data relative to the current position.
1718 *
1719 * If offset == 1 the data is not copied as expected, but in the memcpyw manner.
1720 */
1721 case 3:
1722 {
1723 cbSrc -= 3;
1724 if (cbSrc < 0)
1725 return KLDR_ERR_LX_BAD_ITERDATA2;
1726 else
1727 {
1728 const int cb1 = (*pbSrc >> 2) & 0xf;
1729 const int cb2 = ((pbSrc[1] & 0xf) << 2) | (*pbSrc >> 6);
1730 const unsigned off = ((unsigned)pbSrc[2] << 4) | (pbSrc[1] >> 4);
1731
1732 pbSrc += 3;
1733 cbSrc -= cb1;
1734 if (cbSrc < 0)
1735 return KLDR_ERR_LX_BAD_ITERDATA2;
1736 cbDst -= cb1;
1737 if (cbDst < 0)
1738 return KLDR_ERR_LX_BAD_ITERDATA2;
1739 kHlpMemCopy(pbDst, pbSrc, cb1);
1740 pbDst += cb1;
1741 pbSrc += cb1;
1742
1743 if (off > OBJPAGELEN - cbDst)
1744 return KLDR_ERR_LX_BAD_ITERDATA2;
1745 cbDst -= cb2;
1746 if (cbDst < 0)
1747 return KLDR_ERR_LX_BAD_ITERDATA2;
1748 kLdrModLXMemCopyW(pbDst, pbDst - off, cb2);
1749 pbDst += cb2;
1750 }
1751 break;
1752 }
1753 } /* type switch. */
1754 } /* unpack loop */
1755
1756l_endloop:
1757
1758
1759 /*
1760 * Zero remainder of the page.
1761 */
1762 if (cbDst > 0)
1763 kHlpMemSet(pbDst, 0, cbDst);
1764
1765 return 0;
1766}
1767
1768
1769/**
1770 * Special memcpy employed by the iterdata2 algorithm.
1771 *
1772 * Emulate a 16-bit memcpy (copying 16-bit at a time) and the effects this
1773 * has if src is very close to the destination.
1774 *
1775 * @param pbDst Destination pointer.
1776 * @param pbSrc Source pointer. Will always be <= pbDst.
1777 * @param cb Amount of data to be copied.
1778 * @remark This assumes that unaligned word and dword access is fine.
1779 */
1780static void kLdrModLXMemCopyW(KU8 *pbDst, const KU8 *pbSrc, int cb)
1781{
1782 switch (pbDst - pbSrc)
1783 {
1784 case 0:
1785 case 1:
1786 case 2:
1787 case 3:
1788 /* 16-bit copy (unaligned) */
1789 if (cb & 1)
1790 *pbDst++ = *pbSrc++;
1791 for (cb >>= 1; cb > 0; cb--, pbDst += 2, pbSrc += 2)
1792 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1793 break;
1794
1795 default:
1796 /* 32-bit copy (unaligned) */
1797 if (cb & 1)
1798 *pbDst++ = *pbSrc++;
1799 if (cb & 2)
1800 {
1801 *(KU16 *)pbDst = *(const KU16 *)pbSrc;
1802 pbDst += 2;
1803 pbSrc += 2;
1804 }
1805 for (cb >>= 2; cb > 0; cb--, pbDst += 4, pbSrc += 4)
1806 *(KU32 *)pbDst = *(const KU32 *)pbSrc;
1807 break;
1808 }
1809}
1810
1811
1812/**
1813 * Unprotects or protects the specified image mapping.
1814 *
1815 * @returns 0 on success.
1816 * @returns non-zero kLdr or OS status code on failure.
1817 *
1818 * @param pModLX The LX module interpreter instance.
1819 * @param pvBits The mapping to protect.
1820 * @param UnprotectOrProtect If 1 unprotect (i.e. make all writable), otherwise
1821 * protect according to the object table.
1822 */
1823static int kldrModLXDoProtect(PKLDRMODLX pModLX, void *pvBits, unsigned fUnprotectOrProtect)
1824{
1825 KU32 i;
1826 PKLDRMOD pMod = pModLX->pMod;
1827
1828 /*
1829 * Change object protection.
1830 */
1831 for (i = 0; i < pMod->cSegments; i++)
1832 {
1833 int rc;
1834 void *pv;
1835 KPROT enmProt;
1836
1837 /* calc new protection. */
1838 enmProt = pMod->aSegments[i].enmProt;
1839 if (fUnprotectOrProtect)
1840 {
1841 switch (enmProt)
1842 {
1843 case KPROT_NOACCESS:
1844 case KPROT_READONLY:
1845 case KPROT_READWRITE:
1846 case KPROT_WRITECOPY:
1847 enmProt = KPROT_READWRITE;
1848 break;
1849 case KPROT_EXECUTE:
1850 case KPROT_EXECUTE_READ:
1851 case KPROT_EXECUTE_READWRITE:
1852 case KPROT_EXECUTE_WRITECOPY:
1853 enmProt = KPROT_EXECUTE_READWRITE;
1854 break;
1855 default:
1856 KLDRMODLX_ASSERT(!"bad enmProt");
1857 return -1;
1858 }
1859 }
1860 else
1861 {
1862 /* copy on write -> normal write. */
1863 if (enmProt == KPROT_EXECUTE_WRITECOPY)
1864 enmProt = KPROT_EXECUTE_READWRITE;
1865 else if (enmProt == KPROT_WRITECOPY)
1866 enmProt = KPROT_READWRITE;
1867 }
1868
1869
1870 /* calc the address and set page protection. */
1871 pv = (KU8 *)pvBits + pMod->aSegments[i].RVA;
1872
1873 rc = kHlpPageProtect(pv, pMod->aSegments[i].cbMapped, enmProt);
1874 if (rc)
1875 break;
1876
1877 /** @todo the gap page should be marked NOACCESS! */
1878 }
1879
1880 return 0;
1881}
1882
1883
1884/** @copydoc kLdrModUnmap */
1885static int kldrModLXUnmap(PKLDRMOD pMod)
1886{
1887 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1888 KU32 i;
1889 int rc;
1890
1891 /*
1892 * Mapped?
1893 */
1894 if (!pModLX->pvMapping)
1895 return KLDR_ERR_NOT_MAPPED;
1896
1897 /*
1898 * Free the mapping and update the segments.
1899 */
1900 rc = kHlpPageFree((void *)pModLX->pvMapping, pModLX->cbMapped);
1901 KLDRMODLX_ASSERT(!rc);
1902 pModLX->pvMapping = NULL;
1903
1904 for (i = 0; i < pMod->cSegments; i++)
1905 pMod->aSegments[i].MapAddress = 0;
1906
1907 return rc;
1908}
1909
1910
1911/** @copydoc kLdrModAllocTLS */
1912static int kldrModLXAllocTLS(PKLDRMOD pMod)
1913{
1914 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1915
1916 /* no tls, just do the error checking. */
1917 if (!pModLX->pvMapping)
1918 return KLDR_ERR_NOT_MAPPED;
1919 return 0;
1920}
1921
1922
1923/** @copydoc kLdrModFreeTLS */
1924static void kldrModLXFreeTLS(PKLDRMOD pMod)
1925{
1926 /* no tls. */
1927}
1928
1929
1930/** @copydoc kLdrModReload */
1931static int kldrModLXReload(PKLDRMOD pMod)
1932{
1933 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1934 int rc, rc2;
1935
1936 /*
1937 * Mapped?
1938 */
1939 if (!pModLX->pvMapping)
1940 return KLDR_ERR_NOT_MAPPED;
1941
1942 /*
1943 * Before doing anything we'll have to make all pages writable.
1944 */
1945 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1946 if (rc)
1947 return rc;
1948
1949 /*
1950 * Load the bits again.
1951 */
1952 rc = kldrModLXDoLoadBits(pModLX, (void *)pModLX->pvMapping);
1953
1954 /*
1955 * Restore protection.
1956 */
1957 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1958 if (!rc && rc2)
1959 rc = rc2;
1960 return rc;
1961}
1962
1963
1964/** @copydoc kLdrModFixupMapping */
1965static int kldrModLXFixupMapping(PKLDRMOD pMod, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
1966{
1967 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
1968 int rc, rc2;
1969
1970 /*
1971 * Mapped?
1972 */
1973 if (!pModLX->pvMapping)
1974 return KLDR_ERR_NOT_MAPPED;
1975
1976 /*
1977 * Before doing anything we'll have to make all pages writable.
1978 */
1979 rc = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 1 /* unprotect */);
1980 if (rc)
1981 return rc;
1982
1983 /*
1984 * Apply fixups and resolve imports.
1985 */
1986 rc = kldrModLXRelocateBits(pMod, (void *)pModLX->pvMapping, (KUPTR)pModLX->pvMapping,
1987 pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
1988
1989 /*
1990 * Restore protection.
1991 */
1992 rc2 = kldrModLXDoProtect(pModLX, (void *)pModLX->pvMapping, 0 /* protect */);
1993 if (!rc && rc2)
1994 rc = rc2;
1995 return rc;
1996}
1997
1998
1999/** @copydoc kLdrModCallInit */
2000static int kldrModLXCallInit(PKLDRMOD pMod, KUPTR uHandle)
2001{
2002 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2003 int rc;
2004
2005 /*
2006 * Mapped?
2007 */
2008 if (!pModLX->pvMapping)
2009 return KLDR_ERR_NOT_MAPPED;
2010
2011 /*
2012 * Do TLS callbacks first and then call the init/term function if it's a DLL.
2013 */
2014 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2015 rc = kldrModLXDoCallDLL(pModLX, 0 /* attach */, uHandle);
2016 else
2017 rc = 0;
2018 return rc;
2019}
2020
2021
2022/**
2023 * Call the DLL entrypoint.
2024 *
2025 * @returns 0 on success.
2026 * @returns KLDR_ERR_MODULE_INIT_FAILED or KLDR_ERR_THREAD_ATTACH_FAILED on failure.
2027 * @param pModLX The LX module interpreter instance.
2028 * @param uOp The operation (DLL_*).
2029 * @param uHandle The module handle to present.
2030 */
2031static int kldrModLXDoCallDLL(PKLDRMODLX pModLX, unsigned uOp, KUPTR uHandle)
2032{
2033 int rc;
2034
2035 /*
2036 * If no entrypoint there isn't anything to be done.
2037 */
2038 if ( !pModLX->Hdr.e32_startobj
2039 || pModLX->Hdr.e32_startobj > pModLX->Hdr.e32_objcnt)
2040 return 0;
2041
2042 /*
2043 * Invoke the entrypoint and convert the boolean result to a kLdr status code.
2044 */
2045 rc = kldrModLXDoCall((KUPTR)pModLX->pvMapping
2046 + (KUPTR)pModLX->pMod->aSegments[pModLX->Hdr.e32_startobj - 1].RVA
2047 + pModLX->Hdr.e32_eip,
2048 uHandle, uOp, NULL);
2049 if (rc)
2050 rc = 0;
2051 else if (uOp == 0 /* attach */)
2052 rc = KLDR_ERR_MODULE_INIT_FAILED;
2053 else /* detach: ignore failures */
2054 rc = 0;
2055 return rc;
2056}
2057
2058
2059/**
2060 * Do a 3 parameter callback.
2061 *
2062 * @returns 32-bit callback return.
2063 * @param uEntrypoint The address of the function to be called.
2064 * @param uHandle The first argument, the module handle.
2065 * @param uOp The second argumnet, the reason we're calling.
2066 * @param pvReserved The third argument, reserved argument. (figure this one out)
2067 */
2068static KI32 kldrModLXDoCall(KUPTR uEntrypoint, KUPTR uHandle, KU32 uOp, void *pvReserved)
2069{
2070#if defined(__X86__) || defined(__i386__) || defined(_M_IX86)
2071 KI32 rc;
2072/** @todo try/except */
2073
2074 /*
2075 * Paranoia.
2076 */
2077# ifdef __GNUC__
2078 __asm__ __volatile__(
2079 "pushl %2\n\t"
2080 "pushl %1\n\t"
2081 "pushl %0\n\t"
2082 "lea 12(%%esp), %2\n\t"
2083 "call *%3\n\t"
2084 "movl %2, %%esp\n\t"
2085 : "=a" (rc)
2086 : "d" (uOp),
2087 "S" (0),
2088 "c" (uEntrypoint),
2089 "0" (uHandle));
2090# elif defined(_MSC_VER)
2091 __asm {
2092 mov eax, [uHandle]
2093 mov edx, [uOp]
2094 mov ecx, 0
2095 mov ebx, [uEntrypoint]
2096 push edi
2097 mov edi, esp
2098 push ecx
2099 push edx
2100 push eax
2101 call ebx
2102 mov esp, edi
2103 pop edi
2104 mov [rc], eax
2105 }
2106# else
2107# error "port me!"
2108# endif
2109 return rc;
2110
2111#else
2112 return KCPU_ERR_ARCH_CPU_NOT_COMPATIBLE;
2113#endif
2114}
2115
2116
2117/** @copydoc kLdrModCallTerm */
2118static int kldrModLXCallTerm(PKLDRMOD pMod, KUPTR uHandle)
2119{
2120 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2121
2122 /*
2123 * Mapped?
2124 */
2125 if (!pModLX->pvMapping)
2126 return KLDR_ERR_NOT_MAPPED;
2127
2128 /*
2129 * Do the call.
2130 */
2131 if ((pModLX->Hdr.e32_mflags & E32MODMASK) == E32MODDLL)
2132 kldrModLXDoCallDLL(pModLX, 1 /* detach */, uHandle);
2133
2134 return 0;
2135}
2136
2137
2138/** @copydoc kLdrModCallThread */
2139static int kldrModLXCallThread(PKLDRMOD pMod, KUPTR uHandle, unsigned fAttachingOrDetaching)
2140{
2141 /* no thread attach/detach callout. */
2142 return 0;
2143}
2144
2145
2146/** @copydoc kLdrModSize */
2147static KLDRADDR kldrModLXSize(PKLDRMOD pMod)
2148{
2149 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2150 return pModLX->cbMapped;
2151}
2152
2153
2154/** @copydoc kLdrModGetBits */
2155static int kldrModLXGetBits(PKLDRMOD pMod, void *pvBits, KLDRADDR BaseAddress, PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2156{
2157 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2158 int rc;
2159
2160 /*
2161 * Load the image bits.
2162 */
2163 rc = kldrModLXDoLoadBits(pModLX, pvBits);
2164 if (rc)
2165 return rc;
2166
2167 /*
2168 * Perform relocations.
2169 */
2170 return kldrModLXRelocateBits(pMod, pvBits, BaseAddress, pMod->aSegments[0].LinkAddress, pfnGetImport, pvUser);
2171
2172}
2173
2174
2175/** @copydoc kLdrModRelocateBits */
2176static int kldrModLXRelocateBits(PKLDRMOD pMod, void *pvBits, KLDRADDR NewBaseAddress, KLDRADDR OldBaseAddress,
2177 PFNKLDRMODGETIMPORT pfnGetImport, void *pvUser)
2178{
2179 PKLDRMODLX pModLX = (PKLDRMODLX)pMod->pvData;
2180 KU32 iSeg;
2181 int rc;
2182
2183 /*
2184 * Do we need to to *anything*?
2185 */
2186 if ( NewBaseAddress == OldBaseAddress
2187 && NewBaseAddress == pModLX->paObjs[0].o32_base
2188 && !pModLX->Hdr.e32_impmodcnt)
2189 return 0;
2190
2191 /*
2192 * Load the fixup section.
2193 */
2194 if (!pModLX->pbFixupSection)
2195 {
2196 rc = kldrModLXDoLoadFixupSection(pModLX);
2197 if (rc)
2198 return rc;
2199 }
2200
2201 /*
2202 * Iterate the segments.
2203 */
2204 for (iSeg = 0; iSeg < pModLX->Hdr.e32_objcnt; iSeg++)
2205 {
2206 const struct o32_obj * const pObj = &pModLX->paObjs[iSeg];
2207 KLDRADDR PageAddress = NewBaseAddress + pModLX->pMod->aSegments[iSeg].RVA;
2208 KU32 iPage;
2209 KU8 *pbPage = (KU8 *)pvBits + (KUPTR)pModLX->pMod->aSegments[iSeg].RVA;
2210
2211 /*
2212 * Iterate the page map pages.
2213 */
2214 for (iPage = 0, rc = 0; !rc && iPage < pObj->o32_mapsize; iPage++, pbPage += OBJPAGELEN, PageAddress += OBJPAGELEN)
2215 {
2216 const KU8 * const pbFixupRecEnd = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap];
2217 const KU8 *pb = pModLX->pbFixupRecs + pModLX->paoffPageFixups[iPage + pObj->o32_pagemap - 1];
2218 KLDRADDR uValue;
2219 int iSelector;
2220 KU32 fKind;
2221
2222 /* sanity */
2223 if (pbFixupRecEnd < pb)
2224 return KLDR_ERR_BAD_FIXUP;
2225 if (pbFixupRecEnd - 1 > pModLX->pbFixupSectionLast)
2226 return KLDR_ERR_BAD_FIXUP;
2227 if (pb < pModLX->pbFixupSection)
2228 return KLDR_ERR_BAD_FIXUP;
2229
2230 /*
2231 * Iterate the fixup record.
2232 */
2233 while (pb < pbFixupRecEnd)
2234 {
2235 union _rel
2236 {
2237 const KU8 * pb;
2238 const struct r32_rlc *prlc;
2239 } u;
2240
2241 u.pb = pb;
2242 pb += 3 + (u.prlc->nr_stype & NRCHAIN ? 0 : 1); /* place pch at the 4th member. */
2243
2244 /*
2245 * Figure out the target.
2246 */
2247 switch (u.prlc->nr_flags & NRRTYP)
2248 {
2249 /*
2250 * Internal fixup.
2251 */
2252 case NRRINT:
2253 {
2254 KU16 iTrgObject;
2255 KU32 offTrgObject;
2256
2257 /* the object */
2258 if (u.prlc->nr_flags & NR16OBJMOD)
2259 {
2260 iTrgObject = *(const KU16 *)pb;
2261 pb += 2;
2262 }
2263 else
2264 iTrgObject = *pb++;
2265 iTrgObject--;
2266 if (iTrgObject >= pModLX->Hdr.e32_objcnt)
2267 return KLDR_ERR_BAD_FIXUP;
2268
2269 /* the target */
2270 if ((u.prlc->nr_stype & NRSRCMASK) != NRSSEG)
2271 {
2272 if (u.prlc->nr_flags & NR32BITOFF)
2273 {
2274 offTrgObject = *(const KU32 *)pb;
2275 pb += 4;
2276 }
2277 else
2278 {
2279 offTrgObject = *(const KU16 *)pb;
2280 pb += 2;
2281 }
2282
2283 /* calculate the symbol info. */
2284 uValue = offTrgObject + NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2285 }
2286 else
2287 uValue = NewBaseAddress + pMod->aSegments[iTrgObject].RVA;
2288 if ( (u.prlc->nr_stype & NRALIAS)
2289 || (pMod->aSegments[iTrgObject].fFlags & KLDRSEG_FLAG_16BIT))
2290 iSelector = pMod->aSegments[iTrgObject].Sel16bit;
2291 else
2292 iSelector = pMod->aSegments[iTrgObject].SelFlat;
2293 fKind = 0;
2294 break;
2295 }
2296
2297 /*
2298 * Import by symbol ordinal.
2299 */
2300 case NRRORD:
2301 {
2302 KU16 iModule;
2303 KU32 iSymbol;
2304
2305 /* the module ordinal */
2306 if (u.prlc->nr_flags & NR16OBJMOD)
2307 {
2308 iModule = *(const KU16 *)pb;
2309 pb += 2;
2310 }
2311 else
2312 iModule = *pb++;
2313 iModule--;
2314 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2315 return KLDR_ERR_BAD_FIXUP;
2316#if 1
2317 if (u.prlc->nr_flags & NRICHAIN)
2318 return KLDR_ERR_BAD_FIXUP;
2319#endif
2320
2321 /* . */
2322 if (u.prlc->nr_flags & NR32BITOFF)
2323 {
2324 iSymbol = *(const KU32 *)pb;
2325 pb += 4;
2326 }
2327 else if (!(u.prlc->nr_flags & NR8BITORD))
2328 {
2329 iSymbol = *(const KU16 *)pb;
2330 pb += 2;
2331 }
2332 else
2333 iSymbol = *pb++;
2334
2335 /* resolve it. */
2336 rc = pfnGetImport(pMod, iModule, iSymbol, NULL, 0, NULL, &uValue, &fKind, pvUser);
2337 if (rc)
2338 return rc;
2339 iSelector = -1;
2340 break;
2341 }
2342
2343 /*
2344 * Import by symbol name.
2345 */
2346 case NRRNAM:
2347 {
2348 KU32 iModule;
2349 KU16 offSymbol;
2350 const KU8 *pbSymbol;
2351
2352 /* the module ordinal */
2353 if (u.prlc->nr_flags & NR16OBJMOD)
2354 {
2355 iModule = *(const KU16 *)pb;
2356 pb += 2;
2357 }
2358 else
2359 iModule = *pb++;
2360 iModule--;
2361 if (iModule >= pModLX->Hdr.e32_impmodcnt)
2362 return KLDR_ERR_BAD_FIXUP;
2363#if 1
2364 if (u.prlc->nr_flags & NRICHAIN)
2365 return KLDR_ERR_BAD_FIXUP;
2366#endif
2367
2368 /* . */
2369 if (u.prlc->nr_flags & NR32BITOFF)
2370 {
2371 offSymbol = *(const KU32 *)pb;
2372 pb += 4;
2373 }
2374 else if (!(u.prlc->nr_flags & NR8BITORD))
2375 {
2376 offSymbol = *(const KU16 *)pb;
2377 pb += 2;
2378 }
2379 else
2380 offSymbol = *pb++;
2381 pbSymbol = pModLX->pbImportProcs + offSymbol;
2382 if ( pbSymbol < pModLX->pbImportProcs
2383 || pbSymbol > pModLX->pbFixupSectionLast)
2384 return KLDR_ERR_BAD_FIXUP;
2385
2386 /* resolve it. */
2387 rc = pfnGetImport(pMod, iModule, NIL_KLDRMOD_SYM_ORDINAL, (const char *)pbSymbol + 1, *pbSymbol, NULL,
2388 &uValue, &fKind, pvUser);
2389 if (rc)
2390 return rc;
2391 iSelector = -1;
2392 break;
2393 }
2394
2395 case NRRENT:
2396 KLDRMODLX_ASSERT(!"NRRENT");
2397 default:
2398 break;
2399 }
2400
2401 /* addend */
2402 if (u.prlc->nr_flags & NRADD)
2403 {
2404 if (u.prlc->nr_flags & NR32BITADD)
2405 {
2406 uValue += *(const KU32 *)pb;
2407 pb += 4;
2408 }
2409 else
2410 {
2411 uValue += *(const KU16 *)pb;
2412 pb += 2;
2413 }
2414 }
2415
2416
2417 /*
2418 * Deal with the 'source' (i.e. the place that should be modified - very logical).
2419 */
2420 if (!(u.prlc->nr_stype & NRCHAIN))
2421 {
2422 int off = u.prlc->r32_soff;
2423
2424 /* common / simple */
2425 if ( (u.prlc->nr_stype & NRSRCMASK) == NROFF32
2426 && off >= 0
2427 && off <= OBJPAGELEN - 4)
2428 *(KU32 *)&pbPage[off] = uValue;
2429 else if ( (u.prlc->nr_stype & NRSRCMASK) == NRSOFF32
2430 && off >= 0
2431 && off <= OBJPAGELEN - 4)
2432 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2433 else
2434 {
2435 /* generic */
2436 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2437 if (rc)
2438 return rc;
2439 }
2440 }
2441 else if (!(u.prlc->nr_flags & NRICHAIN))
2442 {
2443 const KI16 *poffSrc = (const KI16 *)pb;
2444 KU8 c = u.pb[2];
2445
2446 /* common / simple */
2447 if ((u.prlc->nr_stype & NRSRCMASK) == NROFF32)
2448 {
2449 while (c-- > 0)
2450 {
2451 int off = *poffSrc++;
2452 if (off >= 0 && off <= OBJPAGELEN - 4)
2453 *(KU32 *)&pbPage[off] = uValue;
2454 else
2455 {
2456 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2457 if (rc)
2458 return rc;
2459 }
2460 }
2461 }
2462 else if ((u.prlc->nr_stype & NRSRCMASK) == NRSOFF32)
2463 {
2464 while (c-- > 0)
2465 {
2466 int off = *poffSrc++;
2467 if (off >= 0 && off <= OBJPAGELEN - 4)
2468 *(KU32 *)&pbPage[off] = uValue - (PageAddress + off + 4);
2469 else
2470 {
2471 rc = kldrModLXDoReloc(pbPage, off, PageAddress, u.prlc, iSelector, uValue, fKind);
2472 if (rc)
2473 return rc;
2474 }
2475 }
2476 }
2477 else
2478 {
2479 while (c-- > 0)
2480 {
2481 rc = kldrModLXDoReloc(pbPage, *poffSrc++, PageAddress, u.prlc, iSelector, uValue, fKind);
2482 if (rc)
2483 return rc;
2484 }
2485 }
2486 pb = (const KU8 *)poffSrc;
2487 }
2488 else
2489 {
2490 /* This is a pain because it will require virgin pages on a relocation. */
2491 KLDRMODLX_ASSERT(!"NRICHAIN");
2492 return KLDR_ERR_LX_NRICHAIN_NOT_SUPPORTED;
2493 }
2494 }
2495 }
2496 }
2497
2498 return 0;
2499}
2500
2501
2502/**
2503 * Applies the relocation to one 'source' in a page.
2504 *
2505 * This takes care of the more esotic case while the common cases
2506 * are dealt with seperately.
2507 *
2508 * @returns 0 on success, non-zero kLdr status code on failure.
2509 * @param pbPage The page in which to apply the fixup.
2510 * @param off Page relative offset of where to apply the offset.
2511 * @param uValue The target value.
2512 * @param fKind The target kind.
2513 */
2514static int kldrModLXDoReloc(KU8 *pbPage, int off, KLDRADDR PageAddress, const struct r32_rlc *prlc,
2515 int iSelector, KLDRADDR uValue, KU32 fKind)
2516{
2517#pragma pack(1) /* just to be sure */
2518 union
2519 {
2520 KU8 ab[6];
2521 KU32 off32;
2522 KU16 off16;
2523 KU8 off8;
2524 struct
2525 {
2526 KU16 off;
2527 KU16 Sel;
2528 } Far16;
2529 struct
2530 {
2531 KU32 off;
2532 KU16 Sel;
2533 } Far32;
2534 } uData;
2535#pragma pack()
2536 const KU8 *pbSrc;
2537 KU8 *pbDst;
2538 KU8 cb;
2539
2540 /*
2541 * Compose the fixup data.
2542 */
2543 switch (prlc->nr_stype & NRSRCMASK)
2544 {
2545 case NRSBYT:
2546 uData.off8 = (KU8)uValue;
2547 cb = 1;
2548 break;
2549 case NRSSEG:
2550 if (iSelector == -1)
2551 {
2552 /* fixme */
2553 }
2554 uData.off16 = iSelector;
2555 cb = 2;
2556 break;
2557 case NRSPTR:
2558 if (iSelector == -1)
2559 {
2560 /* fixme */
2561 }
2562 uData.Far16.off = (KU16)uValue;
2563 uData.Far16.Sel = iSelector;
2564 cb = 4;
2565 break;
2566 case NRSOFF:
2567 uData.off16 = (KU16)uValue;
2568 cb = 2;
2569 break;
2570 case NRPTR48:
2571 if (iSelector == -1)
2572 {
2573 /* fixme */
2574 }
2575 uData.Far32.off = (KU32)uValue;
2576 uData.Far32.Sel = iSelector;
2577 cb = 6;
2578 break;
2579 case NROFF32:
2580 uData.off32 = (KU32)uValue;
2581 cb = 4;
2582 break;
2583 case NRSOFF32:
2584 uData.off32 = (KU32)uValue - (PageAddress + off + 4);
2585 cb = 4;
2586 break;
2587 default:
2588 return KLDR_ERR_LX_BAD_FIXUP_SECTION; /** @todo fix error, add more checks! */
2589 }
2590
2591 /*
2592 * Apply it. This is sloooow...
2593 */
2594 pbSrc = &uData.ab[0];
2595 pbDst = pbPage + off;
2596 while (cb-- > 0)
2597 {
2598 if (off > OBJPAGELEN)
2599 break;
2600 if (off >= 0)
2601 *pbDst = *pbSrc;
2602 pbSrc++;
2603 pbDst++;
2604 }
2605
2606 return 0;
2607}
2608
2609
2610/**
2611 * The LX module interpreter method table.
2612 */
2613KLDRMODOPS g_kLdrModLXOps =
2614{
2615 "LX",
2616 NULL,
2617 kldrModLXCreate,
2618 kldrModLXDestroy,
2619 kldrModLXQuerySymbol,
2620 kldrModLXEnumSymbols,
2621 kldrModLXGetImport,
2622 kldrModLXNumberOfImports,
2623 NULL /* can execute one is optional */,
2624 kldrModLXGetStackInfo,
2625 kldrModLXQueryMainEntrypoint,
2626 NULL /* fixme */,
2627 NULL /* fixme */,
2628 kldrModLXEnumDbgInfo,
2629 kldrModLXHasDbgInfo,
2630 kldrModLXMap,
2631 kldrModLXUnmap,
2632 kldrModLXAllocTLS,
2633 kldrModLXFreeTLS,
2634 kldrModLXReload,
2635 kldrModLXFixupMapping,
2636 kldrModLXCallInit,
2637 kldrModLXCallTerm,
2638 kldrModLXCallThread,
2639 kldrModLXSize,
2640 kldrModLXGetBits,
2641 kldrModLXRelocateBits,
2642 NULL /* fixme: pfnMostlyDone */,
2643 42 /* the end */
2644};
2645
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette