Changeset 102733 in vbox
- Timestamp: Dec 29, 2023, 7:40:03 PM
- Location: trunk
- Files: 4 edited
trunk/include/VBox/err.h
(r102699 → r102733)

@@ -2576 +2576 @@
 /** Recompiler: Register allocator internal processing error \#11. */
 #define VERR_IEM_REG_IPE_11                     (-5353)
+/** Recompiler: Register allocator internal processing error \#12. */
+#define VERR_IEM_REG_IPE_12                     (-5354)
+/** Recompiler: Register allocator internal processing error \#13. */
+#define VERR_IEM_REG_IPE_13                     (-5355)
 
 /** Recompiler: Out of variables. */

@@ -2609 +2613 @@
 /** Recompiler: Variable management internal processing error \#11. */
 #define VERR_IEM_VAR_IPE_11                     (-5375)
+/** Recompiler: Variable management internal processing error \#12. */
+#define VERR_IEM_VAR_IPE_12                     (-5376)
+/** Recompiler: Variable management internal processing error \#13. */
+#define VERR_IEM_VAR_IPE_13                     (-5377)
 
 /** Recompiler: Unimplemented case. */
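All four new codes extend the existing internal-processing-error (IPE) numbering and are raised through the recompiler's assert-and-longjmp pattern. For instance, the restore function added in IEMAllN8veRecompiler.cpp below bails out with the new VERR_IEM_REG_IPE_12 like this (verbatim from the change):

    AssertStmt(fGstRegShadows != 0 && fGstRegShadows < RT_BIT_64(kIemNativeGstReg_End),
               IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_12));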
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
(r102724 → r102733)

@@ -4208 +4208 @@
 
 /**
+ * Flushes guest register shadow copies held by a set of host registers.
+ *
+ * This is used with the TLB lookup code for ensuring that we don't carry on
+ * with any guest shadows in volatile registers, as these will get corrupted by
+ * a TLB miss.
+ *
+ * @param   pReNative   The native recompile state.
+ * @param   fHstRegs    Set of host registers to flush guest shadows for.
+ */
+DECLHIDDEN(void) iemNativeRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegs) RT_NOEXCEPT
+{
+    /*
+     * Reduce the mask by what's currently shadowed.
+     */
+    uint32_t const bmHstRegsWithGstShadowOld = pReNative->Core.bmHstRegsWithGstShadow;
+    fHstRegs &= bmHstRegsWithGstShadowOld;
+    if (fHstRegs)
+    {
+        uint32_t const bmHstRegsWithGstShadowNew = bmHstRegsWithGstShadowOld & ~fHstRegs;
+        Log12(("iemNativeRegFlushGuestShadowsByHostMask: flushing %#RX32 (%#RX32 -> %#RX32)\n",
+               fHstRegs, bmHstRegsWithGstShadowOld, bmHstRegsWithGstShadowNew));
+        pReNative->Core.bmHstRegsWithGstShadow = bmHstRegsWithGstShadowNew;
+        if (bmHstRegsWithGstShadowNew)
+        {
+            /*
+             * Partial (likely).
+             */
+            uint64_t fGstShadows = 0;
+            do
+            {
+                unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
+                Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
+                Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
+                       == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
+
+                fGstShadows |= pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
+                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
+                fHstRegs &= ~RT_BIT_32(idxHstReg);
+            } while (fHstRegs != 0);
+            pReNative->Core.bmGstRegShadows &= ~fGstShadows;
+        }
+        else
+        {
+            /*
+             * Clear all.
+             */
+            do
+            {
+                unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
+                Assert(!(pReNative->Core.bmHstRegs & RT_BIT_32(idxHstReg)));
+                Assert(   (pReNative->Core.bmGstRegShadows & pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows)
+                       == pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows);
+
+                pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows = 0;
+                fHstRegs &= ~RT_BIT_32(idxHstReg);
+            } while (fHstRegs != 0);
+            pReNative->Core.bmGstRegShadows = 0;
+        }
+    }
+}
+
+
+/**
+ * Restores guest shadow copies in volatile registers.
+ *
+ * This is used after calling a helper function (think TLB miss) to restore the
+ * register state of volatile registers.
+ *
+ * @returns New @a off value.
+ * @param   pReNative               The native recompile state.
+ * @param   off                     The code buffer position.
+ * @param   fHstRegsActiveShadows   Set of host registers that may legitimately
+ *                                  still hold active shadows (assertions only).
+ * @see     iemNativeVarSaveVolatileRegsPreHlpCall(),
+ *          iemNativeVarRestoreVolatileRegsPostHlpCall()
+ */
+DECL_HIDDEN_THROW(uint32_t)
+iemNativeRegRestoreGuestShadowsInVolatileRegs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsActiveShadows)
+{
+    uint32_t fHstRegs = pReNative->Core.bmHstRegsWithGstShadow & IEMNATIVE_CALL_VOLATILE_GREG_MASK;
+    if (fHstRegs)
+    {
+        Log12(("iemNativeRegRestoreGuestShadowsInVolatileRegs: %#RX32\n", fHstRegs));
+        do
+        {
+            unsigned const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
+
+            /* It's not fatal if a register is active holding a variable that
+               shadows a guest register, ASSUMING all pending guest register
+               writes were flushed prior to the helper call.  However, we'll be
+               emitting duplicate restores, so it wastes code space. */
+            Assert(!(pReNative->Core.bmHstRegs & ~fHstRegsActiveShadows & RT_BIT_32(idxHstReg)));
+            RT_NOREF(fHstRegsActiveShadows);
+
+            uint64_t const fGstRegShadows = pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows;
+            Assert((pReNative->Core.bmGstRegShadows & fGstRegShadows) == fGstRegShadows);
+            AssertStmt(fGstRegShadows != 0 && fGstRegShadows < RT_BIT_64(kIemNativeGstReg_End),
+                       IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_12));
+
+            unsigned const idxGstReg = ASMBitFirstSetU64(fGstRegShadows) - 1;
+            off = iemNativeEmitLoadGprWithGstShadowReg(pReNative, off, idxHstReg, (IEMNATIVEGSTREG)idxGstReg);
+
+            fHstRegs &= ~RT_BIT_32(idxHstReg);
+        } while (fHstRegs != 0);
+    }
+    return off;
+}
+
+
+/**
  * Flushes delayed write of a specific guest register.
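Both functions above walk a host-register bitmask with ASMBitFirstSetU32, clearing one bit per iteration. A minimal standalone illustration of that idiom follows; bitFirstSetU32 is a hypothetical stand-in for IPRT's ASMBitFirstSetU32 (1-based index of the first set bit, 0 for an empty mask) and assumes a GCC/Clang compiler for __builtin_ctz:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for IPRT's ASMBitFirstSetU32, illustration only. */
    static unsigned bitFirstSetU32(uint32_t fMask)
    {
        return fMask != 0 ? (unsigned)__builtin_ctz(fMask) + 1 : 0;
    }

    int main(void)
    {
        uint32_t fHstRegs = UINT32_C(0x8e); /* example mask: host registers 1, 2, 3 and 7 */
        while (fHstRegs != 0)
        {
            unsigned const idxHstReg = bitFirstSetU32(fHstRegs) - 1;
            printf("processing host register %u\n", idxHstReg);
            fHstRegs &= ~(UINT32_C(1) << idxHstReg); /* clear the bit just handled */
        }
        return 0;
    }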
@@ -6977 +7084 @@
     pReNative->Core.aVars[idxVar].fRegAcquired = true;
     return idxReg;
+}
+
+
+/**
+ * Emit code to save volatile registers prior to a call to a helper (TLB miss).
+ *
+ * This is used together with iemNativeVarRestoreVolatileRegsPostHlpCall() and
+ * optionally iemNativeRegRestoreGuestShadowsInVolatileRegs() to bypass the
+ * requirement of flushing anything in volatile host registers when making a
+ * call.
+ *
+ * @returns New @a off value.
+ * @param   pReNative           The recompiler state.
+ * @param   off                 The code buffer position.
+ * @param   fHstRegsNotToSave   Set of registers not to save & restore.
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeVarSaveVolatileRegsPreHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
+{
+    uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~fHstRegsNotToSave;
+    if (fHstRegs)
+    {
+        do
+        {
+            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
+            fHstRegs &= ~RT_BIT_32(idxHstReg);
+
+            if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
+            {
+                uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
+                AssertStmt(   idxVar < RT_ELEMENTS(pReNative->Core.aVars)
+                           && (pReNative->Core.bmVars & RT_BIT_32(idxVar))
+                           && pReNative->Core.aVars[idxVar].idxReg == idxHstReg,
+                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
+                switch (pReNative->Core.aVars[idxVar].enmKind)
+                {
+                    case kIemNativeVarKind_Stack:
+                    {
+                        /* Temporarily spill the variable register. */
+                        uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
+                        Log12(("iemNativeVarSaveVolatileRegsPreHlpCall: spilling idxVar=%d/idxReg=%d onto the stack (slot %#x bp+%d, off=%#x)\n",
+                               idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
+                        off = iemNativeEmitStoreGprByBp(pReNative, off, iemNativeStackCalcBpDisp(idxStackSlot), idxHstReg);
+                        continue;
+                    }
+
+                    case kIemNativeVarKind_Immediate:
+                    case kIemNativeVarKind_VarRef:
+                    case kIemNativeVarKind_GstRegRef:
+                        /* It is weird to have any of these loaded at this point. */
+                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
+                        continue;
+
+                    case kIemNativeVarKind_End:
+                    case kIemNativeVarKind_Invalid:
+                        break;
+                }
+                AssertFailed();
+            }
+        } while (fHstRegs);
+    }
+    return off;
+}
+
+
+/**
+ * Emit code to restore volatile registers after a call to a helper.
+ *
+ * @returns New @a off value.
+ * @param   pReNative           The recompiler state.
+ * @param   off                 The code buffer position.
+ * @param   fHstRegsNotToSave   Set of registers not to save & restore.
+ * @see     iemNativeVarSaveVolatileRegsPreHlpCall(),
+ *          iemNativeRegRestoreGuestShadowsInVolatileRegs()
+ */
+DECL_INLINE_THROW(uint32_t)
+iemNativeVarRestoreVolatileRegsPostHlpCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fHstRegsNotToSave)
+{
+    uint32_t fHstRegs = pReNative->Core.bmHstRegs & IEMNATIVE_CALL_VOLATILE_GREG_MASK & ~fHstRegsNotToSave;
+    if (fHstRegs)
+    {
+        do
+        {
+            unsigned int const idxHstReg = ASMBitFirstSetU32(fHstRegs) - 1;
+            fHstRegs &= ~RT_BIT_32(idxHstReg);
+
+            if (pReNative->Core.aHstRegs[idxHstReg].enmWhat == kIemNativeWhat_Var)
+            {
+                uint8_t const idxVar = pReNative->Core.aHstRegs[idxHstReg].idxVar;
+                AssertStmt(   idxVar < RT_ELEMENTS(pReNative->Core.aVars)
+                           && (pReNative->Core.bmVars & RT_BIT_32(idxVar))
+                           && pReNative->Core.aVars[idxVar].idxReg == idxHstReg,
+                           IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_12));
+                switch (pReNative->Core.aVars[idxVar].enmKind)
+                {
+                    case kIemNativeVarKind_Stack:
+                    {
+                        /* Unspill the variable register. */
+                        uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
+                        Log12(("iemNativeVarRestoreVolatileRegsPostHlpCall: unspilling idxVar=%d/idxReg=%d (slot %#x bp+%d, off=%#x)\n",
+                               idxVar, idxHstReg, idxStackSlot, iemNativeStackCalcBpDisp(idxStackSlot), off));
+                        off = iemNativeEmitLoadGprByBp(pReNative, off, idxHstReg, iemNativeStackCalcBpDisp(idxStackSlot));
+                        continue;
+                    }
+
+                    case kIemNativeVarKind_Immediate:
+                    case kIemNativeVarKind_VarRef:
+                    case kIemNativeVarKind_GstRegRef:
+                        /* It is weird to have any of these loaded at this point. */
+                        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_IPE_13));
+                        continue;
+
+                    case kIemNativeVarKind_End:
+                    case kIemNativeVarKind_Invalid:
+                        break;
+                }
+                AssertFailed();
+            }
+        } while (fHstRegs);
+    }
+    return off;
 }
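Together with iemNativeRegRestoreGuestShadowsInVolatileRegs(), these two helpers bracket the TLB-miss helper call. A condensed sketch of that call sequence, assembled from the call sites added further down in this changeset (not a compilable unit on its own; the actual helper-call emission is elided):

    /* Registers the helpers must leave alone: TLB temporaries plus the
       result/unmap-info registers that are reloaded explicitly afterwards. */
    uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave()
                                     | RT_BIT_32(idxRegMemResult)
                                     | RT_BIT_32(idxRegUnmapInfo);

    /* 1. Spill stack-kind variables living in call-volatile registers. */
    off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);

    /* 2. ... load the arguments and emit the helper call here ... */

    /* 3. Unspill the variables and reload guest shadows into volatile regs. */
    off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
    off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());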
@@ -9810 +10038 @@
         iemNativeRegFreeTmp(a_pReNative, idxReg2);
         iemNativeRegFreeTmp(a_pReNative, idxReg1);
+    }
+
+    uint32_t getRegsNotToSave() const
+    {
+        if (!fSkip)
+            return RT_BIT_32(idxReg1) | RT_BIT_32(idxReg2);
+        return 0;
+    }
+
+    /** This is only to avoid assertions. */
+    uint32_t getActiveRegsWithShadows() const
+    {
+#ifdef VBOX_STRICT
+        if (!fSkip)
+            return RT_BIT_32(idxRegSegBase) | RT_BIT_32(idxRegSegLimit) | RT_BIT_32(idxRegSegAttrib);
+#endif
+        return 0;
     }
 } IEMNATIVEEMITTLBSTATE;

@@ -10873 +11118 @@
     /* IEMNATIVE_CALL_ARG1_GREG = idxVarValue (first) */
-    off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarValue,
-                                                    0 /*offAddend*/, true /*fVarAllowInVolatileReg*/);
+    off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarValue,
+                                                    0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
 
     /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
@@ -11328 +11573 @@
      * may end up making calls.
      */
-    /** @todo we could postpone this till we make the call and reload the
-     * registers after returning from the call. Not sure if that's sensible or
-     * not, though. */
     off = iemNativeRegFlushPendingWrites(pReNative, off);
 
+#ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
     /*
      * Move/spill/flush stuff out of call-volatile registers.

@@ -11340 +11583 @@
     /** @todo save+restore active registers and maybe guest shadows in tlb-miss. */
     off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
+#endif
 
     /* The bUnmapInfo variable will get a register in the tlb-hit code path,

@@ -11384 +11628 @@
 #endif
 
+#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
+        /* Save variables in volatile registers. */
+        uint32_t const fHstRegsNotToSave = TlbState.getRegsNotToSave() | RT_BIT_32(idxRegMemResult) | RT_BIT_32(idxRegUnmapInfo);
+        off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
+#endif
+
+        /* IEMNATIVE_CALL_ARG2_GREG = GCPtrMem - load first as it is from a variable. */
+        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarGCPtrMem, 0 /*offAddend*/,
+#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
+                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK, true /*fSpilledVarsInVolatileRegs*/);
+#else
+                                                        IEMNATIVE_CALL_VOLATILE_GREG_MASK);
+#endif
+
         /* IEMNATIVE_CALL_ARG3_GREG = iSegReg */
         if (iSegReg != UINT8_MAX)
         {

@@ -11391 +11649 @@
         }
 
-        /* IEMNATIVE_CALL_ARG2_GREG = GCPtrMem */
-        off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG2_GREG, idxVarGCPtrMem);
-
         /* IEMNATIVE_CALL_ARG1_GREG = &idxVarUnmapInfo; stackslot address, load any register with result after the call. */
-#if 0
-        off = iemNativeEmitLoadArgGregWithVarAddr(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo, true /*fFlushShadows*/);
-#else
         int32_t const offBpDispVarUnmapInfo = iemNativeStackCalcBpDisp(iemNativeVarGetStackSlot(pReNative, idxVarUnmapInfo));
         off = iemNativeEmitLeaGprByBp(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, offBpDispVarUnmapInfo);
-#endif
 
         /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */

@@ -11414 +11665 @@
         if (idxRegMemResult != IEMNATIVE_CALL_RET_GREG)
             off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegMemResult, IEMNATIVE_CALL_RET_GREG);
+
+#ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
+        /* Restore variables and guest shadow registers to volatile registers. */
+        off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
+        off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());
+#endif
 
         Assert(pReNative->Core.aVars[idxVarUnmapInfo].idxReg == idxRegUnmapInfo);

@@ -11442 +11699 @@
          */
         iemNativeLabelDefine(pReNative, idxLabelTlbDone, off);
+
+# ifndef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
+        /* Temp Hack: Flush all guest shadows in volatile registers in case of TLB miss. */
+        iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
+# endif
     }
 #else
@@ -11553 +11815 @@
     /* IEMNATIVE_CALL_ARG1_GREG = idxVarUnmapInfo (first!) */
-    off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo,
-                                               0 /*offAddend*/, true /*fVarAllowInVolatileReg*/);
+    off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo,
+                                               0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
 
     /* IEMNATIVE_CALL_ARG0_GREG = pVCpu */
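All of the new save/restore logic is fenced by IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP. A compressed editorial summary of the two resulting code paths follows; it is assembled from the hunks above, not a verbatim excerpt, and the real code spreads these pieces across the lookup, miss, and done paths:

    #ifdef IEMNATIVE_WITH_FREE_AND_FLUSH_VOLATILE_REGS_AT_TLB_LOOKUP
        /* Old behaviour: vacate all call-volatile registers before the TLB
           lookup, so a miss cannot clobber anything live. */
        off = iemNativeRegMoveAndFreeAndFlushAtCall(pReNative, off, 0 /* vacate all non-volatile regs */);
        /* ... TLB lookup + helper call ... */
    #else
        /* New behaviour: leave variables in volatile registers across the
           lookup and spill/unspill them only around the TLB-miss helper call. */
        off = iemNativeVarSaveVolatileRegsPreHlpCall(pReNative, off, fHstRegsNotToSave);
        /* ... helper call ... */
        off = iemNativeVarRestoreVolatileRegsPostHlpCall(pReNative, off, fHstRegsNotToSave);
        off = iemNativeRegRestoreGuestShadowsInVolatileRegs(pReNative, off, TlbState.getActiveRegsWithShadows());
        /* Temp hack from the changeset: after the TLB-done label, drop guest
           shadows held in volatile registers, since a miss may have clobbered them. */
        iemNativeRegFlushGuestShadowsByHostMask(pReNative, IEMNATIVE_CALL_VOLATILE_GREG_MASK);
    #endif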
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
(r102724 → r102733)

@@ -836 +836 @@
                                                  uint32_t fKeepVars = 0);
 DECLHIDDEN(void) iemNativeRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstRegs) RT_NOEXCEPT;
+DECLHIDDEN(void) iemNativeRegFlushGuestShadowsByHostMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegs) RT_NOEXCEPT;
 
 DECL_HIDDEN_THROW(uint8_t) iemNativeVarGetStackSlot(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
(r102724 → r102733)

@@ -4301 +4301 @@
 DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitLoadArgGregFromStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
-                                     int32_t offAddend = 0, bool fVarAllowInVolatileReg = false)
+iemNativeEmitLoadArgGregFromStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
+                                     int32_t offAddend = 0, uint32_t fHstVolatileRegsAllowed = UINT32_MAX,
+                                     bool fSpilledVarsInVolatileRegs = false)
 {
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);

@@ -4308 +4309 @@
 
     uint8_t const idxRegVar = pReNative->Core.aVars[idxVar].idxReg;
-    if (idxRegVar < RT_ELEMENTS(pReNative->Core.aHstRegs))
-    {
-        Assert(!(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK) || fVarAllowInVolatileReg);
-        RT_NOREF(fVarAllowInVolatileReg);
+    if (   idxRegVar < RT_ELEMENTS(pReNative->Core.aHstRegs)
+        && (   (RT_BIT_32(idxRegVar) & (~IEMNATIVE_CALL_VOLATILE_GREG_MASK | fHstVolatileRegsAllowed))
+            || !fSpilledVarsInVolatileRegs ))
+    {
+        AssertStmt(   !(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
+                   || (RT_BIT_32(idxRegVar) & fHstVolatileRegsAllowed),
+                   IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_13));
         if (!offAddend)
         {

@@ -4339 +4343 @@
 DECL_FORCE_INLINE_THROW(uint32_t)
-iemNativeEmitLoadArgGregFromImmOrStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
-                                          int32_t offAddend = 0, bool fVarAllowInVolatileReg = false)
+iemNativeEmitLoadArgGregFromImmOrStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
+                                          int32_t offAddend = 0, uint32_t fHstVolatileRegsAllowed = 0,
+                                          bool fSpilledVarsInVolatileRegs = false)
 {
     IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);

@@ -4345 +4350 @@
         off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegArg, pReNative->Core.aVars[idxVar].u.uValue + offAddend);
     else
-        off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, idxRegArg, idxVar, offAddend, fVarAllowInVolatileReg);
+        off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, idxRegArg, idxVar, offAddend,
+                                                   fHstVolatileRegsAllowed, fSpilledVarsInVolatileRegs);
     return off;
 }
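The upshot at call sites: the single fVarAllowInVolatileReg boolean becomes a mask of acceptable volatile registers, and the new fSpilledVarsInVolatileRegs flag additionally lets spilled variables be reloaded via volatile registers. A before/after comparison of one call site touched above (both lines verbatim from this changeset):

    /* Before (r102724): a bool that either allowed any volatile register or none. */
    off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo,
                                               0 /*offAddend*/, true /*fVarAllowInVolatileReg*/);

    /* After (r102733): a register mask; passing the whole call-volatile set
       keeps the old behaviour, while callers can now whitelist fewer registers. */
    off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVarUnmapInfo,
                                               0 /*offAddend*/, IEMNATIVE_CALL_VOLATILE_GREG_MASK);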