diff --git a/src/CLR/Core/CLR_RT_HeapBlock_Delegate.cpp b/src/CLR/Core/CLR_RT_HeapBlock_Delegate.cpp index 0fd44b5cae..f40915abac 100644 --- a/src/CLR/Core/CLR_RT_HeapBlock_Delegate.cpp +++ b/src/CLR/Core/CLR_RT_HeapBlock_Delegate.cpp @@ -57,6 +57,7 @@ HRESULT CLR_RT_HeapBlock_Delegate::CreateInstance( dlg->m_object.SetObjectReference(nullptr); dlg->m_genericTypeSpec.Clear(); + dlg->m_genericMethodSpec.Clear(); #if defined(NANOCLR_APPDOMAINS) dlg->m_appDomain = g_CLR_RT_ExecutionEngine.GetCurrentAppDomain(); diff --git a/src/CLR/Core/CLR_RT_StackFrame.cpp b/src/CLR/Core/CLR_RT_StackFrame.cpp index c2691bc1dc..3a6678dd2e 100644 --- a/src/CLR/Core/CLR_RT_StackFrame.cpp +++ b/src/CLR/Core/CLR_RT_StackFrame.cpp @@ -118,15 +118,18 @@ HRESULT CLR_RT_StackFrame::Push(CLR_RT_Thread *th, const CLR_RT_MethodDef_Instan // void* m_customPointer; // }; // + // Initialize generic type context storage to invalid + stack->m_genericTypeSpecStorage.Clear(); + // #ifndef NANOCLR_NO_IL_INLINE stack->m_inlineFrame = nullptr; #endif #if defined(NANOCLR_PROFILE_NEW_CALLS) stack->m_callchain.Enter(stack); // CLR_PROF_CounterCallChain m_callchain; #endif - // - // CLR_RT_HeapBlock m_extension[1]; - // + // + // CLR_RT_HeapBlock m_extension[1]; + // #if defined(ENABLE_NATIVE_PROFILER) stack->m_fNativeProfiled = stack->m_owningThread->m_fNativeProfiled; #endif diff --git a/src/CLR/Core/Execution.cpp b/src/CLR/Core/Execution.cpp index 55a14a0bdd..9d5d87f078 100644 --- a/src/CLR/Core/Execution.cpp +++ b/src/CLR/Core/Execution.cpp @@ -2028,6 +2028,40 @@ CLR_RT_HeapBlock *CLR_RT_ExecutionEngine::AccessStaticField(const CLR_RT_FieldDe return nullptr; } +// Helper function to resolve generic type parameters (VAR/MVAR) to their concrete types +// Used by both InitializeReference and InitializeLocals to reduce code duplication +static HRESULT ResolveGenericTypeParameter( + const CLR_RT_TypeSpec_Index &genericTypeIndex, + CLR_UINT8 paramPosition, + CLR_RT_TypeDef_Index &outClass, + NanoCLRDataType &outDataType) +{ + NATIVE_PROFILE_CLR_CORE(); + NANOCLR_HEADER(); + + if (!NANOCLR_INDEX_IS_VALID(genericTypeIndex)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + + CLR_RT_TypeSpec_Instance typeSpec; + if (!typeSpec.InitializeFromIndex(genericTypeIndex)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + + CLR_RT_SignatureParser::Element paramElement; + if (!typeSpec.GetGenericParam(paramPosition, paramElement)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + + outClass = paramElement.Class; + outDataType = paramElement.DataType; + + NANOCLR_NOCLEANUP(); +} + HRESULT CLR_RT_ExecutionEngine::InitializeReference( CLR_RT_HeapBlock &ref, CLR_RT_SignatureParser &parser, @@ -2063,8 +2097,13 @@ HRESULT CLR_RT_ExecutionEngine::InitializeReference( if (dt == DATATYPE_VAR) { - genericInstance->assembly - ->FindGenericParamAtTypeSpec(genericInstance->data, res.GenericParamPosition, realTypeDef, dt); + if (genericInstance == nullptr || !NANOCLR_INDEX_IS_VALID(*genericInstance)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + + NANOCLR_CHECK_HRESULT( + ResolveGenericTypeParameter(*genericInstance, res.GenericParamPosition, realTypeDef, dt)); goto process_datatype; } @@ -2297,26 +2336,16 @@ HRESULT CLR_RT_ExecutionEngine::InitializeLocals( // type-level generic parameter in a locals signature (e.g. 
'T' inside a generic type) CLR_INT8 genericParamPosition = *sig++; + // Resolve type-level generic parameter (VAR) using the method's enclosing type context if (methodDefInstance.genericType && NANOCLR_INDEX_IS_VALID(*methodDefInstance.genericType) && methodDefInstance.genericType->data != CLR_EmptyToken) { - CLR_RT_TypeSpec_Instance typeSpec{}; - typeSpec.InitializeFromIndex( - (const CLR_RT_TypeSpec_Index &)methodDefInstance.genericType->data); - - typeSpec.assembly->FindGenericParamAtTypeSpec( - methodDefInstance.genericType->data, - genericParamPosition, - cls, - dt); + NANOCLR_CHECK_HRESULT( + ResolveGenericTypeParameter(*methodDefInstance.genericType, genericParamPosition, cls, dt)); } else { - assembly->FindGenericParamAtTypeSpec( - methodDefInstance.genericType->data, - genericParamPosition, - cls, - dt); + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); } goto done; @@ -2324,17 +2353,36 @@ HRESULT CLR_RT_ExecutionEngine::InitializeLocals( case DATATYPE_MVAR: { + // Method-level generic parameter (e.g., '!!T' in a generic method like Array.Empty()) CLR_UINT8 genericParamPosition = *sig++; - CLR_RT_GenericParam_Index gpIndex; + // For generic methods, use the MethodSpec's signature to get the concrete type + if (NANOCLR_INDEX_IS_VALID(methodDefInstance.methodSpec)) + { + CLR_RT_MethodSpec_Instance methodSpec; + if (!methodSpec.InitializeFromIndex(methodDefInstance.methodSpec)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } - assembly->FindGenericParamAtMethodDef(methodDefInstance, genericParamPosition, gpIndex); + // Use GetGenericArgument to get the concrete type from MethodSpec's signature + if (!methodSpec.GetGenericArgument(genericParamPosition, cls, dt)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + } + else + { + // Fallback: try to resolve using GenericParam table (for open generic methods) + CLR_RT_GenericParam_Index gpIndex; + assembly->FindGenericParamAtMethodDef(methodDefInstance, genericParamPosition, gpIndex); - CLR_RT_GenericParam_CrossReference gp = - assembly->crossReferenceGenericParam[gpIndex.GenericParam()]; + CLR_RT_GenericParam_CrossReference gp = + assembly->crossReferenceGenericParam[gpIndex.GenericParam()]; - cls = gp.classTypeDef; - dt = gp.dataType; + cls = gp.classTypeDef; + dt = gp.dataType; + } goto done; } @@ -3538,37 +3586,20 @@ bool CLR_RT_ExecutionEngine::IsInstanceOf( CLR_RT_HeapBlock &obj, CLR_RT_Assembly *assm, CLR_UINT32 token, - bool isInstInstruction) + bool isInstInstruction, + const CLR_RT_MethodDef_Instance *caller) { NATIVE_PROFILE_CLR_CORE(); + CLR_RT_TypeDescriptor desc{}; CLR_RT_TypeDescriptor descTarget{}; - CLR_RT_TypeDef_Instance clsTarget{}; - CLR_RT_TypeSpec_Instance defTarget{}; if (FAILED(desc.InitializeFromObject(obj))) return false; - if (clsTarget.ResolveToken(token, assm)) - { - // - // Shortcut for identity. 
- // - if (desc.m_handlerCls.data == clsTarget.data) - return true; - - if (FAILED(descTarget.InitializeFromType(clsTarget))) - return false; - } - else if (defTarget.ResolveToken(token, assm)) - { - if (FAILED(descTarget.InitializeFromTypeSpec(defTarget))) - return false; - } - else - { + // Use InitializeFromSignatureToken to properly resolve VAR/MVAR tokens + if (FAILED(descTarget.InitializeFromSignatureToken(assm, token, caller))) return false; - } return IsInstanceOf(desc, descTarget, isInstInstruction); } @@ -3609,7 +3640,8 @@ HRESULT CLR_RT_ExecutionEngine::CastToType( CLR_RT_HeapBlock &ref, CLR_UINT32 tk, CLR_RT_Assembly *assm, - bool isInstInstruction) + bool isInstInstruction, + const CLR_RT_MethodDef_Instance *caller) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); @@ -3618,7 +3650,7 @@ HRESULT CLR_RT_ExecutionEngine::CastToType( { ; } - else if (g_CLR_RT_ExecutionEngine.IsInstanceOf(ref, assm, tk, isInstInstruction) == true) + else if (g_CLR_RT_ExecutionEngine.IsInstanceOf(ref, assm, tk, isInstInstruction, caller) == true) { ; } diff --git a/src/CLR/Core/Interpreter.cpp b/src/CLR/Core/Interpreter.cpp index 89c02b84bb..9fab21ea7d 100644 --- a/src/CLR/Core/Interpreter.cpp +++ b/src/CLR/Core/Interpreter.cpp @@ -1002,6 +1002,37 @@ HRESULT CLR_RT_Thread::Execute_DelegateInvoke(CLR_RT_StackFrame &stackArg) NANOCLR_NOCLEANUP(); } +// Helper function to handle generic .cctor rescheduling for static field operations +// Returns CLR_E_RESCHEDULE if rescheduling is needed, CLR_E_WRONG_TYPE if hash is invalid, +// or S_OK if no rescheduling is needed. +static HRESULT HandleGenericCctorReschedule( + CLR_RT_TypeSpec_Instance &tsInst, + CLR_RT_StackFrame *stack, + CLR_PMETADATA *pIp) +{ + CLR_UINT32 hash = + g_CLR_RT_TypeSystem.ComputeHashForClosedGenericType(tsInst, &stack->m_genericTypeSpecStorage, &stack->m_call); + + if (hash == 0xFFFFFFFF) + { + return CLR_E_WRONG_TYPE; + } + + CLR_RT_GenericCctorExecutionRecord *cctorRecord = g_CLR_RT_TypeSystem.FindOrCreateGenericCctorRecord(hash, nullptr); + + if (cctorRecord != nullptr && (cctorRecord->m_flags & CLR_RT_GenericCctorExecutionRecord::c_Scheduled) && + !(cctorRecord->m_flags & CLR_RT_GenericCctorExecutionRecord::c_Executed)) + { + // .cctor is scheduled but not yet executed + // Rewind ip to before this instruction so it will be retried + // (1 byte for opcode + 2 bytes for compressed field token) + *pIp -= 3; + return CLR_E_RESCHEDULE; + } + + return S_OK; +} + HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { NATIVE_PROFILE_CLR_CORE(); @@ -2108,8 +2139,83 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { FETCH_ARG_COMPRESSED_METHODTOKEN(arg, ip); + // Save arrayElementType for propagation through the call chain + // This will be used by ResolveToken and later restored after InitializeFromIndex calls + CLR_RT_TypeDef_Index propagatedArrayElementType{}; + if (NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + propagatedArrayElementType = stack->m_call.arrayElementType; + } + CLR_RT_MethodDef_Instance calleeInst{}; - if (calleeInst.ResolveToken(arg, assm, stack->m_call.genericType) == false) + // Set arrayElementType before ResolveToken so generic type resolution can use it + calleeInst.arrayElementType = propagatedArrayElementType; + + // For interface method calls on generic instances, try to extract the closed generic TypeSpec + // from the caller's assembly by matching the object's TypeDef + const CLR_RT_TypeSpec_Index *effectiveCallerGeneric = stack->m_call.genericType; + + // Only perform 
expensive TypeSpec search if ALL conditions are met: + // 1. This is a virtual call (interfaces use CALLVIRT) + // 2. No generic context exists yet + // 3. The token is a MethodRef (not direct MethodDef) + if (op == CEE_CALLVIRT && stack->m_call.genericType == nullptr && + CLR_TypeFromTk(arg) == TBL_MethodRef) + { + const CLR_RECORD_METHODREF *mr = assm->GetMethodRef(CLR_DataFromTk(arg)); + + // 4. The method owner is a TypeRef (interface method) + // 5. Not a static method (interfaces don't have static methods, but safety check) + if (mr && mr->Owner() == TBL_TypeRef) + { + // Temporarily resolve to get argument count and check if instance method + CLR_RT_MethodDef_Instance tempInst{}; + if (tempInst.ResolveToken(arg, assm, nullptr) && + (tempInst.target->flags & CLR_RECORD_METHODDEF::MD_Static) == 0) + { + CLR_RT_HeapBlock *pThisTemp = &evalPos[1 - tempInst.target->argumentsCount]; + + // 6. 'this' is an object reference (not a value type byref) + if (pThisTemp->DataType() == DATATYPE_OBJECT || pThisTemp->DataType() == DATATYPE_BYREF) + { + CLR_RT_HeapBlock *obj = pThisTemp->Dereference(); + + // 7. Object is a class instance (generic or not) + if (obj && obj->DataType() == DATATYPE_CLASS) + { + CLR_RT_TypeDef_Index objCls = obj->ObjectCls(); + + // 8. Object has a valid TypeDef + if (NANOCLR_INDEX_IS_VALID(objCls)) + { + // NOW search for a TypeSpec that matches this object's TypeDef + // This is only needed for closed generic instances like List + for (int i = 0; i < assm->tablesSize[TBL_TypeSpec]; i++) + { + const CLR_RT_TypeSpec_Index *tsIdx = + &assm->crossReferenceTypeSpec[i].genericType; + if (NANOCLR_INDEX_IS_VALID(*tsIdx)) + { + CLR_RT_TypeSpec_Instance tsInst{}; + if (tsInst.InitializeFromIndex(*tsIdx) && + NANOCLR_INDEX_IS_VALID(tsInst.genericTypeDef) && + tsInst.genericTypeDef.data == objCls.data) + { + // Found a TypeSpec in the caller's assembly that matches the + // object's class + effectiveCallerGeneric = tsIdx; + break; + } + } + } + } + } + } + } + } + } + + if (calleeInst.ResolveToken(arg, assm, effectiveCallerGeneric) == false) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } @@ -2132,6 +2238,12 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { calleeInst.InitializeFromIndex(dlg->DelegateFtn()); + // Restore propagated arrayElementType after InitializeFromIndex + if (NANOCLR_INDEX_IS_VALID(propagatedArrayElementType)) + { + calleeInst.arrayElementType = propagatedArrayElementType; + } + if ((calleeInst.target->flags & CLR_RECORD_METHODDEF::MD_Static) == 0) { pThis->Assign(dlg->m_object); @@ -2211,16 +2323,57 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } - if (calleeInst.genericType && NANOCLR_INDEX_IS_VALID(*calleeInst.genericType) && - calleeInst.genericType->data != CLR_EmptyToken) + // If this is an SZArrayHelper dispatch, populate arrayElementType from runtime + // array + CLR_RT_TypeDef_Index savedArrayElementType{}; + CLR_RT_HeapBlock_Array *pArray = + (CLR_RT_HeapBlock_Array *)pThis[0].Dereference(); + if (pArray && pArray->DataType() == DATATYPE_SZARRAY) + { + // Check if the dispatched method belongs to SZArrayHelper + CLR_RT_MethodDef_Instance calleeRealInst{}; + calleeRealInst.InitializeFromIndex(calleeReal); + + CLR_RT_TypeDef_Instance calleeType{}; + if (calleeType.InitializeFromMethod(calleeRealInst)) + { + if (calleeType.data == g_CLR_RT_WellKnownTypes.SZArrayHelper.data) + { + // Get the element type from the array's reflection data + const CLR_RT_ReflectionDef_Index &reflex 
= + pArray->ReflectionDataConst(); + + // For a 1D array, levels == 1 and data.type is the element TypeDef + if (reflex.levels == 1 && reflex.kind == REFLECTION_TYPE) + { + savedArrayElementType = reflex.data.type; + } + } + } + } + + // Initialize the dispatched method, preserving the generic context from + // calleeInst The genericType was set by ResolveToken from the MethodRef's owner + // TypeSpec + if (calleeInst.genericType && NANOCLR_INDEX_IS_VALID(*calleeInst.genericType)) { - // store the current generic context (if any) calleeInst.InitializeFromIndex(calleeReal, *calleeInst.genericType); } else { calleeInst.InitializeFromIndex(calleeReal); } + + // Restore the array element type after reinitializing calleeInst + if (NANOCLR_INDEX_IS_VALID(savedArrayElementType)) + { + calleeInst.arrayElementType = savedArrayElementType; + } + else if (NANOCLR_INDEX_IS_VALID(propagatedArrayElementType)) + { + // Restore propagated arrayElementType from parent frame + calleeInst.arrayElementType = propagatedArrayElementType; + } } #if defined(NANOCLR_APPDOMAINS) @@ -2243,6 +2396,14 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) else #endif // NANOCLR_APPDOMAINS { + // Restore propagated arrayElementType before any path (inline or push) + // Only restore if not already set (e.g., by SZArrayHelper detection in virtual dispatch) + if (!NANOCLR_INDEX_IS_VALID(calleeInst.arrayElementType) && + NANOCLR_INDEX_IS_VALID(propagatedArrayElementType)) + { + calleeInst.arrayElementType = propagatedArrayElementType; + } + #ifndef NANOCLR_NO_IL_INLINE if (stack->PushInline(ip, assm, evalPos, calleeInst, pThis)) { @@ -2252,7 +2413,45 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) #endif WRITEBACK(stack, evalPos, ip, fDirty); + NANOCLR_CHECK_HRESULT(CLR_RT_StackFrame::Push(th, calleeInst, -1)); + + // Set up the new stack frame's generic context + // Priority order: + // 1. effectiveCallerGeneric (extracted from TypeSpec search for interface calls) - HIGHEST + // PRIORITY + // This is the concrete closed generic type (e.g., List) not the interface (e.g., + // IEnumerable) + // 2. calleeInst.genericType (from MethodRef TypeSpec or virtual dispatch) + // 3. 
stack->m_call.genericType (inherited from caller) + CLR_RT_StackFrame *newStack = th->CurrentFrame(); + const CLR_RT_TypeSpec_Index *effectiveGenericContext = nullptr; + + if (effectiveCallerGeneric && NANOCLR_INDEX_IS_VALID(*effectiveCallerGeneric)) + { + effectiveGenericContext = effectiveCallerGeneric; + } + else if (calleeInst.genericType && NANOCLR_INDEX_IS_VALID(*calleeInst.genericType)) + { + effectiveGenericContext = calleeInst.genericType; + } + else if (stack->m_call.genericType && NANOCLR_INDEX_IS_VALID(*stack->m_call.genericType)) + { + effectiveGenericContext = stack->m_call.genericType; + } + + if (effectiveGenericContext) + { + // CRITICAL: Copy the value to stable storage and update the pointer ATOMICALLY + // to prevent the pointer from pointing to stale/overwritten memory + newStack->m_genericTypeSpecStorage = *effectiveGenericContext; + newStack->m_call.genericType = &newStack->m_genericTypeSpecStorage; + } + else + { + // Ensure genericType doesn't point to garbage + newStack->m_call.genericType = nullptr; + } } goto Execute_Restart; @@ -2279,13 +2478,20 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) WRITEBACK(stack, evalPos, ip, fDirty); + // + // Preserve arrayElementType from returning frame to caller frame + // + CLR_RT_StackFrame *stackNext = stack->Caller(); + if (stackNext && NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + stackNext->m_call.arrayElementType = stack->m_call.arrayElementType; + } + // // Same kind of handler, no need to pop back out, just restart execution in place. // if (stack->m_flags & CLR_RT_StackFrame::c_CallerIsCompatibleForRet) { - CLR_RT_StackFrame *stackNext = stack->Caller(); - stack->Pop(); stack = stackNext; @@ -2379,7 +2585,17 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { FETCH_ARG_COMPRESSED_METHODTOKEN(arg, ip); + // Save arrayElementType for propagation through the call chain + CLR_RT_TypeDef_Index propagatedArrayElementType{}; + if (NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + propagatedArrayElementType = stack->m_call.arrayElementType; + } + CLR_RT_MethodDef_Instance calleeInst{}; + // Set arrayElementType before ResolveToken so generic type resolution can use it + calleeInst.arrayElementType = propagatedArrayElementType; + if (calleeInst.ResolveToken(arg, assm, stack->m_call.genericType) == false) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); @@ -2564,7 +2780,7 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) FETCH_ARG_COMPRESSED_TYPETOKEN(arg, ip); NANOCLR_CHECK_HRESULT( - CLR_RT_ExecutionEngine::CastToType(evalPos[0], arg, assm, (op == CEE_ISINST))); + CLR_RT_ExecutionEngine::CastToType(evalPos[0], arg, assm, (op == CEE_ISINST), &stack->m_call)); break; } @@ -2753,7 +2969,12 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) { // access static field of a generic instance - ptr = field.assembly->GetStaticFieldByFieldDef(field, field.genericType); + // Pass both TypeSpec context (for VAR resolution) and MethodDef context (for MVAR resolution) + ptr = field.assembly->GetStaticFieldByFieldDef( + field, + field.genericType, + &stack->m_genericTypeSpecStorage, + &stack->m_call); } else { @@ -2765,6 +2986,26 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } + else if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) + { + CLR_RT_HeapBlock *obj = ptr; + NanoCLRDataType dt; + + 
CLR_RT_TypeDescriptor::ExtractObjectAndDataType(obj, dt); + + // Field not found - but if this is a generic type with + // a .cctor that's scheduled, + // reschedule to allow the .cctor to complete field initialization + if (obj == nullptr) + { + // Check if there's a pending .cctor for this generic type + CLR_RT_TypeSpec_Instance tsInst; + if (tsInst.InitializeFromIndex(*field.genericType)) + { + NANOCLR_CHECK_HRESULT(HandleGenericCctorReschedule(tsInst, stack, &ip)); + } + } + } evalPos++; CHECKSTACK(stack, evalPos); @@ -2793,7 +3034,12 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) { // access static field of a generic instance - ptr = field.assembly->GetStaticFieldByFieldDef(field, field.genericType); + // Pass both TypeSpec context (for VAR resolution) and MethodDef context (for MVAR resolution) + ptr = field.assembly->GetStaticFieldByFieldDef( + field, + field.genericType, + &stack->m_genericTypeSpecStorage, + &stack->m_call); } else { @@ -2803,6 +3049,19 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) if (ptr == nullptr) { + // Field not found - but if this is a generic type with a .cctor that's scheduled, + // reschedule to allow the .cctor to complete field initialization + if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) + { + // Check if there's a pending .cctor for this generic type + CLR_RT_TypeSpec_Instance tsInst; + if (tsInst.InitializeFromIndex(*field.genericType)) + { + NANOCLR_CHECK_HRESULT(HandleGenericCctorReschedule(tsInst, stack, &ip)); + } + } + + // Not a pending .cctor case - this is a real error NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } @@ -2832,7 +3091,12 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) { // access static field of a generic instance - ptr = field.assembly->GetStaticFieldByFieldDef(field, field.genericType); + // Pass both TypeSpec context (for VAR resolution) and MethodDef context (for MVAR resolution) + ptr = field.assembly->GetStaticFieldByFieldDef( + field, + field.genericType, + &stack->m_genericTypeSpecStorage, + &stack->m_call); } else { @@ -2842,6 +3106,19 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) if (ptr == nullptr) { + // Field not found - but if this is a generic type with a .cctor that's scheduled, + // reschedule to allow the .cctor to complete field initialization + if (field.genericType && NANOCLR_INDEX_IS_VALID(*field.genericType)) + { + // Check if there's a pending .cctor for this generic type + CLR_RT_TypeSpec_Instance tsInst; + if (tsInst.InitializeFromIndex(*field.genericType)) + { + NANOCLR_CHECK_HRESULT(HandleGenericCctorReschedule(tsInst, stack, &ip)); + } + } + + // Not a pending .cctor case - this is a real error NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } @@ -2860,11 +3137,31 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) FETCH_ARG_COMPRESSED_TYPETOKEN(arg, ip); CLR_RT_TypeDef_Instance typeInst{}; + CLR_RT_TypeDef_Index previousArrayElemType = stack->m_call.arrayElementType; + + // For BOXing a generic VAR (!0) inside an interface adapter (e.g., IList.get_Item) + // we may lack a closed generic TypeSpec in stack->m_call.genericType. In that case + // use the runtime type of the value being boxed to populate arrayElementType so + // TypeDef::ResolveToken can fall back and close the VAR slot. 
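
Note on the pattern introduced here: the BOX, UNBOX.ANY, STELEM and LDELEM handlers in this patch all repeat the same save / infer / resolve / restore sequence around stack->m_call.arrayElementType, with an explicit restore on every exit path. A minimal sketch of an RAII guard that would centralize that restore (hypothetical helper, not part of this diff):

template <typename T>
struct ScopedRestore
{
    T &slot; // value being temporarily overridden
    T saved; // snapshot taken on entry, written back on any exit

    explicit ScopedRestore(T &s) : slot(s), saved(s)
    {
    }

    ~ScopedRestore()
    {
        // runs on normal flow, on NANOCLR_SET_AND_LEAVE bail-outs,
        // and on goto-based exits that leave the enclosing scope
        slot = saved;
    }

    ScopedRestore(const ScopedRestore &) = delete;
    ScopedRestore &operator=(const ScopedRestore &) = delete;
};

// usage sketch:
//   ScopedRestore<CLR_RT_TypeDef_Index> guard(stack->m_call.arrayElementType);
//   ...infer element type, ResolveToken...  // restore happens in ~ScopedRestore
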
+ if (!NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + CLR_RT_TypeDef_Index valueTypeIdx; + if (SUCCEEDED(CLR_RT_TypeDescriptor::ExtractTypeIndexFromObject(evalPos[0], valueTypeIdx))) + { + stack->m_call.arrayElementType = valueTypeIdx; + } + } + if (typeInst.ResolveToken(arg, assm, &stack->m_call) == false) { + // restore previous context before bailing + stack->m_call.arrayElementType = previousArrayElemType; NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } + // Restore previous arrayElementType (don't leak temporary inference) + stack->m_call.arrayElementType = previousArrayElemType; + UPDATESTACK(stack, evalPos); // check if value is a nullable type @@ -3013,11 +3310,31 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) // TODO: still not handling Nullable types here CLR_RT_TypeDef_Instance typeInst{}; + CLR_RT_TypeDef_Index previousArrayElemType = stack->m_call.arrayElementType; + + // For UNBOX.ANY of a generic VAR (!0) inside an interface adapter (e.g., IList.set_Item) + // we may lack a closed generic TypeSpec in stack->m_call.genericType. In that case + // use the runtime type of the boxed value to populate arrayElementType so + // TypeDef::ResolveToken can fall back and close the VAR slot. + if (!NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + CLR_RT_TypeDef_Index valueTypeIdx; + if (SUCCEEDED(CLR_RT_TypeDescriptor::ExtractTypeIndexFromObject(evalPos[0], valueTypeIdx))) + { + stack->m_call.arrayElementType = valueTypeIdx; + } + } + if (typeInst.ResolveToken(arg, assm, &stack->m_call) == false) { + // restore previous context before bailing + stack->m_call.arrayElementType = previousArrayElemType; NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } + // Restore previous arrayElementType (don't leak temporary inference) + stack->m_call.arrayElementType = previousArrayElemType; + UPDATESTACK(stack, evalPos); if (((typeInst.target->flags & CLR_RECORD_TYPEDEF::TD_Semantics_Mask) == @@ -3043,7 +3360,8 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) else { //"castclass" - NANOCLR_CHECK_HRESULT(CLR_RT_ExecutionEngine::CastToType(evalPos[0], arg, assm, false)); + NANOCLR_CHECK_HRESULT( + CLR_RT_ExecutionEngine::CastToType(evalPos[0], arg, assm, false, &stack->m_call)); } break; @@ -3175,16 +3493,27 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) CLR_RT_TypeDef_Instance type{}; CLR_RT_TypeDef_Index cls; + // Propagate the array element type into the current call context so generic VAR can resolve + // against a closed type (e.g., List[] -> Int32). This mirrors the SZArrayHelper flow + // used in method dispatch, but scoped to this instruction. + CLR_RT_TypeDef_Index previousArrayElemType = stack->m_call.arrayElementType; + + NANOCLR_CHECK_HRESULT(CLR_RT_TypeDescriptor::ExtractTypeIndexFromObject(evalPos[0], cls)); + + stack->m_call.arrayElementType = cls; + if (!type.ResolveToken(arg, assm, &stack->m_call)) { + // Restore previous context before bailing out + stack->m_call.arrayElementType = previousArrayElemType; NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } - NANOCLR_CHECK_HRESULT(CLR_RT_TypeDescriptor::ExtractTypeIndexFromObject(evalPos[0], cls)); - // Check this is an object of the requested type. 
if (!g_CLR_RT_ExecutionEngine.IsInstanceOfToken(arg, evalPos[0], stack->m_call)) { + // Restore previous context before leaving + stack->m_call.arrayElementType = previousArrayElemType; NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } @@ -3198,6 +3527,9 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) NANOCLR_CHECK_HRESULT(evalPos[0].LoadFromReference(safeSource)); } + // Restore previous arrayElementType context + stack->m_call.arrayElementType = previousArrayElemType; + goto Execute_LoadAndPromote; // } @@ -3351,11 +3683,31 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) // Resolve the IL's element type in the context of any generics CLR_RT_TypeDef_Instance expectedType; + CLR_RT_TypeDef_Index previousArrayElemType = stack->m_call.arrayElementType; + + // For STELEM of a generic VAR (!0) inside an interface adapter (e.g., IList.set_Item) + // we may lack a closed generic TypeSpec in stack->m_call.genericType. In that case + // use the runtime type of the array element to populate arrayElementType so + // TypeDef::ResolveToken can fall back and close the VAR slot. + if (!NANOCLR_INDEX_IS_VALID(stack->m_call.arrayElementType)) + { + CLR_RT_TypeDef_Index elemTypeIdx; + if (SUCCEEDED(CLR_RT_TypeDescriptor::ExtractTypeIndexFromObject(evalPos[1], elemTypeIdx))) + { + stack->m_call.arrayElementType = elemTypeIdx; + } + } + if (!expectedType.ResolveToken(arg, assm, &stack->m_call)) { + // restore previous context before bailing + stack->m_call.arrayElementType = previousArrayElemType; NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } + // Restore previous arrayElementType (don't leak temporary inference) + stack->m_call.arrayElementType = previousArrayElemType; + NanoCLRDataType elemDT = (NanoCLRDataType)expectedType.target->dataType; // Promote the value if it's a reference or boxed struct @@ -3504,13 +3856,6 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) // resolve the generic parameter in the context of the caller's generic type, if different // from the caller's assembly. 
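
The replacement below delegates to CLR_RT_TypeSpec_Instance::GetGenericParam (added in TypeSystem.cpp later in this patch), which resolves a parameter position by walking the closed TypeSpec signature linearly: one element for the GENERICINST marker, one for the generic type itself, then one element per type argument. A standalone sketch of that walk, using toy stand-ins for the signature-parser types:

#include <cstddef>
#include <vector>

struct Element
{
    int dataType;        // stand-in for NanoCLRDataType
    int genParamCount;   // argument count, meaningful on the type element
    unsigned classToken; // stand-in for the resolved CLR_RT_TypeDef_Index
};

// Returns the element holding argument `position`, or nullptr on a malformed
// signature or out-of-range position -- mirroring the false paths of the
// real GetGenericParam.
const Element *GetGenericParamModel(const std::vector<Element> &sig, int position)
{
    std::size_t cursor = 0;

    if (cursor >= sig.size())
        return nullptr; // missing GENERICINST marker
    cursor++;           // step over GENERICINST

    if (cursor >= sig.size() || position >= sig[cursor].genParamCount)
        return nullptr; // not enough parameters
    cursor++;           // step over the generic type definition

    cursor += position; // arguments follow in declaration order
    return cursor < sig.size() ? &sig[cursor] : nullptr;
}

For a closed TypeSpec such as List<int>, position 0 lands on the Int32 element, which is what lets a !T slot map to System.Int32 in the resolution code below.
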
- CLR_RT_Assembly *resolveAsm = assm; - if (stack->m_call.genericType && NANOCLR_INDEX_IS_VALID(*stack->m_call.genericType)) - { - resolveAsm = - g_CLR_RT_TypeSystem.m_assemblies[stack->m_call.genericType->Assembly() - 1]; - } - if (stack->m_call.genericType != nullptr) { CLR_UINT32 rawGenericParamRow = CLR_DataFromTk(arg); @@ -3539,19 +3884,20 @@ HRESULT CLR_RT_Thread::Execute_IL(CLR_RT_StackFrame &stackArg) { // closed TypeSpec const CLR_RT_TypeSpec_Index *callerTypeSpec = stack->m_call.genericType; - CLR_RT_TypeDef_Index resolvedTypeDef; - NanoCLRDataType dummyDataType; - - if (!resolveAsm->FindGenericParamAtTypeSpec( - callerTypeSpec->TypeSpec(), - genericParam.target->number, - resolvedTypeDef, - dummyDataType)) + + CLR_RT_TypeSpec_Instance typeSpec; + if (!typeSpec.InitializeFromIndex(*callerTypeSpec)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); + } + + CLR_RT_SignatureParser::Element paramElement; + if (!typeSpec.GetGenericParam(genericParam.target->number, paramElement)) { NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } - NANOCLR_CHECK_HRESULT(evalPos[0].SetReflection(resolvedTypeDef)); + NANOCLR_CHECK_HRESULT(evalPos[0].SetReflection(paramElement.Class)); } } } diff --git a/src/CLR/Core/Thread.cpp b/src/CLR/Core/Thread.cpp index 1461930ef9..bcf3ea834f 100644 --- a/src/CLR/Core/Thread.cpp +++ b/src/CLR/Core/Thread.cpp @@ -175,6 +175,11 @@ HRESULT CLR_RT_Thread::PushThreadProcDelegate(CLR_RT_HeapBlock_Delegate *pDelega inst.genericType = &delegateTypeSpec; } + if (pDelegate->m_genericMethodSpec.data != 0) + { + inst.methodSpec = pDelegate->m_genericMethodSpec; + } + #if defined(NANOCLR_APPDOMAINS) if (!pDelegate->m_appDomain->IsLoaded()) @@ -202,6 +207,14 @@ HRESULT CLR_RT_Thread::PushThreadProcDelegate(CLR_RT_HeapBlock_Delegate *pDelega stackTop->m_call.genericType = &stackTop->m_genericTypeSpecStorage; } + // If we have a generic method context, copy it to the stack frame + // This enables MVAR resolution for .cctor triggered from generic methods + if (pDelegate->m_genericMethodSpec.data != 0) + { + CLR_RT_StackFrame *stackTop = this->CurrentFrame(); + stackTop->m_call.methodSpec = pDelegate->m_genericMethodSpec; + } + if ((inst.target->flags & CLR_RECORD_METHODDEF::MD_Static) == 0) { CLR_RT_StackFrame *stackTop = this->CurrentFrame(); diff --git a/src/CLR/Core/TypeSystem.cpp b/src/CLR/Core/TypeSystem.cpp index d13f6633db..783ed5055e 100644 --- a/src/CLR/Core/TypeSystem.cpp +++ b/src/CLR/Core/TypeSystem.cpp @@ -722,6 +722,10 @@ bool CLR_RT_TypeSpec_Instance::InitializeFromIndex(const CLR_RT_TypeSpec_Index & genericTypeDef = element.Class; } + else + { + genericTypeDef.Clear(); + } return true; } @@ -789,21 +793,28 @@ bool CLR_RT_TypeSpec_Instance::ResolveToken( return false; } + CLR_RT_TypeSpec_Instance callerTypeSpec; + if (!callerTypeSpec.InitializeFromIndex(*caller->genericType)) + { + ClearInstance(); + return false; + } + + CLR_RT_SignatureParser::Element paramElement; + if (!callerTypeSpec.GetGenericParam((CLR_UINT32)pos, paramElement)) + { + ClearInstance(); + return false; + } + + // Use the resolved parameter's type for this TypeSpec auto &tsi = *caller->genericType; CLR_UINT32 closedTsRow = tsi.TypeSpec(); Set(caller->genericType->Assembly(), closedTsRow); assembly = g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1]; - - target = assm->GetTypeSpec(closedTsRow); - - NanoCLRDataType realDataType; - - g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1]->FindGenericParamAtTypeSpec( - caller->genericType->data, - (CLR_UINT32)pos, - 
cachedElementType, - realDataType); + target = assembly->GetTypeSpec(closedTsRow); + cachedElementType = paramElement.Class; } else if (element.DataType == DATATYPE_MVAR) { @@ -898,6 +909,49 @@ bool CLR_RT_TypeSpec_Instance::IsClosedGenericType() return true; } +bool CLR_RT_TypeSpec_Instance::GetGenericParam(CLR_INT32 parameterPosition, CLR_RT_SignatureParser::Element &element) +{ + NATIVE_PROFILE_CLR_CORE(); + + CLR_RT_SignatureParser parser; + parser.Initialize_TypeSpec(assembly, target); + + if (FAILED(parser.Advance(element))) + { + return false; + } + + // sanity check for GENERICINST + if (element.DataType != DATATYPE_GENERICINST) + { + return false; + } + + // move to type + if (FAILED(parser.Advance(element))) + { + return false; + } + + // sanity check for invalid parameter position + if (parameterPosition >= element.GenParamCount) + { + // not enough parameters!! + return false; + } + + // walk to the requested parameter position + for (int32_t i = 0; i <= parameterPosition; i++) + { + if (FAILED(parser.Advance(element))) + { + return false; + } + } + + return true; +} + ////////////////////////////// bool CLR_RT_TypeDef_Instance::InitializeFromReflection(const CLR_RT_ReflectionDef_Index &reflex, CLR_UINT32 *levels) @@ -1226,30 +1280,76 @@ bool CLR_RT_TypeDef_Instance::ResolveToken( { int pos = elem.GenericParamPosition; - // Use the *caller's* bound genericType (Stack, etc.) - if (caller == nullptr || caller->genericType == nullptr) + CLR_RT_TypeSpec_Instance callerTypeSpec; + if (!callerTypeSpec.InitializeFromIndex(*caller->genericType)) { return false; } - CLR_RT_TypeDef_Index realTypeDef; - NanoCLRDataType realDataType; - - // Only call this once to map (e.g. !T→Int32) + CLR_RT_SignatureParser::Element paramElement; - g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1] - ->FindGenericParamAtTypeSpec( - caller->genericType->data, - (CLR_UINT32)pos, - realTypeDef, - realDataType); - - // populate this instance - data = realTypeDef.data; - assembly = g_CLR_RT_TypeSystem.m_assemblies[realTypeDef.Assembly() - 1]; - target = assembly->GetTypeDef(realTypeDef.Type()); + // Try to map using the generic context (e.g. 
!T→Int32) + if (callerTypeSpec.GetGenericParam((CLR_UINT32)pos, paramElement)) + { + // Successfully resolved from generic context + if (NANOCLR_INDEX_IS_VALID(paramElement.Class)) + { + data = paramElement.Class.data; + assembly = g_CLR_RT_TypeSystem.m_assemblies[paramElement.Class.Assembly() - 1]; + target = assembly->GetTypeDef(paramElement.Class.Type()); + } + else if (paramElement.DataType == DATATYPE_MVAR) + { + // resolve from methodspec context + if (NANOCLR_INDEX_IS_VALID(caller->methodSpec)) + { + CLR_RT_MethodSpec_Instance methodSpecInstance; + if (methodSpecInstance.InitializeFromIndex(caller->methodSpec)) + { + NanoCLRDataType dataType; + CLR_RT_TypeDef_Index typeDef; + methodSpecInstance.GetGenericArgument( + paramElement.GenericParamPosition, + typeDef, + dataType); + + data = typeDef.data; + assembly = g_CLR_RT_TypeSystem.m_assemblies[typeDef.Assembly() - 1]; + target = assembly->GetTypeDef(typeDef.Type()); + } + else + { + return false; + } + } + else + { + return false; + } + } + else if (paramElement.DataType == DATATYPE_VAR) + { + // nested VAR not implemented + ASSERT(false); + return false; + } + else + { + return false; + } + } + else if (NANOCLR_INDEX_IS_VALID(caller->arrayElementType) && pos == 0) + { + // Fallback to arrayElementType for SZArrayHelper scenarios + data = caller->arrayElementType.data; + assembly = g_CLR_RT_TypeSystem.m_assemblies[caller->arrayElementType.Assembly() - 1]; + target = assembly->GetTypeDef(caller->arrayElementType.Type()); + } + else + { + return false; + } } - else if (elem.DataType == DATATYPE_MVAR) { // Use the caller bound genericType (Stack, etc.) @@ -1355,17 +1455,22 @@ bool CLR_RT_TypeDef_Instance::ResolveNullableType( return false; } - CLR_RT_TypeDef_Index realTypeDef; - NanoCLRDataType realDataType; + CLR_RT_TypeSpec_Instance callerTypeSpec; + if (!callerTypeSpec.InitializeFromIndex(*caller->genericType)) + { + return false; + } - // Only call this once to map (e.g. 
!T→Int32) - caller->assembly - ->FindGenericParamAtTypeSpec(caller->genericType->data, (CLR_UINT32)pos, realTypeDef, realDataType); + CLR_RT_SignatureParser::Element paramElement; + if (!callerTypeSpec.GetGenericParam((CLR_UINT32)pos, paramElement)) + { + return false; + } // populate this instance - data = realTypeDef.data; - assembly = g_CLR_RT_TypeSystem.m_assemblies[realTypeDef.Assembly() - 1]; - target = assembly->GetTypeDef(realTypeDef.Type()); + data = paramElement.Class.data; + assembly = g_CLR_RT_TypeSystem.m_assemblies[paramElement.Class.Assembly() - 1]; + target = assembly->GetTypeDef(paramElement.Class.Type()); return true; } @@ -1591,6 +1696,8 @@ bool CLR_RT_MethodDef_Instance::InitializeFromIndex(const CLR_RT_MethodDef_Index assembly = g_CLR_RT_TypeSystem.m_assemblies[Assembly() - 1]; target = assembly->GetMethodDef(Method()); genericType = nullptr; + arrayElementType.Clear(); + methodSpec.Clear(); #if defined(NANOCLR_INSTANCE_NAMES) name = assembly->GetString(target->name); @@ -1602,6 +1709,7 @@ bool CLR_RT_MethodDef_Instance::InitializeFromIndex(const CLR_RT_MethodDef_Index assembly = nullptr; target = nullptr; genericType = nullptr; + arrayElementType.Clear(); return false; } @@ -1612,6 +1720,8 @@ bool CLR_RT_MethodDef_Instance::InitializeFromIndex( { NATIVE_PROFILE_CLR_CORE(); + methodSpec.Clear(); + CLR_RT_TypeSpec_Instance tsInst; if (!tsInst.InitializeFromIndex(typeSpec)) @@ -1645,15 +1755,14 @@ bool CLR_RT_MethodDef_Instance::InitializeFromIndex( if (elem.DataType == DATATYPE_VAR) { - CLR_RT_TypeDef_Index realOwner; - NanoCLRDataType dummyDT; + CLR_RT_SignatureParser::Element paramElement; - if (!tsAsm->FindGenericParamAtTypeSpec(typeSpec.data, elem.GenericParamPosition, realOwner, dummyDT)) + if (!tsInst.GetGenericParam(elem.GenericParamPosition, paramElement)) { return false; } - ownerTypeIdx = realOwner; + ownerTypeIdx = paramElement.Class; } else { @@ -1726,6 +1835,7 @@ void CLR_RT_MethodDef_Instance::ClearInstance() assembly = nullptr; target = nullptr; genericType = nullptr; + arrayElementType.Clear(); } bool CLR_RT_MethodDef_Instance::ResolveToken( @@ -1734,6 +1844,9 @@ bool CLR_RT_MethodDef_Instance::ResolveToken( const CLR_RT_TypeSpec_Index *callerGeneric) { NATIVE_PROFILE_CLR_CORE(); + + ClearInstance(); + if (assm) { CLR_UINT32 index = CLR_DataFromTk(tk); @@ -1836,7 +1949,7 @@ bool CLR_RT_MethodDef_Instance::ResolveToken( } else { - // owner is TypeRef + // owner is TypeRef (e.g., interface method call) // get data for MethodRef (from index) data = assm->crossReferenceMethodRef[index].target.data; @@ -1845,8 +1958,10 @@ bool CLR_RT_MethodDef_Instance::ResolveToken( // grab the MethodDef target = assembly->GetMethodDef(Method()); - // invalidate GenericType - genericType = nullptr; + // Preserve caller's generic context for interface method calls + // When calling a interface method (e.g. IList.Remove() on a List object) we need the closed + // generic context + genericType = callerGeneric; } #if defined(NANOCLR_INSTANCE_NAMES) @@ -1965,7 +2080,17 @@ bool CLR_RT_MethodDef_Instance::GetDeclaringType(CLR_RT_TypeDef_Instance &declTy { NATIVE_PROFILE_CLR_CORE(); - if (genericType && NANOCLR_INDEX_IS_VALID(*genericType)) + // First, get the method's owner type to check if it's actually generic + CLR_RT_TypeDef_Instance ownerType{}; + if (!ownerType.InitializeFromMethod(*this)) + { + return false; + } + + // Only use the generic type context if: + // 1. We have a generic type context available AND + // 2. 
The method's declaring type is actually generic + if (genericType && NANOCLR_INDEX_IS_VALID(*genericType) && ownerType.target->genericParamCount > 0) { // Look up the assembly that actually owns that TypeSpec auto tsAsm = g_CLR_RT_TypeSystem.m_assemblies[genericType->Assembly() - 1]; @@ -1990,24 +2115,25 @@ bool CLR_RT_MethodDef_Instance::GetDeclaringType(CLR_RT_TypeDef_Instance &declTy // generic type, advance to get the type int pos = elem.GenericParamPosition; // Use the *caller's* bound genericType (Stack, etc.) - CLR_RT_TypeDef_Index td; - NanoCLRDataType dt; - if (tsAsm == nullptr || - tsAsm->FindGenericParamAtTypeSpec(genericType->data, (CLR_UINT32)pos, td, dt) == false) + CLR_RT_TypeSpec_Instance callerTypeSpec; + if (!callerTypeSpec.InitializeFromIndex(*genericType)) + { + return false; + } + + CLR_RT_SignatureParser::Element paramElement; + if (!callerTypeSpec.GetGenericParam((CLR_UINT32)pos, paramElement)) { return false; } - return declType.InitializeFromIndex(td); + return declType.InitializeFromIndex(paramElement.Class); } } - else - { - // Normal (non‐generic or open‐generic) - return declType.InitializeFromMethod(*this); - } - return false; + // For non-generic types or when no generic context is available, + // just return the declaring type + return declType.InitializeFromMethod(*this); } ////////////////////////////// @@ -2102,6 +2228,37 @@ void CLR_RT_MethodSpec_Instance::ClearInstance() target = nullptr; } +bool CLR_RT_MethodSpec_Instance::GetGenericArgument( + CLR_INT32 argumentPosition, + CLR_RT_TypeDef_Index &typeDef, + NanoCLRDataType &dataType) +{ + CLR_RT_SignatureParser parser; + parser.Initialize_MethodSignature(this); + + // sanity check + if (argumentPosition >= parser.ParamCount) + { + return false; + } + + CLR_RT_SignatureParser::Element elem; + + // loop through parameters to find the desired one + for (CLR_INT32 i = 0; i <= argumentPosition; i++) + { + if (FAILED(parser.Advance(elem))) + { + return false; + } + } + + typeDef = elem.Class; + dataType = elem.DataType; + + return true; +} + //////////////////////////////////////////////////////////////////////////////////////////////////// void CLR_RT_TypeDescriptor::TypeDescriptor_Initialize() @@ -2175,7 +2332,9 @@ HRESULT CLR_RT_TypeDescriptor::InitializeFromReflection(const CLR_RT_ReflectionD NANOCLR_NOCLEANUP(); } -HRESULT CLR_RT_TypeDescriptor::InitializeFromTypeSpec(const CLR_RT_TypeSpec_Index &sig) +HRESULT CLR_RT_TypeDescriptor::InitializeFromTypeSpec( + const CLR_RT_TypeSpec_Index &sig, + const CLR_RT_TypeSpec_Index *contextTypeSpec) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); @@ -2188,9 +2347,9 @@ HRESULT CLR_RT_TypeDescriptor::InitializeFromTypeSpec(const CLR_RT_TypeSpec_Inde NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); } - parser.Initialize_TypeSpec(inst.assembly, inst.target); + parser.Initialize_TypeSpec(inst.assembly, inst.assembly->GetTypeSpec(inst.TypeSpec())); - NANOCLR_SET_AND_LEAVE(InitializeFromSignatureParser(parser)); + NANOCLR_SET_AND_LEAVE(InitializeFromSignatureParser(parser, contextTypeSpec)); NANOCLR_NOCLEANUP(); } @@ -2311,7 +2470,9 @@ HRESULT CLR_RT_TypeDescriptor::InitializeFromFieldDefinition(const CLR_RT_FieldD NANOCLR_NOCLEANUP(); } -HRESULT CLR_RT_TypeDescriptor::InitializeFromSignatureParser(CLR_RT_SignatureParser &parser) +HRESULT CLR_RT_TypeDescriptor::InitializeFromSignatureParser( + CLR_RT_SignatureParser &parser, + const CLR_RT_TypeSpec_Index *contextTypeSpec) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); @@ -2327,7 +2488,31 @@ HRESULT 
CLR_RT_TypeDescriptor::InitializeFromSignatureParser(CLR_RT_SignaturePar if (res.DataType == DATATYPE_GENERICINST) { - NANOCLR_CHECK_HRESULT(InitializeFromGenericType(res.TypeSpec)); + // generic type, advance again to get the type + parser.Advance(res); + } + + // Check if this is an unresolved generic parameter (VAR) and we have a context + if (res.DataType == DATATYPE_VAR && contextTypeSpec && NANOCLR_INDEX_IS_VALID(*contextTypeSpec)) + { + // Resolve VAR from context TypeSpec using existing helper + CLR_RT_TypeSpec_Instance contextTs; + if (!contextTs.InitializeFromIndex(*contextTypeSpec)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); + } + + CLR_RT_SignatureParser::Element paramElement; + if (contextTs.GetGenericParam(res.GenericParamPosition, paramElement)) + { + // Use the resolved type from context + NANOCLR_CHECK_HRESULT(InitializeFromType(paramElement.Class)); + } + else + { + // Couldn't resolve, fall back to original behavior + NANOCLR_CHECK_HRESULT(InitializeFromType(res.Class)); + } } else { @@ -2382,12 +2567,42 @@ HRESULT CLR_RT_TypeDescriptor::InitializeFromSignatureToken( if (elem.DataType == DATATYPE_VAR) { // !T: ask the CLR to map that slot into the *actual* argument - CLR_RT_TypeDef_Index td; - NanoCLRDataType dt; - g_CLR_RT_TypeSystem.m_assemblies[caller->genericType->Assembly() - 1] - ->FindGenericParamAtTypeSpec(caller->genericType->data, elem.GenericParamPosition, td, dt); - this->InitializeFromTypeDef(td); + // For SZArrayHelper scenarios, arrayElementType is authoritative for position 0 + if (caller && NANOCLR_INDEX_IS_VALID(caller->arrayElementType) && elem.GenericParamPosition == 0) + { + this->InitializeFromTypeDef(caller->arrayElementType); + } + // Otherwise try to resolve from generic context + else if (caller && caller->genericType && NANOCLR_INDEX_IS_VALID(*caller->genericType)) + { + CLR_RT_TypeSpec_Instance callerTypeSpec; + if (!callerTypeSpec.InitializeFromIndex(*caller->genericType)) + { + return false; + } + + CLR_RT_SignatureParser::Element paramElement; + if (callerTypeSpec.GetGenericParam(elem.GenericParamPosition, paramElement)) + { + if (NANOCLR_INDEX_IS_VALID(paramElement.Class)) + { + this->InitializeFromTypeDef(paramElement.Class); + } + else + { + NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); + } + } + else + { + NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); + } + } + else + { + NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE); + } } else if (elem.DataType == DATATYPE_MVAR) { @@ -2399,20 +2614,11 @@ HRESULT CLR_RT_TypeDescriptor::InitializeFromSignatureToken( } else if (elem.DataType == DATATYPE_GENERICINST) { - // full generic instantiation: read it out - // CLASS/VALUETYPE - parser.Advance(elem); - // generic-definition token - parser.Advance(elem); - - CLR_RT_TypeSpec_Index tsInst{}; - tsInst.Set(elem.Class.Assembly(), elem.Class.Type()); - - // argument count - parser.Advance(elem); - - // now read each argument and record in tsInst.m_data.GenericArguments - this->InitializeFromTypeSpec(tsInst); + // full generic instantiation: parse it directly from the signature + // Pass caller's generic type as context to resolve VAR parameters in the generic arguments + const CLR_RT_TypeSpec_Index *contextTypeSpec = + (caller && NANOCLR_INDEX_IS_VALID(*caller->genericType)) ? 
caller->genericType : nullptr; + this->InitializeFromSignatureParser(parser, contextTypeSpec); } else { @@ -4775,6 +4981,7 @@ static const TypeIndexLookup c_TypeIndexLookup[] = { TIL("System", "MulticastDelegate", MulticastDelegate), TIL("System", "Array", Array), + TIL(nullptr, "SZArrayHelper", SZArrayHelper), TIL("System.Collections", "ArrayList", ArrayList), TIL("System", "ICloneable", ICloneable), TIL("System.Collections", "IList", IList), @@ -5199,7 +5406,9 @@ CLR_RT_HeapBlock *CLR_RT_Assembly::GetGenericStaticField( CLR_RT_HeapBlock *CLR_RT_Assembly::GetStaticFieldByFieldDef( const CLR_RT_FieldDef_Index &fdIndex, - const CLR_RT_TypeSpec_Index *genericType) + const CLR_RT_TypeSpec_Index *genericType, + const CLR_RT_TypeSpec_Index *contextTypeSpec, + const CLR_RT_MethodDef_Instance *contextMethod) { NATIVE_PROFILE_CLR_CORE(); @@ -5212,6 +5421,36 @@ CLR_RT_HeapBlock *CLR_RT_Assembly::GetStaticFieldByFieldDef( { return hb; } + + // On-demand allocation: if this is an open generic type that needs runtime binding, + // allocate static fields now (closed generics should already be allocated via metadata) + CLR_RT_TypeSpec_Instance tsInst; + if (tsInst.InitializeFromIndex(*genericType) && !tsInst.IsClosedGenericType()) + { + // Get the generic type definition to check if it has static fields + CLR_RT_TypeDef_Instance genericTypeDef; + if (genericTypeDef.InitializeFromIndex(tsInst.genericTypeDef)) + { + if (genericTypeDef.target->staticFieldsCount > 0) + { + // Allocate static fields on-demand for this runtime-bound generic + // Pass both context parameters for proper VAR and MVAR resolution + if (SUCCEEDED(AllocateGenericStaticFieldsOnDemand( + *genericType, + genericTypeDef, + contextTypeSpec, + contextMethod))) + { + // Retry the lookup after allocation + hb = GetGenericStaticField(*genericType, fdIndex); + if (hb != nullptr) + { + return hb; + } + } + } + } + } } // fallback to assembly static fields (use offset stored on crossReferenceFieldDef) @@ -5226,91 +5465,341 @@ CLR_RT_HeapBlock *CLR_RT_Assembly::GetStaticFieldByFieldDef( #endif } -HRESULT CLR_RT_Assembly::PrepareForExecution() +HRESULT CLR_RT_Assembly::AllocateGenericStaticFieldsOnDemand( + const CLR_RT_TypeSpec_Index &typeSpecIndex, + const CLR_RT_TypeDef_Instance &genericTypeDef, + const CLR_RT_TypeSpec_Index *contextTypeSpec, + const CLR_RT_MethodDef_Instance *contextMethod) { NATIVE_PROFILE_CLR_CORE(); NANOCLR_HEADER(); - if ((flags & CLR_RT_Assembly::PreparingForExecution) != 0) - { - // Circular dependency - _ASSERTE(false); + CLR_UINT32 hash; + CLR_RT_TypeSpec_Instance tsInstance; + CLR_RT_Assembly *tsOwner; + CLR_RT_TypeSpec_CrossReference *tsCross; + CLR_RT_GenericStaticFieldRecord *record; + CLR_RT_Assembly *ownerAsm; + const CLR_RECORD_TYPEDEF *ownerTd; + CLR_UINT32 count; + CLR_UINT32 newMax; + CLR_RT_GenericStaticFieldRecord *newArray; + CLR_RT_HeapBlock *fields; + CLR_RT_FieldDef_Index *fieldDefs; + const CLR_RECORD_METHODDEF *md; + int methodCount; - NANOCLR_MSG_SET_AND_LEAVE(CLR_E_FAIL, L"Failed to prepare type system for execution\n"); + // Initialize TypeSpec instance for hash computation + if (!tsInstance.InitializeFromIndex(typeSpecIndex)) + { + NANOCLR_SET_AND_LEAVE(CLR_E_FAIL); } - if ((flags & CLR_RT_Assembly::PreparedForExecution) == 0) - { - int i; + // Compute hash for this closed generic type, using context to resolve VAR/MVAR if needed + // Pass both TypeSpec context (for VAR) and MethodDef context (for MVAR) + hash = g_CLR_RT_TypeSystem.ComputeHashForClosedGenericType(tsInstance, contextTypeSpec, 
contextMethod); - flags |= CLR_RT_Assembly::PreparingForExecution; + // If hash computation failed (returned 0), we can't create unique storage for this generic type + if (hash == 0) + { + NANOCLR_SET_AND_LEAVE(CLR_E_NOT_SUPPORTED); + } - ITERATE_THROUGH_RECORDS(this, i, AssemblyRef, ASSEMBLYREF) + // Check if already allocated (shouldn't happen if called from GetStaticFieldByFieldDef, but be safe) + for (CLR_UINT32 i = 0; i < g_CLR_RT_TypeSystem.m_genericStaticFieldsCount; i++) + { + if (g_CLR_RT_TypeSystem.m_genericStaticFields[i].m_hash == hash) { - _ASSERTE(dst->target != nullptr); + // Already exists, link to cross-reference if needed + tsOwner = g_CLR_RT_TypeSystem.m_assemblies[typeSpecIndex.Assembly() - 1]; + tsCross = &tsOwner->crossReferenceTypeSpec[typeSpecIndex.TypeSpec()]; - if (dst->target != nullptr) + if (tsCross->genericStaticFields == nullptr) { - NANOCLR_CHECK_HRESULT(dst->target->PrepareForExecution()); + record = &g_CLR_RT_TypeSystem.m_genericStaticFields[i]; + tsCross->genericStaticFields = record->m_fields; + tsCross->genericStaticFieldDefs = record->m_fieldDefs; + tsCross->genericStaticFieldsCount = record->m_count; } + + NANOCLR_SET_AND_LEAVE(S_OK); } + } -#if defined(NANOCLR_APPDOMAINS) - // Temporary solution. All Assemblies get added to the current AppDomain - // Which assemblies get loaded at boot, and when assemblies get added to AppDomain at runtime is - // not yet determined/implemented + // Get owner assembly and typedef + ownerAsm = genericTypeDef.assembly; + ownerTd = genericTypeDef.target; + count = ownerTd->staticFieldsCount; - NANOCLR_CHECK_HRESULT(g_CLR_RT_ExecutionEngine.GetCurrentAppDomain()->LoadAssembly(this)); -#endif + if (count == 0) + { + // No static fields to allocate + NANOCLR_SET_AND_LEAVE(S_OK); } - NANOCLR_CLEANUP(); + // Grow global registry if needed + if (g_CLR_RT_TypeSystem.m_genericStaticFieldsCount >= g_CLR_RT_TypeSystem.m_genericStaticFieldsMaxCount) + { + newMax = g_CLR_RT_TypeSystem.m_genericStaticFieldsMaxCount + 10; + newArray = (CLR_RT_GenericStaticFieldRecord *)platform_malloc(sizeof(CLR_RT_GenericStaticFieldRecord) * newMax); - // Only try once. If this fails, then what? 
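
Aside on the guard used by PrepareForExecution (moved, otherwise unchanged, further down in this hunk): PreparingForExecution detects circular AssemblyRef chains, while PreparedForExecution is set in the cleanup path whether or not preparation succeeded, making the walk one-shot. A compact model of that two-flag pattern (illustrative names only, not runtime code):

#include <vector>

struct Module
{
    enum : unsigned
    {
        Preparing = 0x1, // set while walking dependencies
        Prepared = 0x2   // set once, on success or failure
    };

    unsigned flags = 0;
    std::vector<Module *> refs;

    bool Prepare()
    {
        if (flags & Preparing)
        {
            return false; // circular dependency detected
        }

        bool ok = true;

        if ((flags & Prepared) == 0)
        {
            flags |= Preparing;

            // prepare referenced modules first (the AssemblyRef walk)
            for (Module *m : refs)
            {
                ok = m->Prepare();
                if (!ok)
                {
                    break; // mirrors NANOCLR_CHECK_HRESULT's jump to cleanup
                }
            }
        }

        // cleanup path: only try once -- mark done even after a failure
        flags |= Prepared;
        flags &= ~Preparing;

        return ok;
    }
};
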
- flags |= CLR_RT_Assembly::PreparedForExecution; - flags &= ~CLR_RT_Assembly::PreparingForExecution; + if (newArray == nullptr) + { + NANOCLR_SET_AND_LEAVE(CLR_E_OUT_OF_MEMORY); + } - NANOCLR_CLEANUP_END(); -} + // Copy existing records + if (g_CLR_RT_TypeSystem.m_genericStaticFields != nullptr && g_CLR_RT_TypeSystem.m_genericStaticFieldsCount > 0) + { + memcpy( + newArray, + g_CLR_RT_TypeSystem.m_genericStaticFields, + sizeof(CLR_RT_GenericStaticFieldRecord) * g_CLR_RT_TypeSystem.m_genericStaticFieldsCount); -//--// + platform_free(g_CLR_RT_TypeSystem.m_genericStaticFields); + } -CLR_UINT32 CLR_RT_Assembly::ComputeAssemblyHash() -{ - NATIVE_PROFILE_CLR_CORE(); - return header->ComputeAssemblyHash(name, header->version); -} + g_CLR_RT_TypeSystem.m_genericStaticFields = newArray; + g_CLR_RT_TypeSystem.m_genericStaticFieldsMaxCount = newMax; + } -CLR_UINT32 CLR_RT_Assembly::ComputeAssemblyHash(const CLR_RECORD_ASSEMBLYREF *ar) -{ - NATIVE_PROFILE_CLR_CORE(); - return header->ComputeAssemblyHash(GetString(ar->name), ar->version); -} + // Allocate storage for the static fields + fields = g_CLR_RT_ExecutionEngine.ExtractHeapBlocksForObjects( + DATATYPE_OBJECT, // heapblock kind + 0, // flags + count); // number of CLR_RT_HeapBlock entries -//--// + if (fields == nullptr) + { + NANOCLR_SET_AND_LEAVE(CLR_E_OUT_OF_MEMORY); + } -bool CLR_RT_Assembly::FindTypeDef(const char *typeName, const char *nameSpace, CLR_RT_TypeDef_Index &index) -{ - NATIVE_PROFILE_CLR_CORE(); - const CLR_RECORD_TYPEDEF *target = GetTypeDef(0); - int tblSize = tablesSize[TBL_TypeDef]; - bool isNestedType = false; - std::string extractedNamespace; + // Allocate mapping for field definitions + fieldDefs = (CLR_RT_FieldDef_Index *)platform_malloc(sizeof(CLR_RT_FieldDef_Index) * count); - // Check if typeName contains '/' - const char *slashPos = strchr(typeName, '/'); - if (slashPos != nullptr) + if (fieldDefs == nullptr) { - // Extract the type name from the '/' to the end of the string - const char *extractedTypeName = slashPos + 1; + NANOCLR_SET_AND_LEAVE(CLR_E_OUT_OF_MEMORY); + } - // Extract the enclosed type name from the '/' backwards to the '.' 
before
-    const char *dotPos = strrchr(typeName, '.');
-    std::string enclosedTypeName;
+    // Initialize the record in global registry
+    record = &g_CLR_RT_TypeSystem.m_genericStaticFields[g_CLR_RT_TypeSystem.m_genericStaticFieldsCount++];
+    record->m_hash = hash;
+    record->m_fields = fields;
+    record->m_fieldDefs = fieldDefs;
+    record->m_count = count;
 
-    if (dotPos != nullptr)
-    {
-        enclosedTypeName.assign(dotPos + 1, slashPos);
+    // Initialize field definitions and values
+    for (CLR_UINT32 i = 0; i < count; i++)
+    {
+        CLR_INDEX fieldIndex = ownerTd->firstStaticField + i;
+        fieldDefs[i].Set(ownerAsm->assemblyIndex, fieldIndex);
+
+        // Initialize the storage using the field definition
+        const CLR_RECORD_FIELDDEF *pFd = ownerAsm->GetFieldDef(fieldIndex);
+        g_CLR_RT_ExecutionEngine.InitializeReference(fields[i], pFd, ownerAsm);
+    }
+
+    // Link this assembly's cross-reference to the global registry entry
+    tsOwner = g_CLR_RT_TypeSystem.m_assemblies[typeSpecIndex.Assembly() - 1];
+    tsCross = &tsOwner->crossReferenceTypeSpec[typeSpecIndex.TypeSpec()];
+
+    tsCross->genericStaticFields = record->m_fields;
+    tsCross->genericStaticFieldDefs = record->m_fieldDefs;
+    tsCross->genericStaticFieldsCount = record->m_count;
+
+    // Now that static fields are allocated, schedule the static constructor to initialize them
+    // Find the .cctor method for the generic type definition
+    md = ownerAsm->GetMethodDef(ownerTd->firstMethod);
+    methodCount = ownerTd->virtualMethodCount + ownerTd->instanceMethodCount + ownerTd->staticMethodCount;
+
+    for (int i = 0; i < methodCount; i++, md++)
+    {
+        if (md->flags & CLR_RECORD_METHODDEF::MD_StaticConstructor)
+        {
+            // Found the .cctor - check execution status
+            CLR_RT_GenericCctorExecutionRecord *cctorRecord =
+                g_CLR_RT_TypeSystem.FindOrCreateGenericCctorRecord(hash, nullptr);
+
+            if (cctorRecord != nullptr)
+            {
+                // Check if .cctor is already executed
+                if (cctorRecord->m_flags & CLR_RT_GenericCctorExecutionRecord::c_Executed)
+                {
+                    // .cctor already completed - fields should be initialized
+                    NANOCLR_SET_AND_LEAVE(S_OK);
+                }
+
+                // Check if .cctor is already scheduled
+                if (cctorRecord->m_flags & CLR_RT_GenericCctorExecutionRecord::c_Scheduled)
+                {
+                    // .cctor is already scheduled/running - nothing more to do
+                    // The caller will retry after the .cctor completes
+                    NANOCLR_SET_AND_LEAVE(S_OK);
+                }
+
+                // Need to schedule the .cctor - mark it as scheduled
+                cctorRecord->m_flags |= CLR_RT_GenericCctorExecutionRecord::c_Scheduled;
+            }
+
+            // Schedule the .cctor to run
+            CLR_RT_MethodDef_Index cctorIndex;
+            cctorIndex.Set(ownerAsm->assemblyIndex, ownerTd->firstMethod + i);
+
+            // Ensure the .cctor thread exists (it may have been destroyed after initial startup)
+            if (g_CLR_RT_ExecutionEngine.EnsureSystemThread(
+                    g_CLR_RT_ExecutionEngine.m_cctorThread,
+                    ThreadPriority::System_Highest))
+            {
+                // Create delegate for the static constructor
+                CLR_RT_HeapBlock refDlg;
+                refDlg.SetObjectReference(nullptr);
+                CLR_RT_ProtectFromGC gc(refDlg);
+
+                if (SUCCEEDED(CLR_RT_HeapBlock_Delegate::CreateInstance(refDlg, cctorIndex, nullptr)))
+                {
+                    CLR_RT_HeapBlock_Delegate *dlg = refDlg.DereferenceDelegate();
+
+                    // Store the TypeSpec index so the .cctor can resolve type generic parameters
+                    dlg->m_genericTypeSpec = typeSpecIndex;
+
+                    // Store the caller's MethodSpec (if any) to enable resolution of method generic parameters
+                    if (contextMethod != nullptr)
+                    {
+                        dlg->m_genericMethodSpec = contextMethod->methodSpec;
+                    }
+                    else
+                    {
+                        dlg->m_genericMethodSpec.Clear();
+                    }
+
+                    // Push to the .cctor thread and schedule for execution
+                    if (SUCCEEDED(g_CLR_RT_ExecutionEngine.m_cctorThread->PushThreadProcDelegate(dlg)))
+                    {
+                        g_CLR_RT_ExecutionEngine.m_cctorThread->m_terminationCallback =
+                            CLR_RT_ExecutionEngine::StaticConstructorTerminationCallback;
+
+                        // The .cctor is now scheduled and will run when this thread yields
+                        // The caller will get nullptr and should reschedule to allow .cctor to complete
+                    }
+                    else
+                    {
+                        // Failed to schedule - clear the flag
+                        if (cctorRecord != nullptr)
+                        {
+                            cctorRecord->m_flags &= ~CLR_RT_GenericCctorExecutionRecord::c_Scheduled;
+                        }
+                    }
+                }
+                else
+                {
+                    // Failed to create delegate - clear the flag
+                    if (cctorRecord != nullptr)
+                    {
+                        cctorRecord->m_flags &= ~CLR_RT_GenericCctorExecutionRecord::c_Scheduled;
+                    }
+                }
+            }
+            else
+            {
+                // Failed to ensure thread - clear the flag
+                if (cctorRecord != nullptr)
+                {
+                    cctorRecord->m_flags &= ~CLR_RT_GenericCctorExecutionRecord::c_Scheduled;
+                }
+            }
+            break;
+        }
+    }
+
+    NANOCLR_NOCLEANUP();
+}
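A minimal sketch of the caller contract this function implies — not part of the patch. GetStaticFieldByFieldDef and its context parameters are the ones declared in nanoCLR_Runtime.h below; the yield-and-retry reaction is an assumption based on the scheduling comments above.

    // Hypothetical call site for the on-demand allocation path:
    CLR_RT_HeapBlock *field =
        assembly->GetStaticFieldByFieldDef(fdIndex, genericType, contextTypeSpec, contextMethod);

    if (field == nullptr)
    {
        // The generic type's .cctor has just been scheduled on the system thread.
        // Yield and re-execute the current IL instruction; on the next pass the
        // fields exist and the record is marked c_Executed.
    }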
+
+HRESULT CLR_RT_Assembly::PrepareForExecution()
+{
+    NATIVE_PROFILE_CLR_CORE();
+    NANOCLR_HEADER();
+
+    if ((flags & CLR_RT_Assembly::PreparingForExecution) != 0)
+    {
+        // Circular dependency
+        _ASSERTE(false);
+
+        NANOCLR_MSG_SET_AND_LEAVE(CLR_E_FAIL, L"Failed to prepare type system for execution\n");
+    }
+
+    if ((flags & CLR_RT_Assembly::PreparedForExecution) == 0)
+    {
+        int i;
+
+        flags |= CLR_RT_Assembly::PreparingForExecution;
+
+        ITERATE_THROUGH_RECORDS(this, i, AssemblyRef, ASSEMBLYREF)
+        {
+            _ASSERTE(dst->target != nullptr);
+
+            if (dst->target != nullptr)
+            {
+                NANOCLR_CHECK_HRESULT(dst->target->PrepareForExecution());
+            }
+        }
+
+#if defined(NANOCLR_APPDOMAINS)
+        // Temporary solution. All Assemblies get added to the current AppDomain
+        // Which assemblies get loaded at boot, and when assemblies get added to AppDomain at runtime is
+        // not yet determined/implemented
+
+        NANOCLR_CHECK_HRESULT(g_CLR_RT_ExecutionEngine.GetCurrentAppDomain()->LoadAssembly(this));
+#endif
+    }
+
+    NANOCLR_CLEANUP();
+
+    // Only try once. If this fails, then what?
+    flags |= CLR_RT_Assembly::PreparedForExecution;
+    flags &= ~CLR_RT_Assembly::PreparingForExecution;
+
+    NANOCLR_CLEANUP_END();
+}
+
+//--//
+
+CLR_UINT32 CLR_RT_Assembly::ComputeAssemblyHash()
+{
+    NATIVE_PROFILE_CLR_CORE();
+    return header->ComputeAssemblyHash(name, header->version);
+}
+
+CLR_UINT32 CLR_RT_Assembly::ComputeAssemblyHash(const CLR_RECORD_ASSEMBLYREF *ar)
+{
+    NATIVE_PROFILE_CLR_CORE();
+    return header->ComputeAssemblyHash(GetString(ar->name), ar->version);
+}
+
+//--//
+
+bool CLR_RT_Assembly::FindTypeDef(const char *typeName, const char *nameSpace, CLR_RT_TypeDef_Index &index)
+{
+    NATIVE_PROFILE_CLR_CORE();
+    const CLR_RECORD_TYPEDEF *target = GetTypeDef(0);
+    int tblSize = tablesSize[TBL_TypeDef];
+    bool isNestedType = false;
+    std::string extractedNamespace;
+
+    // Check if typeName contains '/'
+    const char *slashPos = strchr(typeName, '/');
+    if (slashPos != nullptr)
+    {
+        // Extract the type name from the '/' to the end of the string
+        const char *extractedTypeName = slashPos + 1;
+
+        // Extract the enclosed type name from the '/' backwards to the '.' before
+        const char *dotPos = strrchr(typeName, '.');
+        std::string enclosedTypeName;
+
+        if (dotPos != nullptr)
+        {
+            enclosedTypeName.assign(dotPos + 1, slashPos);
+
+            // Extract the namespace from the beginning of the string to that '.'
+            extractedNamespace.assign(typeName, dotPos - typeName);
+        }
@@ -5449,54 +5938,6 @@ bool CLR_RT_Assembly::FindGenericParam(CLR_INDEX typeSpecIndex, CLR_RT_GenericPa
     return false;
 }
 
-bool CLR_RT_Assembly::FindGenericParamAtTypeSpec(
-    CLR_UINT32 typeSpecIndex,
-    CLR_INT32 genericParameterPosition,
-    CLR_RT_TypeDef_Index &typeDef,
-    NanoCLRDataType &dataType)
-{
-    NATIVE_PROFILE_CLR_CORE();
-
-    CLR_RT_SignatureParser parser;
-    parser.Initialize_TypeSpec(this, GetTypeSpec(typeSpecIndex));
-
-    CLR_RT_SignatureParser::Element element;
-
-    // get into the GENERICINST
-    if (FAILED(parser.Advance(element)))
-    {
-        return false;
-    }
-
-    // move to type
-    if (FAILED(parser.Advance(element)))
-    {
-        return false;
-    }
-
-    // sanity check for invalid parameter position
-    if (genericParameterPosition > element.GenParamCount)
-    {
-        // not enough parameters!!
-        return false;
-    }
-
-    // walk to the requested parameter position
-    for (int32_t i = 0; i <= genericParameterPosition; i++)
-    {
-        if (FAILED(parser.Advance(element)))
-        {
-            return false;
-        }
-    }
-
-    // element.Class was filled from the VAR position
-    typeDef = element.Class;
-    dataType = element.DataType;
-
-    return true;
-}
-
 bool CLR_RT_Assembly::FindGenericParamAtMethodDef(
     CLR_RT_MethodDef_Instance md,
     CLR_INT32 genericParameterPosition,
@@ -7045,7 +7486,9 @@ HRESULT CLR_RT_TypeSystem::BuildTypeName(
     const CLR_RT_TypeSpec_Index &typeIndex,
     char *&szBuffer,
     size_t &iBuffer,
-    CLR_UINT32 levels)
+    CLR_UINT32 levels,
+    const CLR_RT_TypeSpec_Index *contextTypeSpec,
+    const CLR_RT_MethodDef_Instance *contextMethodDef)
 {
     NATIVE_PROFILE_CLR_CORE();
     NANOCLR_HEADER();
@@ -7075,7 +7518,7 @@ HRESULT CLR_RT_TypeSystem::BuildTypeName(
     CLR_RT_TypeDef_Index typeDef;
     typeDef.data = element.Class.data;
 
-    BuildTypeName(typeDef, szBuffer, iBuffer);
+    NANOCLR_CHECK_HRESULT(BuildTypeName(typeDef, szBuffer, iBuffer));
 
     if (element.GenParamCount > 0)
     {
@@ -7091,19 +7534,72 @@ HRESULT CLR_RT_TypeSystem::BuildTypeName(
 
     if (element.DataType == DATATYPE_VAR)
     {
-        // resolve the !T against our *closed* typeIndex
-        CLR_RT_TypeDef_Index realTd;
-        NanoCLRDataType realDt;
+        // resolve the !T against our *closed* typeIndex, if possible
+        if (contextTypeSpec != nullptr && NANOCLR_INDEX_IS_VALID(*contextTypeSpec))
+        {
+            // generic type parameter
+
+            CLR_RT_TypeSpec_Instance typeContextTs;
+            if (!typeContextTs.InitializeFromIndex(*contextTypeSpec))
+            {
+                NANOCLR_SET_AND_LEAVE(CLR_E_FAIL);
+            }
+
+            CLR_RT_SignatureParser::Element paramElement;
+            // this will bind !T→System.Int32, etc.
+            if (!typeContextTs.GetGenericParam(element.GenericParamPosition, paramElement))
+            {
+                // couldn't be resolved, print encoded form (!N)
+                char encodedParam[6];
+                snprintf(encodedParam, ARRAYSIZE(encodedParam), "!%d", element.GenericParamPosition);
+                NANOCLR_CHECK_HRESULT(QueueStringToBuffer(szBuffer, iBuffer, encodedParam));
+            }
+            else if (paramElement.DataType == DATATYPE_VAR)
+            {
+                // couldn't be resolved, print encoded form (!N)
+                char encodedParam[6];
+                snprintf(encodedParam, ARRAYSIZE(encodedParam), "!%d", element.GenericParamPosition);
+                NANOCLR_CHECK_HRESULT(QueueStringToBuffer(szBuffer, iBuffer, encodedParam));
+            }
+            else
+            {
+                // now print the *actual* type name
+                NANOCLR_CHECK_HRESULT(BuildTypeName(paramElement.Class, szBuffer, iBuffer));
+            }
+        }
+        else
+        {
+            // couldn't be resolved, print encoded form (!N)
+            char encodedParam[6];
+            snprintf(encodedParam, ARRAYSIZE(encodedParam), "!%d", element.GenericParamPosition);
+            NANOCLR_CHECK_HRESULT(QueueStringToBuffer(szBuffer, iBuffer, encodedParam));
+        }
+    }
+    else if (element.DataType == DATATYPE_MVAR)
+    {
+        // method generic parameter
+        if (contextMethodDef != nullptr && NANOCLR_INDEX_IS_VALID(*contextMethodDef))
+        {
+            if (NANOCLR_INDEX_IS_VALID(contextMethodDef->methodSpec))
+            {
+                CLR_RT_MethodSpec_Instance methodSpec{};
+                methodSpec.InitializeFromIndex(contextMethodDef->methodSpec);
 
-        // this will bind !T→System.Int32, etc.
-        typeSpecInstance.assembly->FindGenericParamAtTypeSpec(
-            typeIndex.data,
-            element.GenericParamPosition, // the !N slot
-            realTd,
-            realDt);
+                if (!methodSpec.GetGenericArgument(element.GenericParamPosition, typeDef, element.DataType))
+                {
+                    NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+                }
 
-        // now print the *actual* type name
-        BuildTypeName(realTd, szBuffer, iBuffer);
+                NANOCLR_CHECK_HRESULT(BuildTypeName(typeDef, szBuffer, iBuffer));
+            }
+        }
+        else
+        {
+            // couldn't be resolved, print encoded form (!!N)
+            char encodedParam[7];
+            snprintf(encodedParam, ARRAYSIZE(encodedParam), "!!%d", element.GenericParamPosition);
+            NANOCLR_CHECK_HRESULT(QueueStringToBuffer(szBuffer, iBuffer, encodedParam));
+        }
     }
     else
    {
@@ -7111,7 +7607,7 @@ HRESULT CLR_RT_TypeSystem::BuildTypeName(
         CLR_RT_TypeDef_Index td;
         td.data = element.Class.data;
 
-        BuildTypeName(td, szBuffer, iBuffer);
+        NANOCLR_CHECK_HRESULT(BuildTypeName(td, szBuffer, iBuffer));
     }
 
     if (i + 1 < element.GenParamCount)
@@ -7269,6 +7765,184 @@ HRESULT CLR_RT_TypeSystem::BuildMethodName(
     NANOCLR_NOCLEANUP();
 }
 
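Illustrative use of the two context parameters added to BuildTypeName — a sketch with assumed caller-side names (closedGenericTs, callerMethodInst), not code from the patch. With a closed-generic TypeSpec as context a !N slot is rendered as its bound argument; with no context it degrades to the encoded "!N"/"!!N" form.

    char rgName[256];
    char *sz = rgName;
    size_t cb = sizeof(rgName);

    // With context: a "!0" slot in the signature prints as e.g. "System.Int32".
    // Without context (nullptr, nullptr) the same slot prints as "!0".
    g_CLR_RT_TypeSystem.BuildTypeName(typeSpecIndex, sz, cb, 0, &closedGenericTs, &callerMethodInst);
    CLR_Debug::Printf("%s\r\n", rgName);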
+HRESULT CLR_RT_TypeSystem::BuildMethodName(
+    const CLR_RT_MethodDef_Instance &mdInst,
+    const CLR_RT_TypeSpec_Index *genericType,
+    char *&szBuffer,
+    size_t &iBuffer)
+{
+    NATIVE_PROFILE_CLR_CORE();
+    NANOCLR_HEADER();
+
+    CLR_RT_TypeDef_Instance declTypeInst{};
+    CLR_RT_TypeDef_Index declTypeIdx;
+    CLR_RT_TypeDef_Instance instOwner{};
+    bool useGeneric = false;
+
+    if (!declTypeInst.InitializeFromMethod(mdInst))
+    {
+        NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+    }
+
+    declTypeIdx.Set(mdInst.Assembly(), declTypeInst.assembly->crossReferenceMethodDef[mdInst.Method()].GetOwner());
+
+    if (mdInst.genericType && NANOCLR_INDEX_IS_VALID(*mdInst.genericType))
+    {
+        useGeneric = true;
+    }
+    else if (genericType && NANOCLR_INDEX_IS_VALID(*genericType))
+    {
+        // parse TypeSpec to get its TypeDef
+        CLR_RT_TypeSpec_Instance tsInst = {};
+
+        if (tsInst.InitializeFromIndex(*genericType))
+        {
+            if (tsInst.genericTypeDef.Type() == declTypeIdx.Type())
+            {
+                useGeneric = true;
+            }
+        }
+    }
+
+    if (!useGeneric)
+    {
+        if (instOwner.InitializeFromMethod(mdInst) == false)
+        {
+            NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+        }
+
+        NANOCLR_CHECK_HRESULT(BuildTypeName(instOwner, szBuffer, iBuffer));
+    }
+    else
+    {
+        // First, build the type name (either from genericType or from the method's declaring type)
+        if (genericType != nullptr && NANOCLR_INDEX_IS_VALID(*genericType) && genericType->data != CLR_EmptyToken)
+        {
+            // Use the provided generic type context
+            if (!SUCCEEDED(BuildTypeName(*genericType, szBuffer, iBuffer, 0, nullptr, &mdInst)))
+            {
+                // Fall back to the declaring type
+                if (instOwner.InitializeFromMethod(mdInst) == false)
+                {
+                    NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+                }
+                NANOCLR_CHECK_HRESULT(BuildTypeName(instOwner, szBuffer, iBuffer));
+            }
+        }
+        else if (mdInst.genericType != nullptr && NANOCLR_INDEX_IS_VALID(*mdInst.genericType))
+        {
+            // Use the method instance's generic type
+            if (!SUCCEEDED(BuildTypeName(*mdInst.genericType, szBuffer, iBuffer, 0)))
+            {
+                // Fall back to the declaring type
+                if (instOwner.InitializeFromMethod(mdInst) == false)
+                {
+                    NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+                }
+                NANOCLR_CHECK_HRESULT(BuildTypeName(instOwner, szBuffer, iBuffer));
+            }
+        }
+        else
+        {
+            // Fall back to the declaring type
+            if (instOwner.InitializeFromMethod(mdInst) == false)
+            {
+                NANOCLR_SET_AND_LEAVE(CLR_E_WRONG_TYPE);
+            }
+            NANOCLR_CHECK_HRESULT(BuildTypeName(instOwner, szBuffer, iBuffer));
+        }
+
+        // Append the method name
+        CLR_SafeSprintf(szBuffer, iBuffer, "::%s", mdInst.assembly->GetString(mdInst.target->name));
+
+        // If this method has generic parameters (methodSpec is valid), append them
+        if (NANOCLR_INDEX_IS_VALID(mdInst.methodSpec))
+        {
+            CLR_RT_MethodSpec_Instance msInst{};
+            if (msInst.InitializeFromIndex(mdInst.methodSpec))
+            {
+                // Parse the methodSpec instantiation signature to get the generic arguments
+                CLR_RT_SignatureParser parser{};
+                parser.Initialize_MethodSignature(&msInst);
+
+                CLR_SafeSprintf(szBuffer, iBuffer, "<");
+
+                for (int i = 0; i < parser.ParamCount; i++)
+                {
+                    CLR_RT_SignatureParser::Element elem{};
+                    if (FAILED(parser.Advance(elem)))
+                    {
+                        break;
+                    }
+
+                    if (i > 0)
+                    {
+                        CLR_SafeSprintf(szBuffer, iBuffer, ", ");
+                    }
+
+                    // Build the type name for this generic argument
+                    // Use the method's declaring type as context for VAR resolution
+                    const CLR_RT_TypeSpec_Index *context =
+                        (mdInst.genericType && NANOCLR_INDEX_IS_VALID(*mdInst.genericType)) ? mdInst.genericType
+                                                                                            : nullptr;
+
+                    if (elem.DataType == DATATYPE_VAR || elem.DataType == DATATYPE_MVAR)
+                    {
+                        // Generic parameter - try to resolve it
+                        if (context != nullptr)
+                        {
+                            CLR_RT_TypeSpec_Instance contextTs;
+                            if (!contextTs.InitializeFromIndex(*context))
+                            {
+                                NANOCLR_SET_AND_LEAVE(CLR_E_FAIL);
+                            }
+
+                            CLR_RT_SignatureParser::Element paramElement;
+                            if (contextTs.GetGenericParam(elem.GenericParamPosition, paramElement))
+                            {
+                                NANOCLR_CHECK_HRESULT(BuildTypeName(paramElement.Class, szBuffer, iBuffer));
+                            }
+                            else
+                            {
+                                // Couldn't resolve - show as !n or !!n
+                                if (elem.DataType == DATATYPE_VAR)
+                                {
+                                    CLR_SafeSprintf(szBuffer, iBuffer, "!%d", elem.GenericParamPosition);
+                                }
+                                else
+                                {
+                                    CLR_SafeSprintf(szBuffer, iBuffer, "!!%d", elem.GenericParamPosition);
+                                }
+                            }
+                        }
+                        else
+                        {
+                            // No context - show as !n or !!n
+                            if (elem.DataType == DATATYPE_VAR)
+                            {
+                                CLR_SafeSprintf(szBuffer, iBuffer, "!%d", elem.GenericParamPosition);
+                            }
+                            else
+                            {
+                                CLR_SafeSprintf(szBuffer, iBuffer, "!!%d", elem.GenericParamPosition);
+                            }
+                        }
+                    }
+                    else if (NANOCLR_INDEX_IS_VALID(elem.Class))
+                    {
+                        // Concrete type
+                        NANOCLR_CHECK_HRESULT(BuildTypeName(elem.Class, szBuffer, iBuffer));
+                    }
+                }
+
+                CLR_SafeSprintf(szBuffer, iBuffer, ">");
+            }
+        }
+    }
+
+    NANOCLR_NOCLEANUP();
+}
+
 HRESULT CLR_RT_TypeSystem::BuildFieldName(const CLR_RT_FieldDef_Index &fd, char *&szBuffer, size_t &iBuffer)
 {
     NATIVE_PROFILE_CLR_CORE();
@@ -7385,6 +8059,7 @@ HRESULT CLR_RT_TypeSystem::BuildMethodRefName(
     NANOCLR_NOCLEANUP();
 }
 
+
 HRESULT CLR_RT_TypeSystem::BuildMethodSpecName(const CLR_RT_MethodSpec_Index &ms, char *&szBuffer, size_t &iBuffer)
 {
     NATIVE_PROFILE_CLR_CORE();
@@ -7558,6 +8233,8 @@ bool CLR_RT_TypeSystem::FindVirtualMethodDef(
     CLR_RT_MethodDef_Instance calleeInst{};
     calleeInst.InitializeFromIndex(calleeMD);
 
+    const bool isArrayClass = (cls.data == g_CLR_RT_WellKnownTypes.Array.data);
+
     const CLR_RECORD_METHODDEF *calleeMDR = calleeInst.target;
     CLR_UINT8 calleeArgumentsCount = calleeMDR->argumentsCount;
@@ -7602,6 +8279,53 @@ bool CLR_RT_TypeSystem::FindVirtualMethodDef(
         clsInst.SwitchToParent();
     }
 
+    // SZ arrays expose IList<T> generic interfaces through System.Array+SZArrayHelper, so fall back to that helper
+    // type
+    if (isArrayClass)
+    {
+        const CLR_RT_TypeDef_Index &arrayHelperIdx = g_CLR_RT_WellKnownTypes.SZArrayHelper;
+
+        if (NANOCLR_INDEX_IS_VALID(arrayHelperIdx))
+        {
+            CLR_RT_TypeDef_Instance helperInst{};
+            helperInst.InitializeFromIndex(arrayHelperIdx);
+
+            CLR_RT_Assembly *arrayHelperAssm = helperInst.assembly;
+            const CLR_RECORD_TYPEDEF *arrayHelperTypeDef = helperInst.target;
+
+            if (arrayHelperAssm != nullptr && arrayHelperTypeDef != nullptr)
+            {
+                int methodCount = arrayHelperTypeDef->virtualMethodCount + arrayHelperTypeDef->instanceMethodCount;
+
+                if (methodCount > 0 && arrayHelperTypeDef->firstMethod != CLR_EmptyIndex)
+                {
+                    const CLR_RECORD_METHODDEF *arrayHelperMethodDef =
+                        arrayHelperAssm->GetMethodDef(arrayHelperTypeDef->firstMethod);
+
+                    for (int i = 0; i < methodCount; i++, arrayHelperMethodDef++)
+                    {
+                        if ((arrayHelperMethodDef->flags & CLR_RECORD_METHODDEF::MD_Static) != 0)
+                        {
+                            continue;
+                        }
+
+                        if (arrayHelperMethodDef->argumentsCount != calleeArgumentsCount)
+                        {
+                            continue;
+                        }
+
+                        const char *methodNameAtHelper = arrayHelperAssm->GetString(arrayHelperMethodDef->name);
+                        if (methodNameAtHelper != nullptr && strcmp(methodNameAtHelper, calleeName) == 0)
+                        {
+                            index.Set(arrayHelperAssm->assemblyIndex, arrayHelperTypeDef->firstMethod + i);
+                            return true;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
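    // Worked example of the rebind above (illustrative): a callvirt to IList`1::get_Item
    // on an int[] finds no matching slot while walking System.Array and its parents, so
    // the loop above falls through; the helper scan then matches SZArrayHelper's instance
    // method of the same name and argument count and returns its MethodDef index via
    // index.Set(arrayHelperAssm->assemblyIndex, arrayHelperTypeDef->firstMethod + i).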
     index.Clear();
 
     return false;
 
@@ -7744,9 +8468,13 @@ CLR_RT_GenericStaticFieldRecord *CLR_RT_TypeSystem::FindOrCreateGenericStaticFie
     return record;
 }
 
-CLR_UINT32 CLR_RT_TypeSystem::ComputeHashForClosedGenericType(CLR_RT_TypeSpec_Instance &typeInstance)
+CLR_UINT32 CLR_RT_TypeSystem::ComputeHashForClosedGenericType(
+    CLR_RT_TypeSpec_Instance &typeInstance,
+    const CLR_RT_TypeSpec_Index *contextTypeSpec,
+    const CLR_RT_MethodDef_Instance *contextMethod)
 {
     CLR_UINT32 hash = 0;
+    int argCount;
 
     // Start with the generic type definition
     hash = SUPPORT_ComputeCRC(&typeInstance.genericTypeDef.data, sizeof(CLR_UINT32), hash);
@@ -7760,17 +8488,17 @@ CLR_UINT32 CLR_RT_TypeSystem::ComputeHashForClosedGenericType(CLR_RT_TypeSpec_In
     // Advance to the generic instance marker
     if (FAILED(parser.Advance(elem)) || elem.DataType != DATATYPE_GENERICINST)
     {
-        return hash;
+        goto ComputeHash_End;
     }
 
     // Advance to the generic type definition
     if (FAILED(parser.Advance(elem)))
     {
-        return hash;
+        goto ComputeHash_End;
     }
 
     // Get argument count
-    int argCount = elem.GenParamCount;
+    argCount = elem.GenParamCount;
 
     // Process each generic argument
     for (int i = 0; i < argCount; i++)
@@ -7780,15 +8508,71 @@ CLR_UINT32 CLR_RT_TypeSystem::ComputeHashForClosedGenericType(CLR_RT_TypeSpec_In
             break;
         }
 
-        // Add each argument's type information to the hash
-        hash = SUPPORT_ComputeCRC(&elem.DataType, sizeof(elem.DataType), hash);
+        // Check if this is an unresolved generic parameter (VAR or MVAR)
+        if (elem.DataType == DATATYPE_VAR && contextTypeSpec && NANOCLR_INDEX_IS_VALID(*contextTypeSpec))
+        {
+            // Resolve VAR (type parameter) from context TypeSpec
+            CLR_RT_TypeSpec_Instance contextTs;
+            if (!contextTs.InitializeFromIndex(*contextTypeSpec))
+            {
+                hash = 0;
+                goto ComputeHash_End;
+            }
 
-        if (elem.DataType == DATATYPE_CLASS || elem.DataType == DATATYPE_VALUETYPE)
+            CLR_RT_SignatureParser::Element paramElement;
+            if (contextTs.GetGenericParam(elem.GenericParamPosition, paramElement))
+            {
+                // Use the resolved type from context
+                hash = SUPPORT_ComputeCRC(&paramElement.DataType, sizeof(paramElement.DataType), hash);
+
+                if (paramElement.DataType == DATATYPE_CLASS || paramElement.DataType == DATATYPE_VALUETYPE)
+                {
+                    hash = SUPPORT_ComputeCRC(&paramElement.Class.data, sizeof(paramElement.Class.data), hash);
+                }
+            }
+            else
+            {
+                // couldn't resolve VAR, return failure
+                hash = 0;
+                goto ComputeHash_End;
+            }
+        }
+        else if (elem.DataType == DATATYPE_MVAR && contextMethod && NANOCLR_INDEX_IS_VALID(contextMethod->methodSpec))
         {
-            hash = SUPPORT_ComputeCRC(&elem.Class.data, sizeof(elem.Class.data), hash);
+            // Resolve MVAR (method parameter) from MethodSpec instance
+
+            CLR_RT_MethodSpec_Instance methodSpecInst{};
+            if (methodSpecInst.InitializeFromIndex(contextMethod->methodSpec))
+            {
+                CLR_RT_TypeDef_Index resolvedTypeDef;
+                NanoCLRDataType resolvedDataType;
+
+                if (methodSpecInst.GetGenericArgument(elem.GenericParamPosition, resolvedTypeDef, resolvedDataType))
+                {
+                    // Use the resolved type from MethodSpec
+                    hash = SUPPORT_ComputeCRC(&resolvedDataType, sizeof(resolvedDataType), hash);
+                }
+                else
+                {
+                    // couldn't resolve MVAR, return failure
+                    hash = 0;
+                    goto ComputeHash_End;
+                }
+            }
+        }
+        else
+        {
+            // Add each argument's type information to the hash
+            hash = SUPPORT_ComputeCRC(&elem.DataType, sizeof(elem.DataType), hash);
+
+            if (elem.DataType == DATATYPE_CLASS || elem.DataType == DATATYPE_VALUETYPE)
+            {
+                hash = SUPPORT_ComputeCRC(&elem.Class.data, sizeof(elem.Class.data), hash);
+            }
         }
     }
 
+ComputeHash_End:
     return hash ? hash : 0xFFFFFFFF; // Don't allow zero as a hash value
 }
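In summary, the hash of a closed generic type is built from (a) the generic type definition token, then (b) per argument its NanoCLRDataType and, for CLASS/VALUETYPE arguments, the resolved TypeDef token. The comment sketch below restates that contract; the List`1 names are illustrative.

    // List<int> hashes as:
    //   CRC(typeInstance.genericTypeDef.data)  - the List`1 TypeDef token
    //   CRC(DATATYPE_I4)                       - the argument's data type
    //   (CLASS/VALUETYPE arguments additionally CRC elem.Class.data)
    // VAR slots are rebound through contextTypeSpec first, so an open List<T>
    // evaluated where T = int produces the same hash as a directly closed List<int>,
    // giving each distinct instantiation its own static-field record.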
 
diff --git a/src/CLR/Diagnostics/Diagnostics_stub.cpp b/src/CLR/Diagnostics/Diagnostics_stub.cpp
index 2ca278ea77..24a4ace5e7 100644
--- a/src/CLR/Diagnostics/Diagnostics_stub.cpp
+++ b/src/CLR/Diagnostics/Diagnostics_stub.cpp
@@ -190,6 +190,14 @@ __nfweak void CLR_RT_DUMP::METHOD(const CLR_RT_MethodDef_Index &method, const CL
     NATIVE_PROFILE_CLR_DIAGNOSTICS();
 }
 
+__nfweak void CLR_RT_DUMP::METHOD(const CLR_RT_MethodDef_Instance &mdInst, const CLR_RT_TypeSpec_Index *genericType)
+{
+    (void)mdInst;
+    (void)genericType;
+
+    NATIVE_PROFILE_CLR_DIAGNOSTICS();
+}
+
 __nfweak void CLR_RT_DUMP::FIELD(const CLR_RT_FieldDef_Index &field)
 {
     (void)field;
diff --git a/src/CLR/Diagnostics/Info.cpp b/src/CLR/Diagnostics/Info.cpp
index 1d0f44ada1..d48d0bc71e 100644
--- a/src/CLR/Diagnostics/Info.cpp
+++ b/src/CLR/Diagnostics/Info.cpp
@@ -446,17 +446,38 @@ void CLR_RT_Assembly::DumpToken(CLR_UINT32 token, const CLR_RT_MethodDef_Instanc
     const CLR_RECORD_FIELDREF *fr = GetFieldRef(index);
     const auto &xref = crossReferenceFieldRef[index];
 
-    // If the caller passed in a closed‐generic TypeSpec, use that …
+    // If the caller passed in a closed‐generic TypeSpec, use that
     if (methodDefInstance.genericType != nullptr && methodDefInstance.genericType->data != CLR_EmptyToken)
     {
-        // Build the closed‐generic owner name
-        char rgType[256], *sz = rgType;
-        size_t cb = sizeof(rgType);
-        g_CLR_RT_TypeSystem.BuildTypeName(*methodDefInstance.genericType, sz, cb, 0);
-
-        // Append the field name
-        CLR_SafeSprintf(sz, cb, "::%s", GetString(fr->name));
-        CLR_Debug::Printf("%s", rgType);
+        // The field's encodedOwner points to the TypeSpec we want to build the name for (e.g., EmptyArray<T>)
+        // and methodDefInstance.genericType is the closed generic type that provides context (e.g.,
+        // EmptyArray<int>)
+        if (fr->Owner() == TBL_TypeSpec)
+        {
+            static CLR_RT_TypeSpec_Index s_ownerTypeSpec;
+            s_ownerTypeSpec.Set(assemblyIndex, fr->OwnerIndex());
+
+            // Build the type name using the closed generic as context to resolve VAR parameters
+            char rgType[256], *sz = rgType;
+            size_t cb = sizeof(rgType);
+            g_CLR_RT_TypeSystem
+                .BuildTypeName(s_ownerTypeSpec, sz, cb, 0, methodDefInstance.genericType, &methodDefInstance);
+
+            // Append the field name
+            CLR_SafeSprintf(sz, cb, "::%s", GetString(fr->name));
+            CLR_Debug::Printf("%s", rgType);
+        }
+        else
+        {
+            // TypeRef case - just use the existing genericType
+            char rgType[256], *sz = rgType;
+            size_t cb = sizeof(rgType);
+            g_CLR_RT_TypeSystem.BuildTypeName(*methodDefInstance.genericType, sz, cb, 0, nullptr);
+
+            // Append the field name
+            CLR_SafeSprintf(sz, cb, "::%s", GetString(fr->name));
+            CLR_Debug::Printf("%s", rgType);
+        }
     }
     else
     {
@@ -579,22 +600,26 @@ void CLR_RT_Assembly::DumpToken(CLR_UINT32 token, const CLR_RT_MethodDef_Instanc
             // if the caller's genericType is non‐null, ask the CLR to map !n→actual argument:
             if (methodDefInstance.genericType != nullptr && NANOCLR_INDEX_IS_VALID(*methodDefInstance.genericType))
             {
-                CLR_RT_TypeDef_Index tdArg{};
-                NanoCLRDataType dtArg;
-                bool ok = g_CLR_RT_TypeSystem.m_assemblies[methodDefInstance.genericType->Assembly() - 1]
-                              ->FindGenericParamAtTypeSpec(
-                                  methodDefInstance.genericType->TypeSpec(),
-                                  gpPosition,
-                                  tdArg,
-                                  dtArg);
-                if (ok)
+                CLR_RT_TypeSpec_Instance typeSpec;
+                if (!typeSpec.InitializeFromIndex(*methodDefInstance.genericType))
+                {
+                    CLR_Debug::Printf("!%d", gpPosition);
+                    break;
+                }
+
+                CLR_RT_SignatureParser::Element paramElement;
+                if (typeSpec.GetGenericParam(gpPosition, paramElement))
                 {
                     char bufArg[256]{};
                     char *pArg = bufArg;
                     size_t cbArg = sizeof(bufArg);
-                    g_CLR_RT_TypeSystem
-                        .BuildTypeName(tdArg, pArg, cbArg, CLR_RT_TypeSystem::TYPENAME_FLAGS_FULL, elem.Levels);
+                    g_CLR_RT_TypeSystem.BuildTypeName(
+                        paramElement.Class,
+                        pArg,
+                        cbArg,
+                        CLR_RT_TypeSystem::TYPENAME_FLAGS_FULL,
+                        elem.Levels);
 
                     CLR_Debug::Printf("%s", bufArg);
 
@@ -663,23 +688,20 @@ void CLR_RT_Assembly::DumpToken(CLR_UINT32 token, const CLR_RT_MethodDef_Instanc
 
             if (methodDefInstance.genericType != nullptr && NANOCLR_INDEX_IS_VALID(*methodDefInstance.genericType))
             {
-                CLR_RT_TypeDef_Index tdArg{};
-                NanoCLRDataType dtArg;
-
-                bool genericParamFound = tsInst.assembly->FindGenericParamAtTypeSpec(
-                    methodDefInstance.genericType->TypeSpec(),
-                    gpIndex,
-                    tdArg,
-                    dtArg);
-                if (genericParamFound)
+                CLR_RT_TypeSpec_Instance typeSpec;
+                if (typeSpec.InitializeFromIndex(*methodDefInstance.genericType))
                 {
-                    // print "I4[]" or the bound argument plus []
-                    char bufArg[256];
-                    char *pArg = bufArg;
-                    size_t cbArg = sizeof(bufArg);
-                    g_CLR_RT_TypeSystem.BuildTypeName(tdArg, pArg, cbArg);
-                    CLR_Debug::Printf("%s[]", bufArg);
-                    break;
+                    CLR_RT_SignatureParser::Element paramElement;
+                    if (typeSpec.GetGenericParam(gpIndex, paramElement))
+                    {
+                        // print "I4[]" or the bound argument plus []
+                        char bufArg[256];
+                        char *pArg = bufArg;
+                        size_t cbArg = sizeof(bufArg);
+                        g_CLR_RT_TypeSystem.BuildTypeName(paramElement.Class, pArg, cbArg);
+                        CLR_Debug::Printf("%s[]", bufArg);
+                        break;
+                    }
                 }
             }
 
@@ -932,7 +954,7 @@ void CLR_RT_Assembly::DumpOpcodeDirect(
     if (op == CEE_CALL || op == CEE_CALLVIRT)
     {
         CLR_RT_MethodDef_Instance mdInst{};
-        if (mdInst.ResolveToken(token, call.assembly, call.genericType))
+        if (NANOCLR_INDEX_IS_VALID(call) && mdInst.ResolveToken(token, call.assembly, call.genericType))
         {
             // mdInst now holds the target MethodDef (or MethodSpec) plus any genericType.
             CLR_RT_DUMP::METHOD(mdInst, call.genericType);
@@ -1025,6 +1047,18 @@ void CLR_RT_DUMP::METHOD(const CLR_RT_MethodDef_Index &method, const CLR_RT_Type
     CLR_Debug::Printf("%s", rgBuffer);
 }
 
+void CLR_RT_DUMP::METHOD(const CLR_RT_MethodDef_Instance &mdInst, const CLR_RT_TypeSpec_Index *genericType)
+{
+    NATIVE_PROFILE_CLR_DIAGNOSTICS();
+    char rgBuffer[512];
+    char *szBuffer = rgBuffer;
+    size_t iBuffer = MAXSTRLEN(rgBuffer);
+
+    g_CLR_RT_TypeSystem.BuildMethodName(mdInst, genericType, szBuffer, iBuffer);
+
+    CLR_Debug::Printf("%s", rgBuffer);
+}
+
 void CLR_RT_DUMP::FIELD(const CLR_RT_FieldDef_Index &field)
 {
     NATIVE_PROFILE_CLR_DIAGNOSTICS();
@@ -1083,7 +1117,19 @@ void CLR_RT_DUMP::OBJECT(CLR_RT_HeapBlock *ptr, const char *text)
         {
             auto *dlg = (CLR_RT_HeapBlock_Delegate *)ptr;
 
-            CLR_RT_DUMP::METHOD(dlg->DelegateFtn(), nullptr);
+            CLR_RT_MethodDef_Instance mdInst;
+            if (mdInst.InitializeFromIndex(dlg->DelegateFtn()))
+            {
+                // Use the delegate's stored generic context for more informative diagnostics
+                const CLR_RT_TypeSpec_Index *genericType =
+                    (dlg->m_genericTypeSpec.data != 0) ? &dlg->m_genericTypeSpec : nullptr;
+                CLR_RT_DUMP::METHOD(mdInst, genericType);
+            }
+            else
+            {
+                // Fallback if initialization fails
+                CLR_RT_DUMP::METHOD(dlg->DelegateFtn(), nullptr);
+            }
         }
         break;
 
diff --git a/src/CLR/Include/nanoCLR_Checks.h b/src/CLR/Include/nanoCLR_Checks.h
index f13a26126c..8048974bb1 100644
--- a/src/CLR/Include/nanoCLR_Checks.h
+++ b/src/CLR/Include/nanoCLR_Checks.h
@@ -27,6 +27,7 @@ struct CLR_RT_DUMP
     static void TYPE (const CLR_RT_TypeDef_Index& cls ) DECL_POSTFIX;
     static void TYPE (const CLR_RT_ReflectionDef_Index& reflex ) DECL_POSTFIX;
     static void METHOD (const CLR_RT_MethodDef_Index& method, const CLR_RT_TypeSpec_Index *genericType) DECL_POSTFIX;
+    static void METHOD (const CLR_RT_MethodDef_Instance& mdInst, const CLR_RT_TypeSpec_Index *genericType) DECL_POSTFIX;
     static void FIELD (const CLR_RT_FieldDef_Index& field ) DECL_POSTFIX;
     static void OBJECT ( CLR_RT_HeapBlock* ptr , const char* text ) DECL_POSTFIX;
     static void METHODREF (const CLR_RT_MethodRef_Index& method ) DECL_POSTFIX;
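A short usage sketch for the new CLR_RT_DUMP::METHOD overload — illustrative only; the token/caller values are assumed to come from a resolved call site, mirroring DumpOpcodeDirect above:

    CLR_RT_MethodDef_Instance mdInst{};
    if (mdInst.ResolveToken(token, caller.assembly, caller.genericType))
    {
        // With a generic context the dump can print a closed name such as
        // "System.Collections.Generic.List<System.Int32>::Add" instead of the open form.
        CLR_RT_DUMP::METHOD(mdInst, caller.genericType);
    }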
diff --git a/src/CLR/Include/nanoCLR_Runtime.h b/src/CLR/Include/nanoCLR_Runtime.h
index b22d291ea1..bcda8ff63b 100644
--- a/src/CLR/Include/nanoCLR_Runtime.h
+++ b/src/CLR/Include/nanoCLR_Runtime.h
@@ -1437,7 +1437,14 @@ struct CLR_RT_Assembly : public CLR_RT_HeapBlock_Node // EVENT HEAP - NO RELOCAT
         const CLR_RT_FieldDef_Index &fdIndex);
     CLR_RT_HeapBlock *GetStaticFieldByFieldDef(
         const CLR_RT_FieldDef_Index &fdIndex,
-        const CLR_RT_TypeSpec_Index *genericType);
+        const CLR_RT_TypeSpec_Index *genericType,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr,
+        const CLR_RT_MethodDef_Instance *contextMethod = nullptr);
+    HRESULT AllocateGenericStaticFieldsOnDemand(
+        const CLR_RT_TypeSpec_Index &typeSpecIndex,
+        const CLR_RT_TypeDef_Instance &genericTypeDef,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr,
+        const CLR_RT_MethodDef_Instance *contextMethod = nullptr);
     HRESULT PrepareForExecution();
 
     CLR_UINT32 ComputeAssemblyHash();
@@ -1448,11 +1455,6 @@ struct CLR_RT_Assembly : public CLR_RT_HeapBlock_Node // EVENT HEAP - NO RELOCAT
     bool FindTypeDef(CLR_UINT32 hash, CLR_RT_TypeDef_Index &index);
     bool FindTypeSpec(const CLR_PMETADATA sig, CLR_RT_TypeSpec_Index &index);
 
-    bool FindGenericParamAtTypeSpec(
-        CLR_UINT32 typeSpecIndex,
-        CLR_INT32 genericParameterPosition,
-        CLR_RT_TypeDef_Index &typeDef,
-        NanoCLRDataType &dataType);
     bool FindGenericParamAtMethodDef(
         CLR_RT_MethodDef_Instance md,
         CLR_INT32 genericParameterPosition,
@@ -1743,6 +1745,7 @@ struct CLR_RT_WellKnownTypes
     CLR_RT_TypeDef_Index MulticastDelegate;
 
     CLR_RT_TypeDef_Index Array;
+    CLR_RT_TypeDef_Index SZArrayHelper;
     CLR_RT_TypeDef_Index ArrayList;
     CLR_RT_TypeDef_Index ICloneable;
     CLR_RT_TypeDef_Index IList;
@@ -2069,7 +2072,13 @@ struct CLR_RT_TypeSystem // EVENT HEAP - NO RELOCATION -
         const CLR_RECORD_RESOURCE *&res,
         CLR_UINT32 &size);
 
-    HRESULT BuildTypeName(const CLR_RT_TypeSpec_Index &typeIndex, char *&szBuffer, size_t &iBuffer, CLR_UINT32 levels);
+    HRESULT BuildTypeName(
+        const CLR_RT_TypeSpec_Index &typeIndex,
+        char *&szBuffer,
+        size_t &iBuffer,
+        CLR_UINT32 levels,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr,
+        const CLR_RT_MethodDef_Instance *contextMethodDef = nullptr);
     HRESULT BuildTypeName(
         const CLR_RT_TypeDef_Index &cls,
         char *&szBuffer,
@@ -2082,6 +2091,11 @@ struct CLR_RT_TypeSystem // EVENT HEAP - NO RELOCATION -
         const CLR_RT_TypeSpec_Index *genericType,
         char *&szBuffer,
         size_t &size);
+    HRESULT BuildMethodName(
+        const CLR_RT_MethodDef_Instance &mdInst,
+        const CLR_RT_TypeSpec_Index *genericType,
+        char *&szBuffer,
+        size_t &size);
     HRESULT BuildFieldName(const CLR_RT_FieldDef_Index &fd, char *&szBuffer, size_t &size);
     HRESULT BuildMethodRefName(const CLR_RT_MethodRef_Index &method, char *&szBuffer, size_t &iBuffer);
     HRESULT BuildMethodRefName(
@@ -2133,7 +2147,10 @@ struct CLR_RT_TypeSystem // EVENT HEAP - NO RELOCATION -
         CLR_UINT32 staticFieldCount);
 
     // Helper to compute hash for a closed generic type
-    static CLR_UINT32 ComputeHashForClosedGenericType(CLR_RT_TypeSpec_Instance &typeInstance);
+    static CLR_UINT32 ComputeHashForClosedGenericType(
+        CLR_RT_TypeSpec_Instance &typeInstance,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr,
+        const CLR_RT_MethodDef_Instance *contextMethod = nullptr);
 
     // Helper to find or create a generic .cctor execution record by hash
     static CLR_RT_GenericCctorExecutionRecord *FindOrCreateGenericCctorRecord(CLR_UINT32 hash, bool *created);
@@ -2174,6 +2191,8 @@ struct CLR_RT_TypeSpec_Instance : public CLR_RT_TypeSpec_Index
     bool ResolveToken(CLR_UINT32 tk, CLR_RT_Assembly *assm, const CLR_RT_MethodDef_Instance *caller = nullptr);
 
     bool IsClosedGenericType();
+
+    bool GetGenericParam(CLR_INT32 parameterPosition, CLR_RT_SignatureParser::Element &element);
 };
 
 //--//
@@ -2251,6 +2270,9 @@ struct CLR_RT_MethodDef_Instance : public CLR_RT_MethodDef_Index
     const CLR_RT_TypeSpec_Index *genericType;
     CLR_RT_MethodSpec_Index methodSpec;
 
+    // For SZArrayHelper rebind: stores the array element TypeDef when dispatching from arrays
+    CLR_RT_TypeDef_Index arrayElementType;
+
 #if defined(NANOCLR_INSTANCE_NAMES)
     const char *name;
 #endif
@@ -2326,6 +2348,8 @@ struct CLR_RT_MethodSpec_Instance : public CLR_RT_MethodSpec_Index
     }
 
     CLR_EncodedMethodDefOrRef InstanceOfMethod;
+
+    bool GetGenericArgument(CLR_INT32 argumentPosition, CLR_RT_TypeDef_Index &typeDef, NanoCLRDataType &dataType);
 };
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -2432,12 +2456,16 @@ struct CLR_RT_TypeDescriptor
     HRESULT InitializeFromDataType(NanoCLRDataType dt);
     HRESULT InitializeFromReflection(const CLR_RT_ReflectionDef_Index &reflex);
-    HRESULT InitializeFromTypeSpec(const CLR_RT_TypeSpec_Index &sig);
+    HRESULT InitializeFromTypeSpec(
+        const CLR_RT_TypeSpec_Index &sig,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr);
     HRESULT InitializeFromType(const CLR_RT_TypeDef_Index &cls);
     HRESULT InitializeFromTypeDef(const CLR_RT_TypeDef_Index &cls);
     HRESULT InitializeFromGenericType(const CLR_RT_TypeSpec_Index &genericType);
     HRESULT InitializeFromFieldDefinition(const CLR_RT_FieldDef_Instance &fd);
-    HRESULT InitializeFromSignatureParser(CLR_RT_SignatureParser &parser);
+    HRESULT InitializeFromSignatureParser(
+        CLR_RT_SignatureParser &parser,
+        const CLR_RT_TypeSpec_Index *contextTypeSpec = nullptr);
     HRESULT InitializeFromSignatureToken(
         CLR_RT_Assembly *assm,
         CLR_UINT32 token,
@@ -4204,10 +4232,20 @@ struct CLR_RT_ExecutionEngine
     static bool IsInstanceOf(CLR_RT_TypeDescriptor &desc, CLR_RT_TypeDescriptor &descTarget, bool isInstInstruction);
     static bool IsInstanceOf(const CLR_RT_TypeDef_Index &cls, const CLR_RT_TypeDef_Index &clsTarget);
     static bool IsInstanceOf(CLR_RT_HeapBlock &obj, const CLR_RT_TypeDef_Index &clsTarget);
-    static bool IsInstanceOf(CLR_RT_HeapBlock &obj, CLR_RT_Assembly *assm, CLR_UINT32 token, bool isInstInstruction);
+    static bool IsInstanceOf(
+        CLR_RT_HeapBlock &obj,
+        CLR_RT_Assembly *assm,
+        CLR_UINT32 token,
+        bool isInstInstruction,
+        const CLR_RT_MethodDef_Instance *caller = nullptr);
     bool IsInstanceOfToken(CLR_UINT32 token, CLR_RT_HeapBlock &obj, const CLR_RT_MethodDef_Instance &caller);
 
-    static HRESULT CastToType(CLR_RT_HeapBlock &ref, CLR_UINT32 tk, CLR_RT_Assembly *assm, bool isInstInstruction);
+    static HRESULT CastToType(
+        CLR_RT_HeapBlock &ref,
+        CLR_UINT32 tk,
+        CLR_RT_Assembly *assm,
+        bool isInstInstruction,
+        const CLR_RT_MethodDef_Instance *caller);
 
     void DebuggerLoop();
 
diff --git a/src/CLR/Include/nanoCLR_Runtime__HeapBlock.h b/src/CLR/Include/nanoCLR_Runtime__HeapBlock.h
index e799d0884b..d9a22795b4 100644
--- a/src/CLR/Include/nanoCLR_Runtime__HeapBlock.h
+++ b/src/CLR/Include/nanoCLR_Runtime__HeapBlock.h
@@ -1910,9 +1910,14 @@ struct CLR_RT_HeapBlock_Delegate : public CLR_RT_HeapBlock_Node // OBJECT HEAP -
     CLR_RT_AppDomain *m_appDomain;
 #endif
 
-    // Optional TypeSpec index for generic type static constructors (data == 0 means not set)
+    // Optional TypeSpec index for resolving type generic parameters (VARs like !0)
+    // (data == 0 means not set)
     CLR_RT_TypeSpec_Index m_genericTypeSpec;
 
+    // Optional MethodSpec index for resolving method generic parameters (MVARs like !!0)
+    // (data == 0 means not set)
+    CLR_RT_MethodSpec_Index m_genericMethodSpec;
+
     //--//
 
     const CLR_RT_MethodDef_Index &DelegateFtn() const
diff --git a/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/common/Device_BlockStorage.c b/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/common/Device_BlockStorage.c
index 81655f6435..b73b30d075 100644
--- a/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/common/Device_BlockStorage.c
+++ b/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/common/Device_BlockStorage.c
@@ -10,8 +10,8 @@ const BlockRange BlockRange1[] = {
     // the last block is reserved for Customer Configuration Area and Bootloader Backdoor configuration
     // so we don't take it into account for the map
 
-    {BlockRange_BLOCKTYPE_CODE, 0, 25},        // 0x00000000 nanoCLR
-    {BlockRange_BLOCKTYPE_DEPLOYMENT, 26, 42}, // 0x00034000 deployment
+    {BlockRange_BLOCKTYPE_CODE, 0, 26},        // 0x00000000 nanoCLR
+    {BlockRange_BLOCKTYPE_DEPLOYMENT, 27, 42}, // 0x00036000 deployment
 };
 
 const BlockRegionInfo BlockRegions[] = {{
diff --git a/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/nanoCLR/CC13x2_26x2_CLR.ld b/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/nanoCLR/CC13x2_26x2_CLR.ld
index db6c8d1a3c..daa6672a5b 100644
--- a/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/nanoCLR/CC13x2_26x2_CLR.ld
+++ b/targets/TI_SimpleLink/TI_CC1352R1_LAUNCHXL/nanoCLR/CC13x2_26x2_CLR.ld
@@ -40,7 +40,7 @@ HEAPSIZE = 0x2500; /* Size of heap buffer used by HeapMem */
 MEMORY
 {
     /* original flash LENGTH was 0x00057fa8 */
-    FLASH (RX) : ORIGIN = 0x00000000, LENGTH = 0x00034000
+    FLASH (RX) : ORIGIN = 0x00000000, LENGTH = 0x00036000
 
     /*
      * Customer Configuration Area and Bootloader Backdoor configuration in
     * flash, 40 bytes
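The CC1352R1 flash map change above grows the nanoCLR code region by one erase block. Assuming the 8 KB (0x2000) block size implied by the addresses shown, the arithmetic is consistent:

    /* code region: blocks 0..26  -> 27 blocks * 0x2000 = 0x36000 bytes        */
    /* deployment:  starts at block 27 -> offset 27 * 0x2000 = 0x00036000      */
    /* which matches both the BlockRange comment and FLASH LENGTH = 0x00036000 */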