diff options
139 files changed, 5007 insertions, 1899 deletions
diff --git a/Documentation/how-to-build-and-run-ilcompiler-in-visual-studio-2015.md b/Documentation/how-to-build-and-run-ilcompiler-in-visual-studio-2015.md index 108027363..e5d430f5b 100644 --- a/Documentation/how-to-build-and-run-ilcompiler-in-visual-studio-2015.md +++ b/Documentation/how-to-build-and-run-ilcompiler-in-visual-studio-2015.md @@ -52,7 +52,7 @@ _Note: The size of NuGet packages is approximately 2.75 GB, so download might ta - Set startup command line to: `@c:\corert\bin\obj\Windows_NT.x64.Debug\cpp.rsp` - - `--nolinenumbers` command line option can be used to suppress generation of line number mappings in C++ files - useful for debugging + - `--codegenopt:nolinenumbers` command line option can be used to suppress generation of line number mappings in C++ files - useful for debugging - Build & run using **F5** - This will run the compiler. The output is `c:\corert\bin\obj\Windows_NT.x64.Debug\repro\native\repro.cpp` file. diff --git a/build.proj b/build.proj index b39ae63c8..20fe45254 100644 --- a/build.proj +++ b/build.proj @@ -13,6 +13,7 @@ </PropertyGroup> <Import Project="$(ToolsDir)CodeCoverage.targets" Condition="Exists('$(ToolsDir)CodeCoverage.targets')" /> <Import Project="$(ToolsDir)PerfTesting.targets" Condition="Exists('$(ToolsDir)PerfTesting.targets') and '$(Performance)' == 'true'"/> + <Import Project="$(ToolsDir)VersionTools.targets" Condition="Exists('$(ToolsDir)VersionTools.targets')" /> <ItemGroup> <Project Include="src\dirs.proj" /> diff --git a/buildpipeline/DotNet-CoreRT-Linux.json b/buildpipeline/DotNet-CoreRT-Linux.json index dd5bd631d..dde1228dd 100644 --- a/buildpipeline/DotNet-CoreRT-Linux.json +++ b/buildpipeline/DotNet-CoreRT-Linux.json @@ -449,7 +449,7 @@ } }, { - "enabled": false, + "enabled": true, "continueOnError": false, "alwaysRun": false, "displayName": "Remove container", @@ -470,23 +470,7 @@ "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Remove old docker build logs", - 
"timeoutInMinutes": 0, - "task": { - "id": "b7e8b412-0437-4065-9371-edc5881de25b", - "versionSpec": "*", - "definitionType": "task" - }, - "inputs": { - "SourceFolder": "$(DockerCopyDest)", - "Contents": "*" - } - }, - { - "enabled": true, - "continueOnError": false, - "alwaysRun": false, - "displayName": "Run mkdir", + "displayName": "Publish packages", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -494,17 +478,17 @@ "definitionType": "task" }, "inputs": { - "filename": "mkdir", - "arguments": "$(DockerCopyDest)", - "workingFolder": "", + "filename": "docker", + "arguments": "run -w=\"$(GitDirectory)\" --name $(DockerContainerName) $(DockerModifiedImageName) $(GitDirectory)/buildscripts/publish-packages.sh -AzureAccount $(CloudDropAccountName) -AzureToken $(CloudDropAccessToken) -Container $(Label)", + "workingFolder": "$(SourceFolder)", "failOnStandardError": "false" } }, { "enabled": true, - "continueOnError": true, - "alwaysRun": true, - "displayName": "Expose docker repo for publishing", + "continueOnError": false, + "alwaysRun": false, + "displayName": "Commit changes", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -513,7 +497,7 @@ }, "inputs": { "filename": "docker", - "arguments": "cp $(DockerContainerName):$(GitDirectory) $(DockerCopyDest)/", + "arguments": "commit $(DockerContainerName) $(DockerModifiedImageName)", "workingFolder": "", "failOnStandardError": "false" } @@ -522,25 +506,23 @@ "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Remove container", + "displayName": "Remove old docker build logs", "timeoutInMinutes": 0, "task": { - "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", + "id": "b7e8b412-0437-4065-9371-edc5881de25b", "versionSpec": "*", "definitionType": "task" }, "inputs": { - "filename": "docker", - "arguments": "rm $(DockerContainerName)", - "workingFolder": "", - "failOnStandardError": "false" + "SourceFolder": "$(DockerCopyDest)", + 
"Contents": "*" } }, { "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Download nuget", + "displayName": "Run mkdir", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -548,17 +530,17 @@ "definitionType": "task" }, "inputs": { - "filename": "curl", - "arguments": "-L -k -o $(SourceFolder)/nuget.zip https://dotnet.myget.org/F/dotnet-buildtools/api/v2/package/NuGet.CommandLine/3.5.0-rc-1256", + "filename": "mkdir", + "arguments": "$(DockerCopyDest)", "workingFolder": "", "failOnStandardError": "false" } }, { "enabled": true, - "continueOnError": false, - "alwaysRun": false, - "displayName": "Unzip nuget", + "continueOnError": true, + "alwaysRun": true, + "displayName": "Expose docker repo for publishing", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -566,17 +548,17 @@ "definitionType": "task" }, "inputs": { - "filename": "unzip", - "arguments": "$(SourceFolder)/nuget.zip -d $(SourceFolder)/nuget", + "filename": "docker", + "arguments": "cp $(DockerContainerName):$(GitDirectory) $(DockerCopyDest)/", "workingFolder": "", "failOnStandardError": "false" } }, { - "enabled": false, + "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Publish packages", + "displayName": "Remove container", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -584,9 +566,9 @@ "definitionType": "task" }, "inputs": { - "filename": "mono", - "arguments": "nuget/tools/NuGet.exe push bin/Product/pkg/*.nupkg $(MyGetApiKey) -Source $(MyGetFeedUrl) -Timeout 3600", - "workingFolder": "$(SourceFolder)", + "filename": "docker", + "arguments": "rm $(DockerContainerName)", + "workingFolder": "", "failOnStandardError": "false" } }, @@ -720,8 +702,12 @@ "value": "$(Build.BuildNumber)", "allowOverride": true }, + "BuildTag": { + "value": "corert-alpha", + "allowOverride": true + }, "Label": { - "value": "$(Build.BuildNumber)", + "value": 
"$(BuildTag)-$(Build.BuildNumber)", "allowOverride": true }, "SourceVersion": { @@ -771,6 +757,25 @@ "value": null, "isSecret": true, "allowOverride": true + }, + "CloudDropAccountName": { + "value": "dotnetbuildoutput" + }, + "CloudDropAccessToken": { + "value": null, + "isSecret": true + }, + "UpdatePublishedVersions.AuthToken": { + "value": null, + "isSecret": true + }, + "VersionsRepoOwner": { + "value": "crummel", + "allowOverride": true + }, + "VersionsRepo": { + "value": "dotnet_versions", + "allowOverride": true } }, "demands": [ diff --git a/buildpipeline/DotNet-CoreRT-Mac.json b/buildpipeline/DotNet-CoreRT-Mac.json index a2f7c586f..2abbbb760 100644 --- a/buildpipeline/DotNet-CoreRT-Mac.json +++ b/buildpipeline/DotNet-CoreRT-Mac.json @@ -130,53 +130,17 @@ "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Download nuget", - "timeoutInMinutes": 0, - "task": { - "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", - "versionSpec": "*", - "definitionType": "task" - }, - "inputs": { - "filename": "curl", - "arguments": "-L -k -o $(SourceFolder)/nuget.zip https://dotnet.myget.org/F/dotnet-buildtools/api/v2/package/NuGet.CommandLine/3.5.0-rc-1256", - "workingFolder": "", - "failOnStandardError": "false" - } - }, - { - "enabled": true, - "continueOnError": false, - "alwaysRun": false, - "displayName": "Unzip nuget", - "timeoutInMinutes": 0, - "task": { - "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", - "versionSpec": "*", - "definitionType": "task" - }, - "inputs": { - "filename": "unzip", - "arguments": "$(SourceFolder)/nuget.zip -d $(SourceFolder)/nuget", - "workingFolder": "", - "failOnStandardError": "false" - } - }, - { - "enabled": true, - "continueOnError": false, - "alwaysRun": false, "displayName": "Publish packages", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", - "versionSpec": "*", + "versionSpec": "1.*", "definitionType": "task" }, "inputs": { - "filename": "mono", - "arguments": 
"nuget/tools/NuGet.exe push bin/Product/pkg/*.nupkg $(MyGetApiKey) -Source $(MyGetFeedUrl) -Timeout 3600", - "workingFolder": "$(SourceFolder)", + "filename": "$(Build.SourcesDirectory)/$(SourceFolder)/buildscripts/publish-packages.sh", + "arguments": "-AzureAccount $(CloudDropAccountName) -AzureToken $(CloudDropAccessToken) -Container $(Label)", + "workingFolder": "$(Build.SourcesDirectory)/$(SourceFolder)", "failOnStandardError": "false" } }, @@ -292,8 +256,12 @@ "value": "$(Build.BuildNumber)", "allowOverride": true }, + "BuildTag": { + "value": "corert-alpha", + "allowOverride": true + }, "Label": { - "value": "$(Build.BuildNumber)", + "value": "$(BuildTag)-$(Build.BuildNumber)", "allowOverride": true }, "SourceVersion": { @@ -322,6 +290,25 @@ "value": null, "isSecret": true, "allowOverride": true + }, + "CloudDropAccountName": { + "value": "dotnetbuildoutput" + }, + "CloudDropAccessToken": { + "value": null, + "isSecret": true + }, + "UpdatePublishedVersions.AuthToken": { + "value": null, + "isSecret": true + }, + "VersionsRepoOwner": { + "value": "crummel", + "allowOverride": true + }, + "VersionsRepo": { + "value": "dotnet_versions", + "allowOverride": true } }, "demands": [ diff --git a/buildpipeline/DotNet-CoreRT-Publish.json b/buildpipeline/DotNet-CoreRT-Publish.json new file mode 100644 index 000000000..d503fce24 --- /dev/null +++ b/buildpipeline/DotNet-CoreRT-Publish.json @@ -0,0 +1,456 @@ +{ + "build": [ + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Run script $(VS140COMNTOOLS)\\VsDevCmd.bat", + "timeoutInMinutes": 0, + "task": { + "id": "bfc8bf76-e7ac-4a8c-9a55-a944a9f632fd", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "filename": "$(VS140COMNTOOLS)\\VsDevCmd.bat", + "arguments": "", + "modifyEnvironment": "true", + "workingFolder": "", + "failOnStandardError": "false" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Fetch custom 
tooling (NuGet, EmbedIndex)", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "filePath", + "scriptName": "scripts/DotNet-Trusted-Publish/Fetch-Tools.ps1", + "arguments": "$(Build.StagingDirectory)\\ToolingDownload", + "inlineScript": "# You can write your powershell scripts inline here. \n# You can also pass predefined and custom variables to this scripts using arguments\n\n Write-Host \"Hello World\"", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Set up pipeline-specific git repository", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "-gitUrl $(GitUrl) -root $(Pipeline.SourcesDirectory)", + "inlineScript": "param($gitUrl, $root)\n\nif (Test-Path $root)\n{\n Remove-Item -Recurse -Force $root\n}\ngit clone --no-checkout $gitUrl $root 2>&1 | Write-Host\ncd $root\ngit checkout $env:SourceVersion 2>&1 | Write-Host\n\nWrite-Host (\"##vso[task.setvariable variable=Pipeline.SourcesDirectory;]$root\")", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Download packages", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "$(CloudDropAccountName) $(CloudDropAccessToken) $(Label)", + "inlineScript": "param($account, $token, $container)\nif ($env:UseLegacyBuildScripts -eq \"true\")\n{\n .\\sync.cmd /ab /p:CloudDropAccountName=$account /p:CloudDropAccessToken=$token /p:ContainerName=$container\n}\nelse\n{\n 
.\\init-tools.cmd\n msbuild buildscripts\\syncAzure.proj /p:CloudDropAccountName=$account /p:CloudDropAccessToken=$token /p:ContainerName=$container /fl \"/flp:v=diag;logfile=package-download.log\"\n}", + "workingFolder": "$(Pipeline.SourcesDirectory)", + "failOnStandardError": "false" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Index symbol packages", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "", + "inlineScript": "if ($env:Configuration -ne \"Release\") { exit }\n\n& $env:Build_SourcesDirectory\\scripts\\DotNet-Trusted-Publish\\Embed-Index.ps1 `\n $env:Pipeline_SourcesDirectory\\packages\\AzureTransfer\\Windows_NT.x64.$env:Configuration\\Microsoft.TargetingPack.Private.CoreRT\\$env:AzureContainerSymbolPackageGlob `\n $env:Build_StagingDirectory\\IndexedSymbolPackages", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Generate Version Assets", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "", + "inlineScript": "msbuild build.proj /t:CreateOrUpdateCurrentVersionFile /p:OfficialBuildId=$env:OfficialBuildId /p:BuildVersionFile=bin\\obj\\BuildVersion-$env:OfficialBuildId.props", + "workingFolder": "$(Pipeline.SourcesDirectory)", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": true, + "alwaysRun": false, + "displayName": "Log Native Version Assets Files", + "timeoutInMinutes": 0, + "task": { + "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "filename": "dir", + 
"arguments": "$(Pipeline.SourcesDirectory)\\bin\\obj\\BuildVersion*", + "workingFolder": "", + "failOnStandardError": "false" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "packages -> dotnet.myget.org", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "$(MyGetApiKey)", + "inlineScript": "param($ApiKey)\nif ($env:Configuration -ne \"Release\") { exit }\n& $env:CustomNuGetPath push $env:Pipeline_SourcesDirectory\\packages\\AzureTransfer\\Windows_NT.x64.$env:Configuration\\Microsoft.TargetingPack.Private.CoreRT\\$env:AzureContainerPackageGlob $ApiKey -Source $env:MyGetFeedUrl -Timeout 3600", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "symbol packages -> dotnet.myget.org", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "$(MyGetApiKey)", + "inlineScript": "param($ApiKey)\nif ($env:Configuration -ne \"Release\") { exit }\n& $env:CustomNuGetPath push $env:Build_StagingDirectory\\IndexedSymbolPackages\\*.nupkg $ApiKey -Source $env:MyGetFeedUrl -Timeout 3600", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Update versions repository", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "-gitHubAuthToken $(UpdatePublishedVersions.AuthToken) -root $(Pipeline.SourcesDirectory)", + "inlineScript": 
"param($gitHubAuthToken, $root)\nif ($env:Configuration -ne \"Release\") { exit }\ncd $root\n. $root\\buildscripts\\UpdatePublishedVersions.ps1 `\n -gitHubUser dotnet-build-bot -gitHubEmail dotnet-build-bot@microsoft.com `\n -gitHubAuthToken $gitHubAuthToken `\n -versionsRepoOwner $env:VersionsRepoOwner -versionsRepo $env:VersionsRepo `\n -versionsRepoPath build-info/dotnet/$env:GitHubRepositoryName/$env:SourceBranch `\n -nupkgPath $root\\packages\\AzureTransfer\\Windows_NT.x64.$env:Configuration\\Microsoft.TargetingPack.Private.CoreRT\\$env:AzureContainerPackageGlob", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": false, + "displayName": "Get Build Number", + "timeoutInMinutes": 0, + "task": { + "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "scriptType": "inlineScript", + "scriptName": "", + "arguments": "$(OfficialBuildId) $(Pipeline.SourcesDirectory)", + "inlineScript": "param(\n [string]$OfficialBuildId,\n [string]$SourcesDir\n)\n$VersionPropsFile=$SourcesDir + \"\\bin\\obj\\BuildVersion-\" + $OfficialBuildId + \".props\"\n[xml]$versionXml=Get-Content $VersionPropsFile\n$env:BuildNumber=$versionXml.Project.PropertyGroup.BuildNumberMajor.InnerText + \".\" + $versionXml.Project.PropertyGroup.BuildNumberMinor.InnerText\nWrite-Host (\"##vso[task.setvariable variable=BuildNumber;]$env:BuildNumber\")", + "workingFolder": "", + "failOnStandardError": "true" + } + }, + { + "enabled": true, + "continueOnError": true, + "alwaysRun": false, + "displayName": "Publish to Artifact Services Drop (BuildNumber)", + "timeoutInMinutes": 0, + "task": { + "id": "f9d96d25-0c81-4e77-8282-1ad1f785cbb4", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "dropServiceURI": "https://devdiv.artifacts.visualstudio.com/DefaultCollection", + "buildNumber": 
"dotnet/$(GitHubRepositoryName)/$(SourceBranch)/$(BuildNumber)/packages/$(Configuration)", + "sourcePath": "$(Pipeline.SourcesDirectory)\\packages\\AzureTransfer", + "dropExePath": "", + "toLowerCase": "true", + "detailedLog": "false", + "usePat": "false" + } + }, + { + "enabled": true, + "continueOnError": true, + "alwaysRun": false, + "displayName": "Publish to Artifact Services Drop (OfficialBuildId)", + "timeoutInMinutes": 0, + "task": { + "id": "f9d96d25-0c81-4e77-8282-1ad1f785cbb4", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "dropServiceURI": "https://devdiv.artifacts.visualstudio.com/DefaultCollection", + "buildNumber": "dotnet/$(GitHubRepositoryName)/$(SourceBranch)/$(OfficialBuildId)/packages/$(Configuration)", + "sourcePath": "$(Pipeline.SourcesDirectory)\\packages\\AzureTransfer", + "dropExePath": "", + "toLowerCase": "true", + "detailedLog": "false", + "usePat": "false" + } + }, + { + "enabled": true, + "continueOnError": false, + "alwaysRun": true, + "displayName": "Copy Publish Artifact: PublishLogs", + "timeoutInMinutes": 0, + "task": { + "id": "1d341bb0-2106-458c-8422-d00bcea6512a", + "versionSpec": "*", + "definitionType": "task" + }, + "inputs": { + "CopyRoot": "", + "Contents": "**\\*.log", + "ArtifactName": "PublishLogs", + "ArtifactType": "Container", + "TargetPath": "\\\\my\\share\\$(Build.DefinitionName)\\$(Build.BuildNumber)" + } + } + ], + "options": [ + { + "enabled": false, + "definition": { + "id": "7c555368-ca64-4199-add6-9ebaf0b0137d" + }, + "inputs": { + "multipliers": "[]", + "parallel": "false", + "continueOnError": "true", + "additionalFields": "{}" + } + }, + { + "enabled": false, + "definition": { + "id": "a9db38f9-9fdc-478c-b0f9-464221e58316" + }, + "inputs": { + "workItemType": "234347", + "assignToRequestor": "true", + "additionalFields": "{}" + } + }, + { + "enabled": false, + "definition": { + "id": "57578776-4c22-4526-aeb0-86b6da17ee9c" + }, + "inputs": { + "additionalFields": "{}" + } + } + ], + 
"variables": { + "system.debug": { + "value": "false", + "allowOverride": true + }, + "Configuration": { + "value": "Debug", + "allowOverride": true + }, + "TeamName": { + "value": "DotNetCore" + }, + "CloudDropAccountName": { + "value": "dotnetbuildoutput" + }, + "CloudDropAccessToken": { + "value": null, + "isSecret": true + }, + "OfficialBuildId": { + "value": "$(Build.BuildNumber)", + "allowOverride": true + }, + "BuildTag": { + "value": "corert-alpha", + "allowOverride": true + }, + "Label": { + "value": "$(BuildTag)-$(Build.BuildNumber)", + "allowOverride": true + }, + "MyGetFeedUrl": { + "value": "https://dotnet.myget.org/F/dotnet-core-test/api/v2/package", + "allowOverride": true + }, + "MyGetApiKey": { + "value": null, + "isSecret": true + }, + "VstsPat": { + "value": null, + "isSecret": true + }, + "DevDivPat": { + "value": null, + "isSecret": true + }, + "UpdatePublishedVersions.AuthToken": { + "value": null, + "isSecret": true + }, + "VersionsRepoOwner": { + "value": "crummel", + "allowOverride": true + }, + "VersionsRepo": { + "value": "dotnet_versions", + "allowOverride": true + }, + "Pipeline.SourcesDirectory": { + "value": "$(Build.BinariesDirectory)\\pipelineRepository" + }, + "SourceVersion": { + "value": "master", + "allowOverride": true + }, + "SourceBranch": { + "value": "master", + "allowOverride": true + }, + "AzureContainerPackageGlob": { + "value": "*.nupkg", + "allowOverride": true + }, + "AzureContainerSymbolPackageGlob": { + "value": "symbols\\*.nupkg", + "allowOverride": true + }, + "GitHubRepositoryName": { + "value": "corert" + }, + "UseLegacyBuildScripts": { + "value": "false", + "allowOverride": true + } + }, + "retentionRules": [ + { + "branches": [ + "+refs/heads/*" + ], + "artifacts": [], + "artifactTypesToDelete": [ + "FilePath", + "SymbolStore" + ], + "daysToKeep": 10, + "minimumToKeep": 1, + "deleteBuildRecord": true, + "deleteTestResults": true + } + ], + "buildNumberFormat": "$(date:yyyyMMdd)$(rev:-rr)", + 
"jobAuthorizationScope": "projectCollection", + "jobTimeoutInMinutes": 180, + "repository": { + "properties": { + "labelSources": "0", + "reportBuildStatus": "false" + }, + "id": "0a2b2664-c1be-429c-9b40-8a24dee27a4a", + "type": "TfsGit", + "name": "DotNet-BuildPipeline", + "url": "https://devdiv.visualstudio.com/DevDiv/_git/DotNet-BuildPipeline", + "defaultBranch": "refs/heads/master", + "clean": "true", + "checkoutSubmodules": false + }, + "quality": "definition", + "defaultBranch": "refs/heads/master", + "queue": { + "pool": { + "id": 39, + "name": "DotNet-Build" + }, + "id": 36, + "name": "DotNet-Build" + }, + "path": "\\", + "type": "build", + "id": 2943, + "name": "DotNet-CoreRT-Publish", + "project": { + "id": "0bdbc590-a062-4c3f-b0f6-9383f67865ee", + "name": "DevDiv", + "description": "Visual Studio and DevDiv team project for git source code repositories. Work items will be added for Adams, Dev14 work items are tracked in vstfdevdiv. ", + "url": "https://devdiv.visualstudio.com/DefaultCollection/_apis/projects/0bdbc590-a062-4c3f-b0f6-9383f67865ee", + "state": "wellFormed", + "revision": 418097399 + } +}
\ No newline at end of file diff --git a/buildpipeline/DotNet-CoreRT-Windows.json b/buildpipeline/DotNet-CoreRT-Windows.json index 7453b6cfb..dcb89273b 100644 --- a/buildpipeline/DotNet-CoreRT-Windows.json +++ b/buildpipeline/DotNet-CoreRT-Windows.json @@ -148,27 +148,7 @@ "enabled": true, "continueOnError": false, "alwaysRun": false, - "displayName": "Download nuget", - "timeoutInMinutes": 0, - "task": { - "id": "e213ff0f-5d5c-4791-802d-52ea3e7be1f1", - "versionSpec": "*", - "definitionType": "task" - }, - "inputs": { - "scriptType": "inlineScript", - "scriptName": "", - "arguments": "https://dotnet.myget.org/F/dotnet-buildtools/api/v2/package/NuGet.CommandLine/3.5.0-rc-1256 $(CustomNuGetDownloadPath) $(CustomNuGetPath)", - "inlineScript": "param($packageUrl, $packageDestDir, $nugetDestPath)\n$downloadPath=\"$packageDestDir\\NuGet.CommandLine.nupkg\"\n$extract = \"$packageDestDir\\extracted\"\n$t = mkdir $packageDestDir -ea Ignore\n(New-Object Net.WebClient).DownloadFile($packageUrl, $downloadPath)\nAdd-Type -Assembly 'System.IO.Compression.FileSystem'\nRemove-Item $extract -Recurse -ea Ignore\n[System.IO.Compression.ZipFile]::ExtractToDirectory($downloadPath, $extract)\nCopy-Item $packageDestDir\\extracted\\tools\\NuGet.exe $nugetDestPath", - "workingFolder": "", - "failOnStandardError": "true" - } - }, - { - "enabled": true, - "continueOnError": false, - "alwaysRun": false, - "displayName": "Publish packages to MyGet", + "displayName": "Publish packages", "timeoutInMinutes": 0, "task": { "id": "d9bafed4-0b18-4f58-968d-86655b4d2ce9", @@ -176,8 +156,8 @@ "definitionType": "task" }, "inputs": { - "filename": "$(CustomNuGetPath)", - "arguments": "push bin\\Product\\pkg\\*.nupkg $(MyGetApiKey) -Source $(MyGetFeedUrl) -Timeout 3600", + "filename": "$(Build.SourcesDirectory)\\$(SourceFolder)\\buildscripts\\publish-packages.cmd", + "arguments": "-AzureAccount=$(CloudDropAccountName) -AzureToken=\"$(CloudDropAccessToken)\" -Container=$(Label)", "workingFolder": 
"$(SourceFolder)", "failOnStandardError": "false" } @@ -236,7 +216,7 @@ "inputs": { "symbolServiceURI": "https://devdiv.artifacts.visualstudio.com/DefaultCollection", "requestName": "$(system.teamProject)/$(Build.BuildNumber)/$(Build.BuildId)", - "sourcePath": "$(Build.SourcesDirectory)\\corefx\\bin", + "sourcePath": "$(Build.SourcesDirectory)\\corert\\bin", "assemblyPath": "", "toLowerCase": "true", "detailedLog": "true", @@ -408,8 +388,12 @@ "value": "$(Build.BuildNumber)", "allowOverride": true }, + "BuildTag": { + "value": "corert-alpha", + "allowOverride": true + }, "Label": { - "value": "$(Build.BuildNumber)", + "value": "$(BuildTag)-$(Build.BuildNumber)", "allowOverride": true }, "SourceVersion": { @@ -444,6 +428,25 @@ "SourceFolder": { "value": "corert_$(Build.BuildId)", "allowOverride": false + }, + "CloudDropAccountName": { + "value": "dotnetbuildoutput" + }, + "CloudDropAccessToken": { + "value": null, + "isSecret": true + }, + "UpdatePublishedVersions.AuthToken": { + "value": null, + "isSecret": true + }, + "VersionsRepoOwner": { + "value": "crummel", + "allowOverride": true + }, + "VersionsRepo": { + "value": "dotnet_versions", + "allowOverride": true } }, "demands": [ diff --git a/buildpipeline/pipeline.json b/buildpipeline/pipeline.json index 837259ebb..ef9acfed2 100644 --- a/buildpipeline/pipeline.json +++ b/buildpipeline/pipeline.json @@ -7,43 +7,148 @@ }, "Pipelines": [ { - "Name": "All-Release-x64", + "Name": "All-Release", "Parameters": { "TreatWarningsAsErrors": "false" }, "BuildParameters": { - "Platform": "x64", "Configuration": "Release" }, "Definitions": [ { "Name": "DotNet-CoreRT-Linux", + "Parameters": { + "Platform": "x64" + }, "ReportingParameters": { "OperatingSystem": "Debian 8.2", - "SubType": "native", "Type": "build/product/", - "ConfigurationGroup": "Release" + "ConfigurationGroup": "Release", + "Platform": "x64" } }, { "Name": "DotNet-CoreRT-Mac", + "Parameters": { + "Platform": "x64" + }, "ReportingParameters": { - "SubType": 
"native", "OperatingSystem": "OSX", "Type": "build/product/", - "ConfigurationGroup": "Release" + "ConfigurationGroup": "Release", + "Platform": "x64" } }, { "Name": "DotNet-CoreRT-Windows", + "Parameters": { + "Platform": "x64" + }, "ReportingParameters": { - "SubType": "managed", - "OperatingSystem": "All (Managed)", + "OperatingSystem": "Windows", "Type": "build/product/", - "ConfigurationGroup": "Release" + "ConfigurationGroup": "Release", + "Platform": "x64" } } ] + }, + { + "Name": "All-Debug", + "Parameters": { + "TreatWarningsAsErrors": "false" + }, + "BuildParameters": { + "Configuration": "Debug" + }, + "Definitions": [ + { + "Name": "DotNet-CoreRT-Linux", + "Parameters": { + "Platform": "x64" + }, + "ReportingParameters": { + "OperatingSystem": "Debian 8.2", + "Type": "build/product/", + "ConfigurationGroup": "Debug", + "Platform": "x64" + } + }, + { + "Name": "DotNet-CoreRT-Mac", + "Parameters": { + "Platform": "x64" + }, + "ReportingParameters": { + "OperatingSystem": "OSX", + "Type": "build/product/", + "ConfigurationGroup": "Debug", + "Platform": "x64" + } + }, + { + "Name": "DotNet-CoreRT-Windows", + "Parameters": { + "Platform": "x64" + }, + "ReportingParameters": { + "OperatingSystem": "Windows", + "Type": "build/product/", + "ConfigurationGroup": "Debug", + "Platform": "x64" + } + } + ] + }, + { + "Name": "Publish-Release", + "Parameters": { + "TreatWarningsAsErrors": "false" + }, + "BuildParameters": { + "Configuration": "Release" + }, + "Definitions": [ + { + "Name": "DotNet-CoreRT-Publish", + "Parameters": { + "GitHubRepositoryName": "corert" + }, + "ReportingParameters": { + "TaskName": "Package Publish", + "Type": "build/publish/", + "ConfigurationGroup": "Release - Push to MyGet Feed" + } + } + ], + "DependsOn": [ + "All-Release" + ] + }, + { + "Name": "Publish-Debug", + "Parameters": { + "TreatWarningsAsErrors": "false" + }, + "BuildParameters": { + "ConfigurationGroup": "Debug" + }, + "Definitions": [ + { + "Name": 
"DotNet-CoreRT-Publish", + "Parameters": { + "GitHubRepositoryName": "corert" + }, + "ReportingParameters": { + "TaskName": "Package Publish", + "Type": "build/publish/", + "ConfigurationGroup": "Debug - Push to Azure Storage" + } + } + ], + "DependsOn": [ + "All-Debug" + ] } ] } diff --git a/buildscripts/publish-packages.cmd b/buildscripts/publish-packages.cmd new file mode 100644 index 000000000..633ff7199 --- /dev/null +++ b/buildscripts/publish-packages.cmd @@ -0,0 +1,40 @@ +@echo off +REM don't pass args to buildvars-setup, just get defaults +call %~dp0buildvars-setup.cmd + +set _msbuildexe="%ProgramFiles(x86)%\MSBuild\14.0\Bin\MSBuild.exe" +if not exist %_msbuildexe% (set _msbuildexe="%ProgramFiles%\MSBuild\14.0\Bin\MSBuild.exe") +REM hopefully it's on the path +if not exist %_msbuildexe% set _msbuildexe=msbuild + +set AzureAccount= +set AzureToken= +set Container= + +:Arg_Loop +if "%1" == "" goto ArgsDone + +if /i "%1" == "-AzureAccount" (set AzureAccount=%2&shift&shift&goto Arg_Loop) +if /i "%1" == "-AzureToken" (set AzureToken=%2&shift&shift&goto Arg_Loop) +if /i "%1" == "-Container" (set Container=%2&shift&shift&goto Arg_Loop) + +echo Invalid command line argument: %1 +exit /b 1 +:ArgsDone + +set AzureToken=%AzureToken:"=% + +if "%AzureAccount%" == "" ( + echo Azure account not specified. + exit /b 1 +) +if "%AzureToken%" == "" ( + echo Azure token not specified. + exit /b 1 +) +if "%Container%" == "" ( + echo Azure container not specified. + exit /b 1 +) + +%_msbuildexe% %__ProjectDir%\buildscripts\publish.proj /p:CloudDropAccountName=%AzureAccount% /p:CloudDropAccessToken=%AzureToken% /p:ContainerName=%Container% /flp:v=diag;LogFile=publish-packages.log
\ No newline at end of file diff --git a/buildscripts/publish-packages.sh b/buildscripts/publish-packages.sh new file mode 100755 index 000000000..a1632ed15 --- /dev/null +++ b/buildscripts/publish-packages.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +export AzureAccount= +export AzureToken= +export Container= + +while [ "$1" != "" ]; do + lowerI="$(echo $1 | awk '{print tolower($0)}')" + case $lowerI in + -azureaccount) + shift + export AzureAccount=$1 + ;; + -azuretoken) + shift + export AzureToken=$1 + ;; + -container) + shift + export Container=$1 + ;; + *) + echo Bad argument $1 + exit 1 + esac + shift +done + +# don't pass args to buildvars-setup, just get defaults +scriptRoot="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +. $scriptRoot/buildvars-setup.sh + +$__ProjectRoot/Tools/msbuild.sh $scriptRoot/publish.proj /p:CloudDropAccountName=$AzureAccount /p:CloudDropAccessToken=$AzureToken /p:ContainerName=$Container "/flp:v=diag;LogFile=publish-packages.log"
\ No newline at end of file diff --git a/buildscripts/publish.proj b/buildscripts/publish.proj new file mode 100644 index 000000000..25731c681 --- /dev/null +++ b/buildscripts/publish.proj @@ -0,0 +1,20 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" /> + <Import Project="$(ToolsDir)PublishContent.targets" /> + <Import Project="$(ToolsDir)versioning.targets" /> + + <PropertyGroup> + <PublishPattern Condition="'$(PublishPattern)' == ''">$(PackageOutputRoot)**\*.nupkg</PublishPattern> + </PropertyGroup> + + <Target Name="CreateContainerName" + DependsOnTargets="CreateVersionFileDuringBuild" + Condition="'$(ContainerName)' == ''"> + <PropertyGroup> + <ContainerName>corert-$(PreReleaseLabel)-$(BuildNumberMajor)-$(BuildNumberMinor)</ContainerName> + </PropertyGroup> + </Target> + + <Target Name="Build" DependsOnTargets="CreateContainerName;UploadToAzure" /> +</Project>
\ No newline at end of file diff --git a/buildscripts/syncAzure.proj b/buildscripts/syncAzure.proj new file mode 100644 index 000000000..5e5c3bcc8 --- /dev/null +++ b/buildscripts/syncAzure.proj @@ -0,0 +1,19 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" /> + + <PropertyGroup> + <ContainerNamePrefix Condition="'$(ContainerNamePrefix)' == ''">corert-$(PreReleaseLabel)</ContainerNamePrefix> + <ContainerName Condition="'$(ContainerNamePrefix)' != '' and '$(BuildNumberMajor)' != '' and '$(BuildNumberMinor)' != ''">$(ContainerNamePrefix)-$(BuildNumberMajor)-$(BuildNumberMinor)</ContainerName> + <DownloadDirectory>$(PackagesDir)AzureTransfer</DownloadDirectory> + </PropertyGroup> + + <Import Project="$(ToolsDir)SyncCloudContent.targets" /> + + <Target Name="ValidateRequiredProperties"> + <Error Condition="'$(CloudDropAccountName)' == ''" Text="Missing property CloudDropAccountName." /> + <Error Condition="'$(CloudDropAccessToken)' == ''" Text="Missing property CloudDropAccessToken." /> + </Target> + + <Target Name="Build" DependsOnTargets="ValidateRequiredProperties;DownloadBlobsFromAzureTargets" /> +</Project>
\ No newline at end of file diff --git a/buildscripts/updatePublishedVersions.ps1 b/buildscripts/updatePublishedVersions.ps1 new file mode 100644 index 000000000..465bb4e70 --- /dev/null +++ b/buildscripts/updatePublishedVersions.ps1 @@ -0,0 +1,26 @@ +# +# Copyright (c) .NET Foundation and contributors. All rights reserved. +# Licensed under the MIT license. See LICENSE file in the project root for full license information. +# + +# This script updates the dotnet/versions repository based on a set of packages. It directly +# commits the changes using GitHub APIs. + +param( + [Parameter(Mandatory=$true)][string]$gitHubUser, + [Parameter(Mandatory=$true)][string]$gitHubEmail, + [Parameter(Mandatory=$true)][string]$gitHubAuthToken, + [Parameter(Mandatory=$true)][string]$versionsRepoOwner, + [Parameter(Mandatory=$true)][string]$versionsRepo, + [Parameter(Mandatory=$true)][string]$versionsRepoPath, + # A pattern matching all packages in the set that the versions repository should be set to. + [Parameter(Mandatory=$true)][string]$nupkgPath) + +msbuild /t:UpdatePublishedVersions ` + /p:GitHubUser="$gitHubUser" ` + /p:GitHubEmail="$gitHubEmail" ` + /p:GitHubAuthToken="$gitHubAuthToken" ` + /p:VersionsRepoOwner="$versionsRepoOwner" ` + /p:VersionsRepo="$versionsRepo" ` + /p:VersionsRepoPath="$versionsRepoPath" ` + /p:ShippedNuGetPackageGlobPath="$nupkgPath"
\ No newline at end of file @@ -68,6 +68,8 @@ <ObjDir Condition="'$(ObjDir)'==''">$(BinDir)obj/</ObjDir> <ProductBinDir Condition="'$(ProductBinDir)'==''">$(BinDir)Product/</ProductBinDir> <TestWorkingDir Condition="'$(TestWorkingDir)'==''">$(BinDir)tests/</TestWorkingDir> + <PackageOutputRoot Condition="'$(PackageOutputRoot)'=='' and '$(NonShippingPackage)' == 'true'">$(BinDir)packages_noship/</PackageOutputRoot> + <PackageOutputRoot Condition="'$(PackageOutputRoot)'=='' and '$(NonShippingPackage)' != 'true'">$(ProductBinDir)pkg/</PackageOutputRoot> <!-- Folder where restored Nuget packages will go --> <PackagesOutDir Condition="'$(PackagesOutDir)'==''">$(BinDir)packages/</PackagesOutDir> @@ -92,7 +94,9 @@ <OSPlatformConfig>$(BinDirOSGroup).$(BinDirPlatform).$(BinDirConfiguration)</OSPlatformConfig> <BaseOutputPath Condition="'$(BaseOutputPath)'==''">$(ProductBinDir)</BaseOutputPath> - <OutputPath Condition="'$(OutputPath)'==''">$(BaseOutputPath)$(OSPlatformConfig)/$(MSBuildProjectName)/</OutputPath> + <PackageOutputPath Condition="'$(PackageOutputPath)'==''">$(PackageOutputRoot)$(OSPlatformConfig)/$(MSBuildProjectName)/</PackageOutputPath> + <SymbolPackageOutputPath Condition="'$(SymbolPackageOutputPath)'==''">$(PackageOutputPath)symbols/</SymbolPackageOutputPath> + <OutputPath Condition="'$(OutputPath)'==''">$(BaseOutputPath)$(OSPlatformConfig)/$(MSBuildProjectName)</OutputPath> <!-- Folder where we will drop the Nuget package for the toolchain --> <ProductPackageDir Condition="'$(ProductPackageDir)'==''">$(BaseOutputPath)$(OSPlatformConfig)/packaging/</ProductPackageDir> diff --git a/netci.groovy b/netci.groovy index 83d30da9a..20bef1863 100644 --- a/netci.groovy +++ b/netci.groovy @@ -8,6 +8,10 @@ def project = GithubProject // The input branch name (e.g. 
master) def branch = GithubBranchName +def imageVersionMap = ['Windows_NT':'latest-or-auto', + 'OSX':'latest-or-auto', + 'Ubuntu':'20170118'] + // Innerloop build OS's def osList = ['Ubuntu', 'OSX', 'Windows_NT'] @@ -30,9 +34,9 @@ def osList = ['Ubuntu', 'OSX', 'Windows_NT'] // Calculate the build commands if (os == 'Windows_NT') { buildString = "build.cmd ${lowercaseConfiguration}" + testScriptString = "tests\\runtest.cmd /coreclr " } else { - // On other OS's we skipmscorlib but run the pal tests buildString = "./build.sh ${lowercaseConfiguration}" } @@ -46,8 +50,15 @@ def osList = ['Ubuntu', 'OSX', 'Windows_NT'] batchFile(buildString) if (configuration == 'Debug') { - prJobDescription += " + CoreCLR tests" - batchFile("tests\\runtest.cmd /coreclr Top200") + if (isPR) { + prJobDescription += " and CoreCLR tests" + // Run a small set of BVTs during PR validation + batchFile(testScriptString + "Top200") + } + else { + // Run the full set of known passing tests in the post-commit job + batchFile(testScriptString + "KnownGood") + } } } else { @@ -58,12 +69,14 @@ def osList = ['Ubuntu', 'OSX', 'Windows_NT'] // This call performs test run checks for the CI. 
Utilities.addXUnitDotNETResults(newJob, '**/testResults.xml') - Utilities.setMachineAffinity(newJob, os, 'latest-or-auto') + Utilities.setMachineAffinity(newJob, os, imageVersionMap[os]) Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}") if (isPR) { Utilities.addGithubPRTriggerForBranch(newJob, branch, prJobDescription) } else { + // Set a large timeout since the default (2 hours) is insufficient + Utilities.setJobTimeout(newJob, 1440) Utilities.addGithubPushTrigger(newJob) } } diff --git a/src/BuildIntegration/Microsoft.NETCore.Native.Windows.props b/src/BuildIntegration/Microsoft.NETCore.Native.Windows.props index d36383bcc..edf33355c 100644 --- a/src/BuildIntegration/Microsoft.NETCore.Native.Windows.props +++ b/src/BuildIntegration/Microsoft.NETCore.Native.Windows.props @@ -25,7 +25,7 @@ See the LICENSE file in the project root for more information. <CppCompilerAndLinkerArg Include="/I$(IlcPath)\inc" /> <CppCompilerAndLinkerArg Condition="'$(Configuration)' == 'Debug'" Include="/Od" /> <CppCompilerAndLinkerArg Condition="'$(Configuration)' != 'Debug'" Include="/O2" /> - <CppCompilerAndLinkerArg Include="/c /nologo /W3 /GS /DCPPCODEGEN /EHs /Zi" /> + <CppCompilerAndLinkerArg Include="/c /nologo /W3 /GS /DCPPCODEGEN /EHs /Zi /bigobj" /> <CppCompilerAndLinkerArg Condition="'$(UseDebugCrt)' == 'true'" Include="/MTd" /> <CppCompilerAndLinkerArg Condition="'$(UseDebugCrt)' != 'true'" Include="/MT" /> <CppCompilerAndLinkerArg Include="$(AdditionalCppCompilerFlags)" /> diff --git a/src/Common/src/Interop/Unix/System.Private.CoreLib.Native/Interop.ErrNo.cs b/src/Common/src/Interop/Unix/System.Private.CoreLib.Native/Interop.ErrNo.cs index 911a055bc..e879fdecc 100644 --- a/src/Common/src/Interop/Unix/System.Private.CoreLib.Native/Interop.ErrNo.cs +++ b/src/Common/src/Interop/Unix/System.Private.CoreLib.Native/Interop.ErrNo.cs @@ -11,12 +11,10 @@ internal static partial class Interop { internal unsafe partial class Sys { - 
[MethodImpl(MethodImplOptions.InternalCall)] - [RuntimeImport(Interop.Libraries.CoreLibNative, "CoreLibNative_GetLastErrNo")] - internal static extern int GetLastErrNo(); + [DllImport(Interop.Libraries.CoreLibNative, EntryPoint = "CoreLibNative_GetErrNo")] + internal static extern int GetErrNo(); - [MethodImpl(MethodImplOptions.InternalCall)] - [RuntimeImport(Interop.Libraries.CoreLibNative, "CoreLibNative_SetLastErrNo")] - internal static extern void SetLastErrNo(int error); + [DllImport(Interop.Libraries.CoreLibNative, EntryPoint = "CoreLibNative_ClearErrNo")] + internal static extern void ClearErrNo(); } } diff --git a/src/Common/src/Interop/Windows/mincore/Interop.GetLastError.cs b/src/Common/src/Interop/Windows/mincore/Interop.GetLastError.cs index fce9ba542..45dcf94c1 100644 --- a/src/Common/src/Interop/Windows/mincore/Interop.GetLastError.cs +++ b/src/Common/src/Interop/Windows/mincore/Interop.GetLastError.cs @@ -2,16 +2,13 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. -using System.Runtime; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; internal partial class Interop { internal partial class mincore { - [MethodImpl(MethodImplOptions.InternalCall)] - [RuntimeImport(Interop.Libraries.ErrorHandling, "GetLastError")] + [DllImport("api-ms-win-core-errorhandling-l1-1-0.dll")] internal extern static int GetLastError(); } } diff --git a/src/Common/src/Interop/Windows/mincore/Interop.SetLastError.cs b/src/Common/src/Interop/Windows/mincore/Interop.SetLastError.cs index 16e8ec8e0..583043061 100644 --- a/src/Common/src/Interop/Windows/mincore/Interop.SetLastError.cs +++ b/src/Common/src/Interop/Windows/mincore/Interop.SetLastError.cs @@ -2,16 +2,13 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
-using System.Runtime; -using System.Runtime.CompilerServices; using System.Runtime.InteropServices; internal partial class Interop { internal partial class mincore { - [MethodImpl(MethodImplOptions.InternalCall)] - [RuntimeImport(Interop.Libraries.ErrorHandling, "SetLastError")] + [DllImport("api-ms-win-core-errorhandling-l1-1-0.dll")] internal extern static void SetLastError(uint dwErrCode); } } diff --git a/src/Common/src/TypeSystem/Common/TypeSystemConstaintsHelpers.cs b/src/Common/src/TypeSystem/Common/TypeSystemConstaintsHelpers.cs new file mode 100644 index 000000000..31d1d293c --- /dev/null +++ b/src/Common/src/TypeSystem/Common/TypeSystemConstaintsHelpers.cs @@ -0,0 +1,83 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + + +using System.Diagnostics; + +namespace Internal.TypeSystem +{ + public static class TypeSystemConstraintsHelpers + { + private static bool VerifyGenericParamConstraint(Instantiation typeInstantiation, Instantiation methodInstantiation, GenericParameterDesc genericParam, TypeDesc instantiationParam) + { + // Check class constraint + if (genericParam.HasReferenceTypeConstraint && !instantiationParam.IsGCPointer) + return false; + + // Check default constructor constraint + if (genericParam.HasDefaultConstructorConstraint) + { + if (!instantiationParam.IsDefType) + return false; + + if (!instantiationParam.IsValueType && instantiationParam.GetDefaultConstructor() == null) + return false; + } + + // Check struct constraint + if (genericParam.HasNotNullableValueTypeConstraint) + { + if (!instantiationParam.IsValueType) + return false; + + if (instantiationParam.IsNullable) + return false; + } + + foreach (var constraintType in genericParam.TypeConstraints) + { + var instantiatedType = constraintType.InstantiateSignature(typeInstantiation, methodInstantiation); + if 
(!instantiationParam.CanCastTo(instantiatedType)) + return false; + } + + return true; + } + + public static bool CheckConstraints(this TypeDesc type) + { + // Non-generic types always pass constraints check + if (!type.HasInstantiation) + return true; + + TypeDesc uninstantiatedType = type.GetTypeDefinition(); + for (int i = 0; i < uninstantiatedType.Instantiation.Length; i++) + { + if (!VerifyGenericParamConstraint(type.Instantiation, default(Instantiation), (GenericParameterDesc)uninstantiatedType.Instantiation[i], type.Instantiation[i])) + return false; + } + + return true; + } + + public static bool CheckConstraints(this MethodDesc method) + { + if (!method.OwningType.CheckConstraints()) + return false; + + // Non-generic methods always pass constraints check + if (!method.HasInstantiation) + return true; + + MethodDesc uninstantiatedMethod = method.GetMethodDefinition(); + for (int i = 0; i < uninstantiatedMethod.Instantiation.Length; i++) + { + if (!VerifyGenericParamConstraint(method.OwningType.Instantiation, method.Instantiation, (GenericParameterDesc)uninstantiatedMethod.Instantiation[i], method.Instantiation[i])) + return false; + } + + return true; + } + } +} diff --git a/src/ILCompiler.Compiler/src/Compiler/Compilation.cs b/src/ILCompiler.Compiler/src/Compiler/Compilation.cs index 9da9ab066..4ab222cc5 100644 --- a/src/ILCompiler.Compiler/src/Compiler/Compilation.cs +++ b/src/ILCompiler.Compiler/src/Compiler/Compilation.cs @@ -196,20 +196,13 @@ namespace ILCompiler public void AddCompilationRoot(TypeDesc type, string reason) { - if (type.IsGenericDefinition) + if (!ConstructedEETypeNode.CreationAllowed(type)) { _graph.AddRoot(_factory.NecessaryTypeSymbol(type), reason); } else { _graph.AddRoot(_factory.ConstructedTypeSymbol(type), reason); - - // If the type has a thread static field then we should eagerly create a helper - // to access such fields at runtime. This is required for multi-module compilation. 
- if (type.IsDefType && (((DefType)type).ThreadStaticFieldSize > 0)) - { - _graph.AddRoot(_factory.ReadyToRunHelper(ReadyToRunHelperId.GetThreadStaticBase, (MetadataType)type), reason); - } } } } diff --git a/src/ILCompiler.Compiler/src/Compiler/CompilerTypeSystemContext.TypeInit.cs b/src/ILCompiler.Compiler/src/Compiler/CompilerTypeSystemContext.TypeInit.cs index 8d96d3078..76ebf89a7 100644 --- a/src/ILCompiler.Compiler/src/Compiler/CompilerTypeSystemContext.TypeInit.cs +++ b/src/ILCompiler.Compiler/src/Compiler/CompilerTypeSystemContext.TypeInit.cs @@ -41,56 +41,8 @@ namespace ILCompiler private static bool HasEagerConstructorAttribute(TypeDesc type) { MetadataType mdType = type as MetadataType; - return mdType != null && ( - mdType.HasCustomAttribute("System.Runtime.CompilerServices", "EagerOrderedStaticConstructorAttribute") - || mdType.HasCustomAttribute("System.Runtime.CompilerServices", "EagerStaticClassConstructionAttribute")); - } - } - - public class EagerConstructorComparer : IComparer<DependencyAnalysis.IMethodNode> - { - private int GetConstructionOrder(MetadataType type) - { - // For EagerOrderedStaticConstructorAttribute, order is defined by an integer. - // For the other case (EagerStaticClassConstructionAttribute), order is defined - // implicitly. - - var decoded = ((EcmaType)type.GetTypeDefinition()).GetDecodedCustomAttribute( - "System.Runtime.CompilerServices", "EagerOrderedStaticConstructorAttribute"); - - if (decoded != null) - return (int)decoded.Value.FixedArguments[0].Value; - - Debug.Assert(type.HasCustomAttribute("System.Runtime.CompilerServices", "EagerStaticClassConstructionAttribute")); - // RhBind on .NET Native for UWP will sort these based on static dependencies of the .cctors. - // We could probably do the same, but this attribute is pretty much deprecated in favor of - // EagerOrderedStaticConstructorAttribute that has explicit order. 
The remaining uses of - // the unordered one don't appear to have dependencies, so sorting them all before the - // ordered ones should do. - return -1; - } - - public int Compare(DependencyAnalysis.IMethodNode x, DependencyAnalysis.IMethodNode y) - { - var typeX = (MetadataType)x.Method.OwningType; - var typeY = (MetadataType)y.Method.OwningType; - - int orderX = GetConstructionOrder(typeX); - int orderY = GetConstructionOrder(typeY); - - int result; - if (orderX != orderY) - { - result = Comparer<int>.Default.Compare(orderX, orderY); - } - else - { - // Use type name as a tie breaker. We need this algorithm to produce stable - // ordering so that the sequence of eager cctors is deterministic. - result = String.Compare(typeX.GetFullName(), typeY.GetFullName(), StringComparison.Ordinal); - } - - return result; + return mdType != null && + mdType.HasCustomAttribute("System.Runtime.CompilerServices", "EagerStaticClassConstructionAttribute"); } } } diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ExactMethodInstantiationsNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ExactMethodInstantiationsNode.cs new file mode 100644 index 000000000..124b1854f --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ExactMethodInstantiationsNode.cs @@ -0,0 +1,157 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.IO; +using System.Diagnostics; + +using Internal.Text; +using Internal.TypeSystem; +using Internal.NativeFormat; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Hashtable of all exact (non-canonical) generic method instantiations compiled in the module. 
+ /// </summary> + internal sealed class ExactMethodInstantiationsNode : ObjectNode, ISymbolNode + { + private ObjectAndOffsetSymbolNode _endSymbol; + private ExternalReferencesTableNode _externalReferences; + + public ExactMethodInstantiationsNode(ExternalReferencesTableNode externalReferences) + { + _endSymbol = new ObjectAndOffsetSymbolNode(this, 0, "__exact_method_instantiations_End", true); + _externalReferences = externalReferences; + } + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append(nameMangler.CompilationUnitPrefix).Append("__exact_method_instantiations"); + } + + public ISymbolNode EndSymbol => _endSymbol; + public int Offset => 0; + public override bool IsShareable => false; + public override ObjectNodeSection Section => ObjectNodeSection.DataSection; + public override bool StaticDependenciesAreComputed => true; + protected override string GetName() => this.GetMangledName(); + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + // Dependencies for this node are tracked by the method code nodes + if (relocsOnly) + return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); + + // Ensure the native layout data has been saved, in order to get valid Vertex offsets for the signature Vertices + factory.MetadataManager.NativeLayoutInfo.SaveNativeLayoutInfoWriter(factory); + + NativeWriter nativeWriter = new NativeWriter(); + VertexHashtable hashtable = new VertexHashtable(); + Section nativeSection = nativeWriter.NewSection(); + nativeSection.Place(hashtable); + + + foreach (MethodDesc method in factory.MetadataManager.GetCompiledMethods()) + { + if (!IsMethodEligibleForTracking(method)) + continue; + + // Get the method pointer vertex + + bool getUnboxingStub = method.OwningType.IsValueType && !method.Signature.IsStatic; + IMethodNode methodEntryPointNode = factory.MethodEntrypoint(method, getUnboxingStub); + Vertex methodPointer = 
nativeWriter.GetUnsignedConstant(_externalReferences.GetIndex(methodEntryPointNode)); + + // Get native layout vertices for the declaring type + + ISymbolNode declaringTypeNode = factory.NecessaryTypeSymbol(method.OwningType); + Vertex declaringType = nativeWriter.GetUnsignedConstant(_externalReferences.GetIndex(declaringTypeNode)); + + // Get a vertex sequence for the method instantiation args if any + + VertexSequence arguments = new VertexSequence(); + foreach (var arg in method.Instantiation) + { + ISymbolNode argNode = factory.NecessaryTypeSymbol(arg); + arguments.Append(nativeWriter.GetUnsignedConstant(_externalReferences.GetIndex(argNode))); + } + + // Get the name and sig of the method. + // Note: the method name and signature are stored in the NativeLayoutInfo blob, not in the hashtable we build here. + + NativeLayoutMethodNameAndSignatureVertexNode nameAndSig = factory.NativeLayout.MethodNameAndSignatureVertex(method.GetTypicalMethodDefinition()); + NativeLayoutPlacedSignatureVertexNode placedNameAndSig = factory.NativeLayout.PlacedSignatureVertex(nameAndSig); + Debug.Assert(placedNameAndSig.SavedVertex != null); + Vertex placedNameAndSigOffsetSig = nativeWriter.GetOffsetSignature(placedNameAndSig.SavedVertex); + + // Get the vertex for the completed method signature + + Vertex methodSignature = nativeWriter.GetTuple(declaringType, placedNameAndSigOffsetSig, arguments); + + // Make the generic method entry vertex + + Vertex entry = nativeWriter.GetTuple(methodSignature, methodPointer); + + // Add to the hash table, hashed by the containing type's hashcode + uint hashCode = (uint)method.OwningType.GetHashCode(); + hashtable.Append(hashCode, nativeSection.Place(entry)); + } + + MemoryStream stream = new MemoryStream(); + nativeWriter.Save(stream); + + byte[] streamBytes = stream.ToArray(); + + _endSymbol.SetSymbolOffset(streamBytes.Length); + + return new ObjectData(streamBytes, Array.Empty<Relocation>(), 1, new ISymbolNode[] { this, _endSymbol }); + } + + 
public static DependencyList GetExactMethodInstantiationDependenciesForMethod(NodeFactory factory, MethodDesc method) + { + if (!IsMethodEligibleForTracking(method)) + return null; + + DependencyList dependencies = new DependencyList(); + + // Method entry point dependency + bool getUnboxingStub = method.OwningType.IsValueType && !method.Signature.IsStatic; + IMethodNode methodEntryPointNode = factory.MethodEntrypoint(method, getUnboxingStub); + dependencies.Add(new DependencyListEntry(methodEntryPointNode, "Exact method instantiation entry")); + + // Get native layout dependencies for the declaring type + dependencies.Add(new DependencyListEntry(factory.NecessaryTypeSymbol(method.OwningType), "Exact method instantiation entry")); + + // Get native layout dependencies for the method instantiation args + foreach (var arg in method.Instantiation) + dependencies.Add(new DependencyListEntry(factory.NecessaryTypeSymbol(arg), "Exact method instantiation entry")); + + // Get native layout dependencies for the method signature. + NativeLayoutMethodNameAndSignatureVertexNode nameAndSig = factory.NativeLayout.MethodNameAndSignatureVertex(method.GetTypicalMethodDefinition()); + dependencies.Add(new DependencyListEntry(factory.NativeLayout.PlacedSignatureVertex(nameAndSig), "Exact method instantiation entry")); + + return dependencies; + } + + private static bool IsMethodEligibleForTracking(MethodDesc method) + { + // Runtime determined methods should never show up here. + Debug.Assert(!method.IsRuntimeDeterminedExactMethod); + + if (method.IsAbstract) + return false; + + if (!method.HasInstantiation) + return false; + + // This hashtable is only for method instantiations that don't use generic dictionaries, + // so check if the given method is shared before proceeding + if (method.IsSharedByGenericInstantiations || method.GetCanonMethodTarget(CanonicalFormKind.Specific) != method) + return false; + + return true; + } + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericDictionaryNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericDictionaryNode.cs index a66288e4a..5b8c98172 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericDictionaryNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericDictionaryNode.cs @@ -69,11 +69,17 @@ namespace ILCompiler.DependencyAnalysis Instantiation typeInst = this.TypeInstantiation; Instantiation methodInst = this.MethodInstantiation; - foreach (var entry in layout.Entries) + foreach (GenericLookupResult lookupResult in layout.Entries) { - ISymbolNode targetNode = entry.GetTarget(factory, typeInst, methodInst); - int targetDelta = entry.TargetDelta; - builder.EmitPointerReloc(targetNode, targetDelta); +#if DEBUG + int offsetBefore = builder.CountBytes; +#endif + + lookupResult.EmitDictionaryEntry(ref builder, factory, typeInst, methodInst); + +#if DEBUG + Debug.Assert(builder.CountBytes - offsetBefore == factory.Target.PointerSize); +#endif } } diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericLookupResult.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericLookupResult.cs index 5760d328b..77da6177a 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericLookupResult.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericLookupResult.cs @@ -2,6 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
+using System; using System.Diagnostics; using Internal.Text; @@ -24,11 +25,10 @@ namespace ILCompiler.DependencyAnalysis public abstract void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb); public abstract override string ToString(); - /// <summary> - /// Gets the offset to be applied to the symbol returned by - /// <see cref="GetTarget(NodeFactory, Instantiation, Instantiation)"/> to get the actual target. - /// </summary> - public virtual int TargetDelta => 0; + public virtual void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) + { + builder.EmitPointerReloc(GetTarget(factory, typeInstantiation, methodInstantiation)); + } } /// <summary> @@ -101,14 +101,17 @@ namespace ILCompiler.DependencyAnalysis _method = method; } - public override int TargetDelta => FatFunctionPointerConstants.Offset; - public override ISymbolNode GetTarget(NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) { MethodDesc instantiatedMethod = _method.InstantiateSignature(typeInstantiation, methodInstantiation); return factory.FatFunctionPointer(instantiatedMethod); } + public override void EmitDictionaryEntry(ref ObjectDataBuilder builder, NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) + { + builder.EmitPointerReloc(GetTarget(factory, typeInstantiation, methodInstantiation), FatFunctionPointerConstants.Offset); + } + public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append("MethodEntry_"); @@ -217,6 +220,35 @@ namespace ILCompiler.DependencyAnalysis } /// <summary> + /// Generic lookup result that points to the threadstatic base index of a type. 
+ /// </summary> + internal sealed class TypeThreadStaticBaseIndexGenericLookupResult : GenericLookupResult + { + private MetadataType _type; + + public TypeThreadStaticBaseIndexGenericLookupResult(TypeDesc type) + { + Debug.Assert(type.IsRuntimeDeterminedSubtype, "Concrete static base in a generic dictionary?"); + Debug.Assert(type is MetadataType); + _type = (MetadataType)type; + } + + public override ISymbolNode GetTarget(NodeFactory factory, Instantiation typeInstantiation, Instantiation methodInstantiation) + { + var instantiatedType = (MetadataType)_type.InstantiateSignature(typeInstantiation, methodInstantiation); + return factory.TypeThreadStaticIndex(instantiatedType); + } + + public override void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append("ThreadStaticBase_"); + sb.Append(nameMangler.GetMangledTypeName(_type)); + } + + public override string ToString() => $"ThreadStaticBase: {_type}"; + } + + /// <summary> /// Generic lookup result that points to the GC static base of a type. 
/// </summary> internal sealed class TypeGCStaticBaseGenericLookupResult : GenericLookupResult diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericsHashtableNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericsHashtableNode.cs index fa2e07480..fd5315207 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericsHashtableNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/GenericsHashtableNode.cs @@ -4,8 +4,6 @@ using System; using System.IO; -using System.Diagnostics; -using System.Collections.Generic; using Internal.Text; using Internal.TypeSystem; @@ -21,23 +19,10 @@ namespace ILCompiler.DependencyAnalysis private ObjectAndOffsetSymbolNode _endSymbol; private ExternalReferencesTableNode _externalReferences; - private NativeWriter _writer; - private Section _tableSection; - private VertexHashtable _hashtable; - - private HashSet<TypeDesc> _genericTypeInstantiations; - public GenericsHashtableNode(ExternalReferencesTableNode externalReferences) { _endSymbol = new ObjectAndOffsetSymbolNode(this, 0, "__generics_hashtable_End", true); _externalReferences = externalReferences; - - _writer = new NativeWriter(); - _hashtable = new VertexHashtable(); - _tableSection = _writer.NewSection(); - _tableSection.Place(_hashtable); - - _genericTypeInstantiations = new HashSet<TypeDesc>(); } public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) @@ -52,31 +37,32 @@ namespace ILCompiler.DependencyAnalysis public override bool StaticDependenciesAreComputed => true; protected override string GetName() => this.GetMangledName(); - public void AddInstantiatedTypeEntry(NodeFactory factory, TypeDesc type) - { - Debug.Assert(type.HasInstantiation && !type.IsGenericDefinition); - - if (!_genericTypeInstantiations.Add(type)) - return; - - var typeSymbol = factory.NecessaryTypeSymbol(type); - uint instantiationId = _externalReferences.GetIndex(typeSymbol); - Vertex hashtableEntry = 
_writer.GetUnsignedConstant(instantiationId); - - _hashtable.Append((uint)type.GetHashCode(), _tableSection.Place(hashtableEntry)); - } - public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { // This node does not trigger generation of other nodes. if (relocsOnly) return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); - // Zero out the hashset so that we AV if someone tries to insert after we're done. - _genericTypeInstantiations = null; + NativeWriter nativeWriter = new NativeWriter(); + VertexHashtable hashtable = new VertexHashtable(); + Section nativeSection = nativeWriter.NewSection(); + nativeSection.Place(hashtable); + + foreach (var type in factory.MetadataManager.GetTypesWithEETypes()) + { + // If this is an instantiated non-canonical generic type, add it to the generic instantiations hashtable + if (!type.HasInstantiation || type.IsGenericDefinition || type.IsCanonicalSubtype(CanonicalFormKind.Any)) + continue; + + var typeSymbol = factory.NecessaryTypeSymbol(type); + uint instantiationId = _externalReferences.GetIndex(typeSymbol); + Vertex hashtableEntry = nativeWriter.GetUnsignedConstant(instantiationId); + + hashtable.Append((uint)type.GetHashCode(), nativeSection.Place(hashtableEntry)); + } MemoryStream stream = new MemoryStream(); - _writer.Save(stream); + nativeWriter.Save(stream); byte[] streamBytes = stream.ToArray(); _endSymbol.SetSymbolOffset(streamBytes.Length); diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/MethodCodeNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/MethodCodeNode.cs index 7c71a3c6e..fb45db873 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/MethodCodeNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/MethodCodeNode.cs @@ -99,6 +99,16 @@ namespace ILCompiler.DependencyAnalysis dependencies.Add(new DependencyListEntry(factory.MethodEntrypoint(invokeStub), "Reflection invoke")); 
} + if (_method.HasInstantiation) + { + var exactMethodInstantiationDependencies = ExactMethodInstantiationsNode.GetExactMethodInstantiationDependenciesForMethod(factory, _method); + if (exactMethodInstantiationDependencies != null) + { + dependencies = dependencies ?? new DependencyList(); + dependencies.AddRange(exactMethodInstantiationDependencies); + } + } + return dependencies; } diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutInfoNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutInfoNode.cs index 76030201a..e299b9e1a 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutInfoNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutInfoNode.cs @@ -22,249 +22,74 @@ namespace ILCompiler.DependencyAnalysis private ExternalReferencesTableNode _externalReferences; private NativeWriter _writer; - private MemoryStream _writerStream; + private byte[] _writerSavedBytes; private Section _signaturesSection; private Section _ldTokenInfoSection; + private List<NativeLayoutVertexNode> _vertexNodesToWrite; + public NativeLayoutInfoNode(ExternalReferencesTableNode externalReferences) { _endSymbol = new ObjectAndOffsetSymbolNode(this, 0, "__nativelayoutinfo_End", true); _externalReferences = externalReferences; + _writer = new NativeWriter(); _signaturesSection = _writer.NewSection(); _ldTokenInfoSection = _writer.NewSection(); - } - public ISymbolNode EndSymbol - { - get - { - return _endSymbol; - } + _vertexNodesToWrite = new List<NativeLayoutVertexNode>(); } public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) { sb.Append(nameMangler.CompilationUnitPrefix).Append("__nativelayoutinfo"); } + public ISymbolNode EndSymbol => _endSymbol; public int Offset => 0; public override bool IsShareable => false; - public override ObjectNodeSection Section => ObjectNodeSection.DataSection; - public override bool StaticDependenciesAreComputed => true; - 
protected override string GetName() => this.GetMangledName(); - public void SaveNativeLayoutInfoWriter() - { - if (_writerStream != null) - { -#if DEBUG - // Sanity check... We should not write new items to the native layout after - // we've already saved it. - - MemoryStream debugStream = new MemoryStream(); - _writer.Save(debugStream); - byte[] debugStreamBytes = debugStream.ToArray(); - byte[] nativeLayoutInfoBytes = _writerStream.ToArray(); - Debug.Assert(debugStreamBytes.Length == nativeLayoutInfoBytes.Length); - for (int i = 0; i < debugStreamBytes.Length; i++) - Debug.Assert(debugStreamBytes[i] == nativeLayoutInfoBytes[i]); -#endif - return; - } - - _writerStream = new MemoryStream(); - _writer.Save(_writerStream); - } - - public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) - { - // This node does not trigger generation of other nodes. - if (relocsOnly) - return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); - - SaveNativeLayoutInfoWriter(); - - byte[] nativeLayoutInfoBytes = _writerStream.ToArray(); - - _endSymbol.SetSymbolOffset(nativeLayoutInfoBytes.Length); - - return new ObjectData(nativeLayoutInfoBytes, Array.Empty<Relocation>(), 1, new ISymbolNode[] { this, _endSymbol }); - } - - #region Vertex Building Functions - public Vertex GetNativeLayoutInfoSignatureForLdToken(NodeFactory factory, MethodDesc method) - { - // TODO: option to return the uninstantiated signature info. Current implementation will only encode - // the instantiated signature (i.e. with containing type, return type, args, etc... 
encoded as external types) - - Vertex signature = null; - - Vertex containingType = GetNativeLayoutInfoSignatureForEEType(factory, method.OwningType); - Vertex nameAndSig = _writer.GetMethodNameAndSigSignature( - method.Name, - GetNativeLayoutInfoSignatureForMethodSignature(factory, method.GetTypicalMethodDefinition())); - Vertex[] args = null; - MethodFlags flags = 0; - - if (method.HasInstantiation) - { - flags |= MethodFlags.HasInstantiation; - args = new Vertex[method.Instantiation.Length]; - for (int i = 0; i < args.Length; i++) - args[i] = GetNativeLayoutInfoSignatureForEEType(factory, method.Instantiation[i]); - } - - signature = _writer.GetMethodSignature((uint)flags, 0, containingType, nameAndSig, args); - - return _ldTokenInfoSection.Place(signature); - } - - public Vertex GetNativeLayoutInfoSignatureForEEType(NodeFactory factory, TypeDesc type) - { - IEETypeNode typeSymbol = factory.NecessaryTypeSymbol(type); - uint typeIndex = _externalReferences.GetIndex(typeSymbol); - return _writer.GetExternalTypeSignature(typeIndex); - } + public Section LdTokenInfoSection => _ldTokenInfoSection; + public Section SignaturesSection => _signaturesSection; + public ExternalReferencesTableNode ExternalReferences => _externalReferences; + public NativeWriter Writer => _writer; - public Vertex GetNativeLayoutInfoSignatureForMethodSignature(NodeFactory factory, MethodDesc method) + public void AddVertexNodeToNativeLayout(NativeLayoutVertexNode vertexNode) { - MethodCallingConvention methodCallingConvention = default(MethodCallingConvention); - - if (method.Signature.GenericParameterCount > 0) - methodCallingConvention |= MethodCallingConvention.Generic; - if (method.Signature.IsStatic) - methodCallingConvention |= MethodCallingConvention.Static; - - int parameterCount = method.Signature.Length; - Vertex returnType = GetNativeLayoutInfoSignatureForTypeSignature(factory, method.Signature.ReturnType); - Vertex[] parameters = new Vertex[parameterCount]; - for (int i = 0; i < 
parameterCount; i++) - { - parameters[i] = GetNativeLayoutInfoSignatureForTypeSignature(factory, method.Signature[i]); - } - - return _signaturesSection.Place(_writer.GetMethodSigSignature((uint)methodCallingConvention, (uint)method.Signature.GenericParameterCount, returnType, parameters)); + _vertexNodesToWrite.Add(vertexNode); } - public Vertex GetNativeLayoutInfoSignatureForPlacedNameAndSignature(NodeFactory factory, MethodDesc method) + public void SaveNativeLayoutInfoWriter(NodeFactory factory) { - // Always use the NativeLayoutInfo node for names and sigs. This saves space, - // since we can Unify more signatures, allows optimizations in comparing sigs in the same module, and prevents the dynamic - // type loader having to know about other native layout sections (since sigs contain types). If we are using a non-native - // layout info writer, write the sig to the native layout info, and refer to it by offset in its own section. At runtime, - // we will assume all names and sigs are in the native layout and find it. - - Vertex methodSig = GetNativeLayoutInfoSignatureForMethodSignature(factory, method); - Vertex nameAndSig = _writer.GetMethodNameAndSigSignature(method.Name, methodSig); - return _signaturesSection.Place(nameAndSig); - } + if (_writerSavedBytes != null) + return; - public Vertex GetNativeLayoutInfoSignatureForPlacedTypeSignature(NodeFactory factory, TypeDesc type) - { - // Similar to method name and signatures, we always use the NativeLayoutInfo blob for type signatures too (same reasons). 
+ foreach (var vertexNode in _vertexNodesToWrite) + vertexNode.WriteVertex(factory); - Vertex typeSignature = GetNativeLayoutInfoSignatureForTypeSignature(factory, type); - return _signaturesSection.Place(typeSignature); - } + MemoryStream writerStream = new MemoryStream(); + _writer.Save(writerStream); + _writerSavedBytes = writerStream.ToArray(); - public Vertex GetNativeLayoutInfoOffsetSignature(Vertex nativeLayoutInfoVertex) - { - // Creates a vertex that holds the offset to a signature in the native layout - return _writer.GetOffsetSignature(nativeLayoutInfoVertex); + // Zero out the native writer and vertex list so that we AV if someone tries to insert after we're done. + _writer = null; + _vertexNodesToWrite = null; } - public Vertex GetNativeLayoutInfoSignatureForTypeSignature(NodeFactory factory, TypeDesc type) + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) { - Vertex signature = null; - - switch (type.Category) - { - case Internal.TypeSystem.TypeFlags.SzArray: - signature = _writer.GetModifierTypeSignature(TypeModifierKind.Array, GetNativeLayoutInfoSignatureForTypeSignature(factory, ((ArrayType)type).ElementType)); - break; - - case Internal.TypeSystem.TypeFlags.Pointer: - signature = _writer.GetModifierTypeSignature(TypeModifierKind.Pointer, GetNativeLayoutInfoSignatureForTypeSignature(factory, ((PointerType)type).ParameterType)); - break; - - case Internal.TypeSystem.TypeFlags.ByRef: - signature = _writer.GetModifierTypeSignature(TypeModifierKind.ByRef, GetNativeLayoutInfoSignatureForTypeSignature(factory, ((ByRefType)type).ParameterType)); - break; - - case Internal.TypeSystem.TypeFlags.SignatureTypeVariable: - signature = _writer.GetVariableTypeSignature((uint)((SignatureVariable)type).Index, false); - break; - - case Internal.TypeSystem.TypeFlags.SignatureMethodVariable: - signature = _writer.GetVariableTypeSignature((uint)((SignatureMethodVariable)type).Index, true); - break; - - case 
Internal.TypeSystem.TypeFlags.Void: - case Internal.TypeSystem.TypeFlags.Boolean: - case Internal.TypeSystem.TypeFlags.Char: - case Internal.TypeSystem.TypeFlags.SByte: - case Internal.TypeSystem.TypeFlags.Byte: - case Internal.TypeSystem.TypeFlags.Int16: - case Internal.TypeSystem.TypeFlags.UInt16: - case Internal.TypeSystem.TypeFlags.Int32: - case Internal.TypeSystem.TypeFlags.UInt32: - case Internal.TypeSystem.TypeFlags.Int64: - case Internal.TypeSystem.TypeFlags.UInt64: - case Internal.TypeSystem.TypeFlags.Single: - case Internal.TypeSystem.TypeFlags.Double: - case Internal.TypeSystem.TypeFlags.IntPtr: - case Internal.TypeSystem.TypeFlags.UIntPtr: - case Internal.TypeSystem.TypeFlags.Enum: - signature = GetNativeLayoutInfoSignatureForEEType(factory, type); - break; - - case Internal.TypeSystem.TypeFlags.Class: - case Internal.TypeSystem.TypeFlags.ValueType: - case Internal.TypeSystem.TypeFlags.Interface: - if (type.HasInstantiation && !type.IsGenericDefinition) - { - TypeDesc typeDef = type.GetTypeDefinition(); - - Vertex typeDefVertex = GetNativeLayoutInfoSignatureForTypeSignature(factory, typeDef); - Vertex[] args = new Vertex[type.Instantiation.Length]; - for (int i = 0; i < args.Length; i++) - args[i] = GetNativeLayoutInfoSignatureForTypeSignature(factory, type.Instantiation[i]); - - signature = _writer.GetInstantiationTypeSignature(typeDefVertex, args); - } - else - { - signature = GetNativeLayoutInfoSignatureForEEType(factory, type); - } - break; - - case Internal.TypeSystem.TypeFlags.Array: - { - ArrayType arrayType = type as ArrayType; - - Vertex elementType = GetNativeLayoutInfoSignatureForTypeSignature(factory, arrayType.ElementType); - - // Skip bounds and lobounds (TODO) - var bounds = Array.Empty<uint>(); - var lobounds = Array.Empty<uint>(); - - signature = _writer.GetMDArrayTypeSignature(elementType, (uint)arrayType.Rank, bounds, lobounds); - } - break; + // Dependencies of the NativeLayoutInfo node are tracked by the callers that emit data into 
the native layout writer + if (relocsOnly) + return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); - // TODO case Internal.TypeSystem.TypeFlags.FunctionPointer: + SaveNativeLayoutInfoWriter(factory); - default: - throw new NotImplementedException("NYI"); - } + _endSymbol.SetSymbolOffset(_writerSavedBytes.Length); - Debug.Assert(signature != null); - return signature; + return new ObjectData(_writerSavedBytes, Array.Empty<Relocation>(), 1, new ISymbolNode[] { this, _endSymbol }); } - #endregion } } diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutSignatureNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutSignatureNode.cs new file mode 100644 index 000000000..b9fbce4cd --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutSignatureNode.cs @@ -0,0 +1,66 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; + +using Internal.Text; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Represents a native layout signature. 
A signature is a pair where the first item is a pointer + /// to the TypeManager that contains the native layout info blob of interest, and the second item + /// is an offset into that native layout info blob + /// </summary> + class NativeLayoutSignatureNode : ObjectNode, ISymbolNode + { + private static int s_counter = 0; + + private int _id; + private NativeLayoutSavedVertexNode _nativeSignature; + + public NativeLayoutSignatureNode(NativeLayoutSavedVertexNode nativeSignature) + { + _nativeSignature = nativeSignature; + _id = s_counter++; + } + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append(nameMangler.CompilationUnitPrefix).Append("__NativeLayoutSignature_" + _id); + } + public int Offset => 0; + protected override string GetName() => this.GetMangledName(); + public override ObjectNodeSection Section => ObjectNodeSection.ReadOnlyDataSection; + public override bool IsShareable => false; + public override bool StaticDependenciesAreComputed => true; + + protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) + { + DependencyList dependencies = new DependencyList(); + dependencies.Add(new DependencyListEntry(_nativeSignature, "NativeLayoutSignatureNode target vertex")); + return dependencies; + } + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + // This node does not trigger generation of other nodes. 
+ if (relocsOnly) + return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); + + // Ensure native layout is saved to get valid Vertex offsets + factory.MetadataManager.NativeLayoutInfo.SaveNativeLayoutInfoWriter(factory); + + ObjectDataBuilder objData = new ObjectDataBuilder(factory); + + objData.Alignment = objData.TargetPointerSize; + objData.DefinedSymbols.Add(this); + + objData.EmitPointerReloc(factory.TypeManagerIndirection); + objData.EmitNaturalInt(_nativeSignature.SavedVertex.VertexOffset); + + return objData.ToObjectData(); + } + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutVertexNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutVertexNode.cs new file mode 100644 index 000000000..1283b45fe --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NativeLayoutVertexNode.cs @@ -0,0 +1,407 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.Collections.Generic; +using System.Diagnostics; + +using Internal.NativeFormat; +using Internal.TypeSystem; +using ILCompiler.DependencyAnalysisFramework; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Wrapper nodes for native layout vertex structures. These wrapper nodes are "abstract" as they do not + /// generate any data. They are used to keep track of the dependency nodes required by a Vertex structure. + /// + /// Any node in the graph that references data in the native layout blob needs to create one of these + /// NativeLayoutVertexNode nodes, and track it as a dependency of itself. + /// Example: MethodCodeNodes that are saved to the table in the ExactMethodInstantiationsNode reference + /// signatures stored in the native layout blob, so a NativeLayoutPlacedSignatureVertexNode node is created + /// and returned as a static dependency of the associated MethodCodeNode (in the GetStaticDependencies API). + /// + /// Each NativeLayoutVertexNode that gets marked in the graph will register itself with the NativeLayoutInfoNode, + /// so that the NativeLayoutInfoNode can write it later to the native layout blob during the call to its GetData API. 
+ /// </summary> + internal abstract class NativeLayoutVertexNode : DependencyNodeCore<NodeFactory> + { + public override bool HasConditionalStaticDependencies => false; + public override bool HasDynamicDependencies => false; + public override bool InterestingForDynamicDependencyAnalysis => false; + public override bool StaticDependenciesAreComputed => true; + + public override IEnumerable<CombinedDependencyListEntry> GetConditionalStaticDependencies(NodeFactory context) + { + return Array.Empty<CombinedDependencyListEntry>(); + } + + public override IEnumerable<CombinedDependencyListEntry> SearchDynamicDependencies(List<DependencyNodeCore<NodeFactory>> markedNodes, int firstNode, NodeFactory context) + { + return Array.Empty<CombinedDependencyListEntry>(); + } + + protected override void OnMarked(NodeFactory context) + { + context.MetadataManager.NativeLayoutInfo.AddVertexNodeToNativeLayout(this); + } + + public abstract Vertex WriteVertex(NodeFactory factory); + + protected NativeWriter GetNativeWriter(NodeFactory factory) + { + // There is only one native layout info blob, so only one writer for now... + return factory.MetadataManager.NativeLayoutInfo.Writer; + } + } + + /// <summary> + /// Any NativeLayoutVertexNode that needs to expose the native layout Vertex after it has been saved + /// needs to derive from this NativeLayoutSavedVertexNode class. + /// + /// A nativelayout Vertex should typically only be exposed for Vertex offset fetching purposes, after the native + /// writer is saved (Vertex offsets get generated when the native writer gets saved). + /// + /// It is important for whoever derives from this class to produce unified Vertices. Calling the WriteVertex method + /// multiple times should always produce the same exact unified Vertex each time (hence the assert in SetSavedVertex). + /// All nativewriter.Getxyz methods return unified Vertices. 
+ /// + /// When exposing a saved Vertex that is a result of a section placement operation (Section.Place(...)), always make + /// sure a unified Vertex is being placed in the section (Section.Place creates a PlacedVertex structure that wraps the + /// Vertex to be placed, so if the Vertex to be placed is unified, there will only be a single unified PlacedVertex + /// structure created for that placed Vertex). + /// </summary> + internal abstract class NativeLayoutSavedVertexNode : NativeLayoutVertexNode + { + public Vertex SavedVertex { get; private set; } + protected Vertex SetSavedVertex(Vertex value) + { + Debug.Assert(SavedVertex == null || Object.ReferenceEquals(SavedVertex, value)); + SavedVertex = value; + return value; + } + } + + internal sealed class NativeLayoutMethodLdTokenVertexNode : NativeLayoutSavedVertexNode + { + private MethodDesc _method; + private NativeLayoutTypeSignatureVertexNode _containingTypeSig; + private NativeLayoutMethodSignatureVertexNode _methodSig; + private NativeLayoutTypeSignatureVertexNode[] _instantiationArgsSig; + + protected override string GetName() => "NativeLayoutMethodLdTokenVertexNode_" + NodeFactory.NameMangler.GetMangledMethodName(_method); + + public NativeLayoutMethodLdTokenVertexNode(NodeFactory factory, MethodDesc method) + { + _method = method; + _containingTypeSig = factory.NativeLayout.TypeSignatureVertex(method.OwningType); + _methodSig = factory.NativeLayout.MethodSignatureVertex(method.GetTypicalMethodDefinition()); + if (method.HasInstantiation) + { + _instantiationArgsSig = new NativeLayoutTypeSignatureVertexNode[method.Instantiation.Length]; + for (int i = 0; i < _instantiationArgsSig.Length; i++) + _instantiationArgsSig[i] = factory.NativeLayout.TypeSignatureVertex(method.Instantiation[i]); + } + } + + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + DependencyList dependencies = new DependencyList(); + + dependencies.Add(new 
DependencyListEntry(_containingTypeSig, "NativeLayoutLdTokenVertexNode containing type signature")); + dependencies.Add(new DependencyListEntry(_methodSig, "NativeLayoutLdTokenVertexNode method signature")); + foreach (var arg in _instantiationArgsSig) + dependencies.Add(new DependencyListEntry(arg, "NativeLayoutLdTokenVertexNode instantiation argument signature")); + + return dependencies; + } + + public override Vertex WriteVertex(NodeFactory factory) + { + Vertex containingType = _containingTypeSig.WriteVertex(factory); + Vertex methodSig = _methodSig.WriteVertex(factory); + Vertex methodNameAndSig = GetNativeWriter(factory).GetMethodNameAndSigSignature(_method.Name, methodSig); + + Debug.Assert(_instantiationArgsSig == null || (_method.HasInstantiation && _method.Instantiation.Length == _instantiationArgsSig.Length)); + + Vertex[] args = null; + MethodFlags flags = 0; + if (_method.HasInstantiation) + { + flags |= MethodFlags.HasInstantiation; + args = new Vertex[_method.Instantiation.Length]; + for (int i = 0; i < args.Length; i++) + args[i] = _instantiationArgsSig[i].WriteVertex(factory); + } + + Vertex signature = GetNativeWriter(factory).GetMethodSignature((uint)flags, 0, containingType, methodNameAndSig, args); + return SetSavedVertex(factory.MetadataManager.NativeLayoutInfo.LdTokenInfoSection.Place(signature)); + } + } + + internal sealed class NativeLayoutMethodSignatureVertexNode : NativeLayoutVertexNode + { + private MethodDesc _method; + private NativeLayoutTypeSignatureVertexNode _returnTypeSig; + private NativeLayoutTypeSignatureVertexNode[] _parametersSig; + + protected override string GetName() => "NativeLayoutMethodSignatureVertexNode" + NodeFactory.NameMangler.GetMangledMethodName(_method); + + public NativeLayoutMethodSignatureVertexNode(NodeFactory factory, MethodDesc method) + { + _method = method; + _returnTypeSig = factory.NativeLayout.TypeSignatureVertex(method.Signature.ReturnType); + _parametersSig = new 
NativeLayoutTypeSignatureVertexNode[method.Signature.Length]; + for (int i = 0; i < _parametersSig.Length; i++) + _parametersSig[i] = factory.NativeLayout.TypeSignatureVertex(method.Signature[i]); + } + + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + DependencyList dependencies = new DependencyList(); + + dependencies.Add(new DependencyListEntry(_returnTypeSig, "NativeLayoutMethodSignatureVertexNode return type signature")); + foreach (var arg in _parametersSig) + dependencies.Add(new DependencyListEntry(arg, "NativeLayoutMethodSignatureVertexNode parameter signature")); + + return dependencies; + } + + public override Vertex WriteVertex(NodeFactory factory) + { + MethodCallingConvention methodCallingConvention = default(MethodCallingConvention); + + if (_method.Signature.GenericParameterCount > 0) + methodCallingConvention |= MethodCallingConvention.Generic; + if (_method.Signature.IsStatic) + methodCallingConvention |= MethodCallingConvention.Static; + + Debug.Assert(_method.Signature.Length == _parametersSig.Length); + + Vertex returnType = _returnTypeSig.WriteVertex(factory); + Vertex[] parameters = new Vertex[_parametersSig.Length]; + for (int i = 0; i < _parametersSig.Length; i++) + parameters[i] = _parametersSig[i].WriteVertex(factory); + + Vertex signature = GetNativeWriter(factory).GetMethodSigSignature((uint)methodCallingConvention, (uint)_method.Signature.GenericParameterCount, returnType, parameters); + return factory.MetadataManager.NativeLayoutInfo.SignaturesSection.Place(signature); + } + } + + internal sealed class NativeLayoutMethodNameAndSignatureVertexNode : NativeLayoutVertexNode + { + private MethodDesc _method; + private NativeLayoutMethodSignatureVertexNode _methodSig; + + protected override string GetName() => "NativeLayoutMethodNameAndSignatureVertexNode" + NodeFactory.NameMangler.GetMangledMethodName(_method); + + public NativeLayoutMethodNameAndSignatureVertexNode(NodeFactory factory, 
MethodDesc method) + { + _method = method; + _methodSig = factory.NativeLayout.MethodSignatureVertex(method); + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + return new DependencyListEntry[] { new DependencyListEntry(_methodSig, "NativeLayoutMethodNameAndSignatureVertexNode signature vertex") }; + } + public override Vertex WriteVertex(NodeFactory factory) + { + Vertex methodSig = _methodSig.WriteVertex(factory); + return GetNativeWriter(factory).GetMethodNameAndSigSignature(_method.Name, methodSig); + } + } + + internal abstract class NativeLayoutTypeSignatureVertexNode : NativeLayoutVertexNode + { + protected readonly TypeDesc _type; + + protected NativeLayoutTypeSignatureVertexNode(TypeDesc type) + { + _type = type; + } + + protected override string GetName() => "NativeLayoutTypeSignatureVertexNode" + NodeFactory.NameMangler.GetMangledTypeName(_type); + + public static NativeLayoutTypeSignatureVertexNode NewTypeSignatureVertexNode(NodeFactory factory, TypeDesc type) + { + switch (type.Category) + { + case Internal.TypeSystem.TypeFlags.Array: + case Internal.TypeSystem.TypeFlags.SzArray: + case Internal.TypeSystem.TypeFlags.Pointer: + case Internal.TypeSystem.TypeFlags.ByRef: + return new NativeLayoutParameterizedTypeSignatureVertexNode(factory, type); + + case Internal.TypeSystem.TypeFlags.SignatureTypeVariable: + case Internal.TypeSystem.TypeFlags.SignatureMethodVariable: + return new NativeLayoutGenericVarSignatureVertexNode(factory, type); + + // TODO Internal.TypeSystem.TypeFlags.FunctionPointer (Runtime parsing also not yet implemented) + case Internal.TypeSystem.TypeFlags.FunctionPointer: + throw new NotImplementedException("FunctionPointer signature"); + + default: + { + Debug.Assert(type.IsDefType); + + if (type.HasInstantiation && !type.IsGenericDefinition) + return new NativeLayoutInstantiatedTypeSignatureVertexNode(factory, type); + else + return new NativeLayoutEETypeSignatureVertexNode(factory, 
type); + } + } + } + + sealed class NativeLayoutParameterizedTypeSignatureVertexNode : NativeLayoutTypeSignatureVertexNode + { + private NativeLayoutVertexNode _parameterTypeSig; + + public NativeLayoutParameterizedTypeSignatureVertexNode(NodeFactory factory, TypeDesc type) : base(type) + { + _parameterTypeSig = factory.NativeLayout.TypeSignatureVertex(((ParameterizedType)type).ParameterType); + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + return new DependencyListEntry[] { new DependencyListEntry(_parameterTypeSig, "NativeLayoutParameterizedTypeSignatureVertexNode parameter type signature") }; + } + public override Vertex WriteVertex(NodeFactory factory) + { + switch (_type.Category) + { + case Internal.TypeSystem.TypeFlags.SzArray: + return GetNativeWriter(factory).GetModifierTypeSignature(TypeModifierKind.Array, _parameterTypeSig.WriteVertex(factory)); + + case Internal.TypeSystem.TypeFlags.Pointer: + return GetNativeWriter(factory).GetModifierTypeSignature(TypeModifierKind.Pointer, _parameterTypeSig.WriteVertex(factory)); + + case Internal.TypeSystem.TypeFlags.ByRef: + return GetNativeWriter(factory).GetModifierTypeSignature(TypeModifierKind.ByRef, _parameterTypeSig.WriteVertex(factory)); + + case Internal.TypeSystem.TypeFlags.Array: + { + Vertex elementType = _parameterTypeSig.WriteVertex(factory); + + // Skip bounds and lobounds (TODO) + var bounds = Array.Empty<uint>(); + var lobounds = Array.Empty<uint>(); + + return GetNativeWriter(factory).GetMDArrayTypeSignature(elementType, (uint)((ArrayType)_type).Rank, bounds, lobounds); + } + } + + Debug.Assert(false, "UNREACHABLE"); + return null; + } + } + + sealed class NativeLayoutGenericVarSignatureVertexNode : NativeLayoutTypeSignatureVertexNode + { + public NativeLayoutGenericVarSignatureVertexNode(NodeFactory factory, TypeDesc type) : base(type) + { + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + 
return Array.Empty<DependencyListEntry>(); + } + public override Vertex WriteVertex(NodeFactory factory) + { + switch (_type.Category) + { + case Internal.TypeSystem.TypeFlags.SignatureTypeVariable: + return GetNativeWriter(factory).GetVariableTypeSignature((uint)((SignatureVariable)_type).Index, false); + + case Internal.TypeSystem.TypeFlags.SignatureMethodVariable: + return GetNativeWriter(factory).GetVariableTypeSignature((uint)((SignatureMethodVariable)_type).Index, true); + } + + Debug.Assert(false, "UNREACHABLE"); + return null; + } + } + + sealed class NativeLayoutInstantiatedTypeSignatureVertexNode : NativeLayoutTypeSignatureVertexNode + { + private NativeLayoutTypeSignatureVertexNode _genericTypeDefSig; + private NativeLayoutTypeSignatureVertexNode[] _instantiationArgs; + + public NativeLayoutInstantiatedTypeSignatureVertexNode(NodeFactory factory, TypeDesc type) : base(type) + { + Debug.Assert(type.HasInstantiation && !type.IsGenericDefinition); + + _genericTypeDefSig = factory.NativeLayout.TypeSignatureVertex(type.GetTypeDefinition()); + _instantiationArgs = new NativeLayoutTypeSignatureVertexNode[type.Instantiation.Length]; + for (int i = 0; i < _instantiationArgs.Length; i++) + _instantiationArgs[i] = factory.NativeLayout.TypeSignatureVertex(type.Instantiation[i]); + + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + DependencyList dependencies = new DependencyList(); + + dependencies.Add(new DependencyListEntry(_genericTypeDefSig, "NativeLayoutInstantiatedTypeSignatureVertexNode generic definition signature")); + foreach (var arg in _instantiationArgs) + dependencies.Add(new DependencyListEntry(arg, "NativeLayoutInstantiatedTypeSignatureVertexNode instantiation argument signature")); + + return dependencies; + } + public override Vertex WriteVertex(NodeFactory factory) + { + Vertex genericDefVertex = _genericTypeDefSig.WriteVertex(factory); + Vertex[] args = new Vertex[_instantiationArgs.Length]; + 
for (int i = 0; i < args.Length; i++) + args[i] = _instantiationArgs[i].WriteVertex(factory); + + return GetNativeWriter(factory).GetInstantiationTypeSignature(genericDefVertex, args); + } + } + + sealed class NativeLayoutEETypeSignatureVertexNode : NativeLayoutTypeSignatureVertexNode + { + public NativeLayoutEETypeSignatureVertexNode(NodeFactory factory, TypeDesc type) : base(type) + { + Debug.Assert(!type.HasInstantiation || type.IsGenericDefinition); + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + return new DependencyListEntry[] + { + new DependencyListEntry(context.NecessaryTypeSymbol(_type), "NativeLayoutEETypeVertexNode containing type signature") + }; + } + public override Vertex WriteVertex(NodeFactory factory) + { + IEETypeNode eetypeNode = factory.NecessaryTypeSymbol(_type); + uint typeIndex = factory.MetadataManager.NativeLayoutInfo.ExternalReferences.GetIndex(eetypeNode); + return GetNativeWriter(factory).GetExternalTypeSignature(typeIndex); + } + } + } + + internal sealed class NativeLayoutPlacedSignatureVertexNode : NativeLayoutSavedVertexNode + { + private NativeLayoutVertexNode _signatureToBePlaced; + + protected override string GetName() => "NativeLayoutTypeSignatureVertexNode"; + + public NativeLayoutPlacedSignatureVertexNode(NativeLayoutVertexNode signatureToBePlaced) + { + _signatureToBePlaced = signatureToBePlaced; + } + public override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory context) + { + return new DependencyListEntry[] { new DependencyListEntry(_signatureToBePlaced, "NativeLayoutPlacedSignatureVertexNode placed signature") }; + } + public override Vertex WriteVertex(NodeFactory factory) + { + // Always use the NativeLayoutInfo blob for names and sigs, even if the associated types/methods are written elsewhere. 
+ // This saves space, since we can Unify more signatures, allows optimizations in comparing sigs in the same module, and + // prevents the dynamic type loader having to know about other native layout sections (since sigs contain types). If we are + // using a non-native layout info writer, write the sig to the native layout info, and refer to it by offset in its own + // section. At runtime, we will assume all names and sigs are in the native layout and find it. + + Vertex signature = _signatureToBePlaced.WriteVertex(factory); + return SetSavedVertex(factory.MetadataManager.NativeLayoutInfo.SignaturesSection.Place(signature)); + } + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.GenericLookups.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.GenericLookups.cs index decf899d1..374358743 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.GenericLookups.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.GenericLookups.cs @@ -47,6 +47,11 @@ namespace ILCompiler.DependencyAnalysis return new VirtualResolveGenericLookupResult(method); }); + _typeThreadStaticBaseIndexSymbols = new NodeCache<TypeDesc, GenericLookupResult>(type => + { + return new TypeThreadStaticBaseIndexGenericLookupResult(type); + }); + _typeGCStaticBaseSymbols = new NodeCache<TypeDesc, GenericLookupResult>(type => { return new TypeGCStaticBaseGenericLookupResult(type); @@ -65,6 +70,13 @@ namespace ILCompiler.DependencyAnalysis return _typeSymbols.GetOrAdd(type); } + private NodeCache<TypeDesc, GenericLookupResult> _typeThreadStaticBaseIndexSymbols; + + public GenericLookupResult TypeThreadStaticBaseIndex(TypeDesc type) + { + return _typeThreadStaticBaseIndexSymbols.GetOrAdd(type); + } + private NodeCache<TypeDesc, GenericLookupResult> _typeGCStaticBaseSymbols; public GenericLookupResult TypeGCStaticBase(TypeDesc type) diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.NativeLayout.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.NativeLayout.cs new file mode 100644 index 000000000..0cc8c7102 --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.NativeLayout.cs @@ -0,0 +1,97 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using Internal.TypeSystem; + +namespace ILCompiler.DependencyAnalysis +{ + /// Part of Node factory that deals with nodes describing native layout information + partial class NodeFactory + { + /// <summary> + /// Helper class that provides a level of grouping for all the native layout lookups + /// </summary> + public class NativeLayoutHelper + { + NodeFactory _factory; + + public NativeLayoutHelper(NodeFactory factory) + { + _factory = factory; + CreateNodeCaches(); + } + + private void CreateNodeCaches() + { + _typeSignatures = new NodeCache<TypeDesc, NativeLayoutTypeSignatureVertexNode>(type => + { + return NativeLayoutTypeSignatureVertexNode.NewTypeSignatureVertexNode(_factory, type); + }); + + _methodSignatures = new NodeCache<MethodDesc, NativeLayoutMethodSignatureVertexNode>(method => + { + return new NativeLayoutMethodSignatureVertexNode(_factory, method); + }); + + _methodNameAndSignatures = new NodeCache<MethodDesc, NativeLayoutMethodNameAndSignatureVertexNode>(method => + { + return new NativeLayoutMethodNameAndSignatureVertexNode(_factory, method); + }); + + _placedSignatures = new NodeCache<NativeLayoutVertexNode, NativeLayoutPlacedSignatureVertexNode>(vertexNode => + { + return new NativeLayoutPlacedSignatureVertexNode(vertexNode); + }); + + _methodLdTokenSignatures = new NodeCache<MethodDesc, NativeLayoutMethodLdTokenVertexNode>(method => + { + return new NativeLayoutMethodLdTokenVertexNode(_factory, method); + }); + + _nativeLayoutSignatureNodes = new NodeCache<NativeLayoutSavedVertexNode, NativeLayoutSignatureNode>(signature => + { + return new NativeLayoutSignatureNode(signature); + }); + } + + private NodeCache<TypeDesc, NativeLayoutTypeSignatureVertexNode> _typeSignatures; + internal NativeLayoutTypeSignatureVertexNode TypeSignatureVertex(TypeDesc type) + { + return _typeSignatures.GetOrAdd(type); + } + + private NodeCache<MethodDesc, NativeLayoutMethodSignatureVertexNode> _methodSignatures; + internal NativeLayoutMethodSignatureVertexNode 
MethodSignatureVertex(MethodDesc method) + { + return _methodSignatures.GetOrAdd(method); + } + + private NodeCache<MethodDesc, NativeLayoutMethodNameAndSignatureVertexNode> _methodNameAndSignatures; + internal NativeLayoutMethodNameAndSignatureVertexNode MethodNameAndSignatureVertex(MethodDesc method) + { + return _methodNameAndSignatures.GetOrAdd(method); + } + + private NodeCache<NativeLayoutVertexNode, NativeLayoutPlacedSignatureVertexNode> _placedSignatures; + internal NativeLayoutPlacedSignatureVertexNode PlacedSignatureVertex(NativeLayoutVertexNode vertexNode) + { + return _placedSignatures.GetOrAdd(vertexNode); + } + + private NodeCache<MethodDesc, NativeLayoutMethodLdTokenVertexNode> _methodLdTokenSignatures; + internal NativeLayoutMethodLdTokenVertexNode MethodLdTokenVertex(MethodDesc method) + { + return _methodLdTokenSignatures.GetOrAdd(method); + } + + private NodeCache<NativeLayoutSavedVertexNode, NativeLayoutSignatureNode> _nativeLayoutSignatureNodes; + internal NativeLayoutSignatureNode NativeLayoutSignature(NativeLayoutSavedVertexNode signature) + { + return _nativeLayoutSignatureNodes.GetOrAdd(signature); + } + } + + public NativeLayoutHelper NativeLayout; + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.cs index e9cb6ca7d..083017c3a 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/NodeFactory.cs @@ -13,6 +13,7 @@ using Internal.Text; using Internal.TypeSystem; using Internal.Runtime; using Internal.IL; +using Internal.NativeFormat; namespace ILCompiler.DependencyAnalysis { @@ -152,6 +153,11 @@ namespace ILCompiler.DependencyAnalysis return new ThreadStaticsNode(type, this); }); + _typeThreadStaticIndices = new NodeCache<MetadataType, TypeThreadStaticIndexNode>(type => + { + return new TypeThreadStaticIndexNode(type); + }); + _GCStaticEETypes = new NodeCache<GCPointerMap, GCStaticEETypeNode>((GCPointerMap gcMap) => { return new GCStaticEETypeNode(Target, gcMap); @@ -235,6 +241,11 @@ namespace ILCompiler.DependencyAnalysis return new InterfaceDispatchMapNode(type); }); + _runtimeMethodHandles = new NodeCache<MethodDesc, RuntimeMethodHandleNode>((MethodDesc method) => + { + return new RuntimeMethodHandleNode(this, method); + }); + _interfaceDispatchMapIndirectionNodes = new NodeCache<TypeDesc, EmbeddedObjectNode>((TypeDesc type) => { var dispatchMap = InterfaceDispatchMap(type); @@ -283,6 +294,8 @@ namespace ILCompiler.DependencyAnalysis { return new StringAllocatorMethodNode(constructor); }); + + NativeLayout = new NativeLayoutHelper(this); } protected abstract IMethodNode CreateMethodEntrypointNode(MethodDesc method); @@ -349,15 +362,27 @@ namespace ILCompiler.DependencyAnalysis private NodeCache<MetadataType, ThreadStaticsNode> _threadStatics; - public ISymbolNode TypeThreadStaticsSymbol(MetadataType type) + public ThreadStaticsNode TypeThreadStaticsSymbol(MetadataType type) + { + // This node is always used in the context of its index within the region. 
+ // We should never ask for this if the current compilation doesn't contain the + // associated type. + Debug.Assert(_compilationModuleGroup.ContainsType(type)); + return _threadStatics.GetOrAdd(type); + } + + + private NodeCache<MetadataType, TypeThreadStaticIndexNode> _typeThreadStaticIndices; + + public ISymbolNode TypeThreadStaticIndex(MetadataType type) { if (_compilationModuleGroup.ContainsType(type)) { - return _threadStatics.GetOrAdd(type); + return _typeThreadStaticIndices.GetOrAdd(type); } else { - return ExternSymbol("__ThreadStaticBase_" + NodeFactory.NameMangler.GetMangledTypeName(type)); + return ExternSymbol("__TypeThreadStaticIndex_" + NameMangler.GetMangledTypeName(type)); } } @@ -368,6 +393,13 @@ namespace ILCompiler.DependencyAnalysis return _interfaceDispatchCells.GetOrAdd(method); } + private NodeCache<MethodDesc, RuntimeMethodHandleNode> _runtimeMethodHandles; + + internal RuntimeMethodHandleNode RuntimeMethodHandle(MethodDesc method) + { + return _runtimeMethodHandles.GetOrAdd(method); + } + private class BlobTupleEqualityComparer : IEqualityComparer<Tuple<Utf8String, byte[], int>> { bool IEqualityComparer<Tuple<Utf8String, byte[], int>>.Equals(Tuple<Utf8String, byte[], int> x, Tuple<Utf8String, byte[], int> y) @@ -657,7 +689,7 @@ namespace ILCompiler.DependencyAnalysis public ArrayOfEmbeddedPointersNode<IMethodNode> EagerCctorTable = new ArrayOfEmbeddedPointersNode<IMethodNode>( "__EagerCctorStart", "__EagerCctorEnd", - new EagerConstructorComparer()); + null); public ArrayOfEmbeddedPointersNode<InterfaceDispatchMapNode> DispatchMapTable = new ArrayOfEmbeddedPointersNode<InterfaceDispatchMapNode>( "__DispatchMapTableStart", diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunGenericHelperNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunGenericHelperNode.cs index e68854235..8b9bde04e 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunGenericHelperNode.cs +++ 
b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunGenericHelperNode.cs @@ -36,6 +36,8 @@ namespace ILCompiler.DependencyAnalysis return factory.GenericLookup.TypeGCStaticBase((TypeDesc)target); case ReadyToRunHelperId.GetNonGCStaticBase: return factory.GenericLookup.TypeNonGCStaticBase((TypeDesc)target); + case ReadyToRunHelperId.GetThreadStaticBase: + return factory.GenericLookup.TypeThreadStaticBaseIndex((TypeDesc)target); case ReadyToRunHelperId.MethodDictionary: return factory.GenericLookup.MethodDictionary((MethodDesc)target); case ReadyToRunHelperId.VirtualCall: diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunHelperNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunHelperNode.cs index 249fc4114..477eef6dd 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunHelperNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ReadyToRunHelperNode.cs @@ -156,12 +156,6 @@ namespace ILCompiler.DependencyAnalysis dependencyList.Add(factory.VirtualMethodUse((MethodDesc)_target), "ReadyToRun Virtual Method Address Load"); return dependencyList; } - else if (_id == ReadyToRunHelperId.GetThreadStaticBase) - { - DependencyList dependencyList = new DependencyList(); - dependencyList.Add(factory.TypeThreadStaticsSymbol((MetadataType)_target), "ReadyToRun Thread Static Storage"); - return dependencyList; - } else { return null; diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceDataNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceDataNode.cs new file mode 100644 index 000000000..2596bcf0b --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceDataNode.cs @@ -0,0 +1,189 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using Internal.Text; +using Internal.TypeSystem.Ecma; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using System.Reflection.Metadata; +using System.Reflection.PortableExecutable; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Blob of data containing resources for all assemblies generated into the image. + /// Resources are simply copied from the inputs and concatenated into this blob. + /// All format information is provided by <see cref="ResourceIndexNode"/> + /// </summary> + internal class ResourceDataNode : ObjectNode, ISymbolNode + { + /// <summary> + /// Resource index information generated while extracting resources into the data blob + /// </summary> + private List<ResourceIndexData> _indexData; + private int _totalLength; + + public ResourceDataNode() + { + _endSymbol = new ObjectAndOffsetSymbolNode(this, 0, "__embedded_resourcedata_End", true); + } + + private ObjectAndOffsetSymbolNode _endSymbol; + public ISymbolNode EndSymbol => _endSymbol; + + public override bool IsShareable => false; + + public override ObjectNodeSection Section => ObjectNodeSection.ReadOnlyDataSection; + + public override bool StaticDependenciesAreComputed => true; + + public int Offset => 0; + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append(nameMangler.CompilationUnitPrefix).Append("__embedded_resourcedata"); + } + + protected override string GetName() => this.GetMangledName(); + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + // This node has no relocations. 
+ if (relocsOnly) + return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); + + byte[] blob = GenerateResourceBlob(factory); + return new ObjectData( + blob, + Array.Empty<Relocation>(), + 1, + new ISymbolNode[] + { + this, + EndSymbol + }); + } + + public IReadOnlyList<ResourceIndexData> GetOrCreateIndexData(NodeFactory factory) + { + if (_indexData != null) + { + return _indexData; + } + + _totalLength = 0; + _indexData = new List<ResourceIndexData>(); + // Build up index information + foreach (EcmaAssembly module in factory.MetadataManager.GetModulesWithMetadata().OfType<EcmaAssembly>()) + { + PEMemoryBlock resourceDirectory = module.PEReader.GetSectionData(module.PEReader.PEHeaders.CorHeader.ResourcesDirectory.RelativeVirtualAddress); + + try + { + checked + { + foreach (var resourceHandle in module.MetadataReader.ManifestResources) + { + ManifestResource resource = module.MetadataReader.GetManifestResource(resourceHandle); + + // Don't try to embed linked resources or resources in other assemblies + if (!resource.Implementation.IsNil) + { + continue; + } + + string resourceName = module.MetadataReader.GetString(resource.Name); + string assemblyName = module.GetName().FullName; + BlobReader reader = resourceDirectory.GetReader((int)resource.Offset, resourceDirectory.Length - (int)resource.Offset); + int length = (int)reader.ReadUInt32(); + ResourceIndexData indexData = new ResourceIndexData(assemblyName, resourceName, _totalLength, (int)resource.Offset + sizeof(Int32), module, length); + _indexData.Add(indexData); + _totalLength += length; + } + } + } + catch (OverflowException) + { + throw new BadImageFormatException(); + } + } + + return _indexData; + } + + /// <summary> + /// Extracts resources from all modules being compiled into a single blob and saves + /// the information needed to create an index into that blob. 
+ /// </summary> + private byte[] GenerateResourceBlob(NodeFactory factory) + { + GetOrCreateIndexData(factory); + + // Read resources into the blob + byte[] resourceBlob = new byte[_totalLength]; + int currentPos = 0; + foreach (ResourceIndexData indexData in _indexData) + { + EcmaModule module = indexData.EcmaModule; + PEMemoryBlock resourceDirectory = module.PEReader.GetSectionData(module.PEReader.PEHeaders.CorHeader.ResourcesDirectory.RelativeVirtualAddress); + Debug.Assert(currentPos == indexData.NativeOffset); + BlobReader reader = resourceDirectory.GetReader(indexData.EcmaOffset, indexData.Length); + byte[] resourceData = reader.ReadBytes(indexData.Length); + Buffer.BlockCopy(resourceData, 0, resourceBlob, currentPos, resourceData.Length); + currentPos += resourceData.Length; + } + + _endSymbol.SetSymbolOffset(resourceBlob.Length); + return resourceBlob; + } + } + + /// <summary> + /// Data about individual manifest resources + /// </summary> + internal class ResourceIndexData + { + public ResourceIndexData(string assemblyName, string resourceName, int nativeOffset, int ecmaOffset, EcmaModule ecmaModule, int length) + { + AssemblyName = assemblyName; + ResourceName = resourceName; + NativeOffset = nativeOffset; + EcmaOffset = ecmaOffset; + EcmaModule = ecmaModule; + Length = length; + } + + /// <summary> + /// Full name of the assembly that contains the resource + /// </summary> + public string AssemblyName { get; } + + /// <summary> + /// Name of the resource + /// </summary> + public string ResourceName { get; } + + /// <summary> + /// Offset of the resource within the native resource blob + /// </summary> + public int NativeOffset { get; } + + /// <summary> + /// Offset of the resource within the .mresources section of the ECMA module + /// </summary> + public int EcmaOffset { get; } + + /// <summary> + /// Module the resource is defined in + /// </summary> + public EcmaModule EcmaModule { get; } + + /// <summary> + /// Length of the resource + /// 
</summary> + public int Length { get; } + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceIndexNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceIndexNode.cs new file mode 100644 index 000000000..7a9bfa453 --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/ResourceIndexNode.cs @@ -0,0 +1,100 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using Internal.NativeFormat; +using Internal.Text; +using System; +using System.IO; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Represents a hash table of resources within the resource blob in the image. + /// </summary> + internal class ResourceIndexNode : ObjectNode, ISymbolNode + { + private ResourceDataNode _resourceDataNode; + + public ResourceIndexNode(ResourceDataNode resourceDataNode) + { + _resourceDataNode = resourceDataNode; + _endSymbol = new ObjectAndOffsetSymbolNode(this, 0, "__embedded_resourceindex_End", true); + } + + private ObjectAndOffsetSymbolNode _endSymbol; + + public ISymbolNode EndSymbol => _endSymbol; + + public override bool IsShareable => false; + + public override ObjectNodeSection Section => ObjectNodeSection.ReadOnlyDataSection; + + public override bool StaticDependenciesAreComputed => true; + + public int Offset => 0; + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append(nameMangler.CompilationUnitPrefix).Append("__embedded_resourceindex"); + } + + protected override string GetName() => this.GetMangledName(); + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + // This node has no relocations. 
+ if (relocsOnly) + return new ObjectData(Array.Empty<byte>(), Array.Empty<Relocation>(), 1, new ISymbolNode[] { this }); + + byte[] blob = GenerateIndexBlob(factory); + return new ObjectData( + blob, + Array.Empty<Relocation>(), + 1, + new ISymbolNode[] + { + this, + EndSymbol + }); + } + + /// <summary> + /// Builds a native hashtable containing data about each manifest resource + /// </summary> + /// <returns></returns> + private byte[] GenerateIndexBlob(NodeFactory factory) + { + NativeWriter nativeWriter = new NativeWriter(); + Section indexHashtableSection = nativeWriter.NewSection(); + VertexHashtable indexHashtable = new VertexHashtable(); + indexHashtableSection.Place(indexHashtable); + + // Build a table with a tuple of Assembly Full Name, Resource Name, Offset within the resource data blob, Length + // for each resource. + // This generates a hashtable for the convenience of managed code since there's + // a reader for VertexHashtable, but not for VertexSequence. + + foreach (ResourceIndexData indexData in _resourceDataNode.GetOrCreateIndexData(factory)) + { + Vertex asmName = nativeWriter.GetStringConstant(indexData.AssemblyName); + Vertex resourceName = nativeWriter.GetStringConstant(indexData.ResourceName); + Vertex offsetVertex = nativeWriter.GetUnsignedConstant((uint)indexData.NativeOffset); + Vertex lengthVertex = nativeWriter.GetUnsignedConstant((uint)indexData.Length); + + Vertex indexVertex = nativeWriter.GetTuple(asmName, resourceName); + indexVertex = nativeWriter.GetTuple(indexVertex, offsetVertex); + indexVertex = nativeWriter.GetTuple(indexVertex, lengthVertex); + + int hashCode = TypeHashingAlgorithms.ComputeNameHashCode(indexData.AssemblyName); + indexHashtable.Append((uint)hashCode, indexHashtableSection.Place(indexVertex)); + } + + MemoryStream stream = new MemoryStream(); + nativeWriter.Save(stream); + byte[] blob = stream.ToArray(); + _endSymbol.SetSymbolOffset(blob.Length); + return blob; + } + } +}
\ No newline at end of file diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RuntimeMethodHandleNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RuntimeMethodHandleNode.cs new file mode 100644 index 000000000..ef95dfa27 --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RuntimeMethodHandleNode.cs @@ -0,0 +1,48 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.Diagnostics; + +using Internal.Text; +using Internal.TypeSystem; + +namespace ILCompiler.DependencyAnalysis +{ + class RuntimeMethodHandleNode : ObjectNode, ISymbolNode + { + MethodDesc _targetMethod; + + public RuntimeMethodHandleNode(NodeFactory factory, MethodDesc targetMethod) + { + Debug.Assert(!targetMethod.IsSharedByGenericInstantiations); + _targetMethod = targetMethod; + } + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append(nameMangler.CompilationUnitPrefix) + .Append("__RuntimeMethodHandle_") + .Append(NodeFactory.NameMangler.GetMangledMethodName(_targetMethod)); + } + public int Offset => 0; + protected override string GetName() => this.GetMangledName(); + public override ObjectNodeSection Section => ObjectNodeSection.ReadOnlyDataSection; + public override bool IsShareable => false; + public override bool StaticDependenciesAreComputed => true; + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + ObjectDataBuilder objData = new ObjectDataBuilder(factory); + + objData.Alignment = objData.TargetPointerSize; + objData.DefinedSymbols.Add(this); + + NativeLayoutMethodLdTokenVertexNode ldtokenSigNode = factory.NativeLayout.MethodLdTokenVertex(_targetMethod); + objData.EmitPointerReloc(factory.NativeLayout.NativeLayoutSignature(ldtokenSigNode)); + + return 
objData.ToObjectData(); + } + } +} diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RyuJitNodeFactory.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RyuJitNodeFactory.cs index b8430f23c..cb2d74962 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RyuJitNodeFactory.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/RyuJitNodeFactory.cs @@ -49,19 +49,7 @@ namespace ILCompiler.DependencyAnalysis protected override ISymbolNode CreateReadyToRunHelperNode(Tuple<ReadyToRunHelperId, object> helperCall) { - ReadyToRunHelperNode node = new ReadyToRunHelperNode(this, helperCall.Item1, helperCall.Item2); - - if ((node.Id != ReadyToRunHelperId.GetThreadStaticBase) || - CompilationModuleGroup.ContainsType((TypeDesc)node.Target)) - { - return node; - } - else - { - // The ReadyToRun helper for a type with thread static fields resides in the same module as the target type. - // Other modules should use an extern symbol node to access it. 
- return ExternSymbol(node.GetMangledName()); - } + return new ReadyToRunHelperNode(this, helperCall.Item1, helperCall.Item2); } } } diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunGenericHelperNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunGenericHelperNode.cs index 9b5bba78b..2ab092c7f 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunGenericHelperNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunGenericHelperNode.cs @@ -99,6 +99,42 @@ namespace ILCompiler.DependencyAnalysis } break; + case ReadyToRunHelperId.GetThreadStaticBase: + { + MetadataType target = (MetadataType)_target; + + // Look up the index cell + EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg1, _lookupSignature, relocsOnly); + + ISymbolNode helperEntrypoint; + if (factory.TypeSystemContext.HasLazyStaticConstructor(target)) + { + // There is a lazy class constructor. We need the non-GC static base because that's where the + // class constructor context lives. + GenericLookupResult nonGcRegionLookup = factory.GenericLookup.TypeNonGCStaticBase(target); + EmitDictionaryLookup(factory, ref encoder, encoder.TargetRegister.Arg0, encoder.TargetRegister.Arg2, nonGcRegionLookup, relocsOnly); + + helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.EnsureClassConstructorRunAndReturnThreadStaticBase); + } + else + { + helperEntrypoint = factory.HelperEntrypoint(HelperEntrypoint.GetThreadStaticBaseForType); + } + + // First arg: address of the TypeManager slot that provides the helper with + // information about module index and the type manager instance (which is used + // for initialization on first access). 
+ AddrMode loadFromArg1 = new AddrMode(encoder.TargetRegister.Arg1, null, 0, 0, AddrModeSize.Int64); + encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromArg1); + + // Second arg: index of the type in the ThreadStatic section of the modules + AddrMode loadFromArg1AndDelta = new AddrMode(encoder.TargetRegister.Arg1, null, factory.Target.PointerSize, 0, AddrModeSize.Int64); + encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromArg1AndDelta); + + encoder.EmitJMP(helperEntrypoint); + } + break; + // These are all simple: just get the thing from the dictionary and we're done case ReadyToRunHelperId.TypeHandle: case ReadyToRunHelperId.MethodDictionary: diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunHelperNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunHelperNode.cs index 8a7bea4a1..b1f859519 100644 --- a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunHelperNode.cs +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/Target_X64/X64ReadyToRunHelperNode.cs @@ -110,27 +110,18 @@ namespace ILCompiler.DependencyAnalysis case ReadyToRunHelperId.GetThreadStaticBase: { MetadataType target = (MetadataType)Target; - ThreadStaticsNode targetNode = factory.TypeThreadStaticsSymbol(target) as ThreadStaticsNode; - int typeTlsIndex = 0; - // The GetThreadStaticBase helper should be generated only in the compilation module group - // that contains the thread static field because the helper needs the index of the type - // in Thread Static section of the containing module. 
- // TODO: This needs to be fixed this for the multi-module compilation - Debug.Assert(targetNode != null); - - if (!relocsOnly) - { - // Get index of the targetNode in the Thread Static region - typeTlsIndex = factory.ThreadStaticsRegion.IndexOfEmbeddedObject(targetNode); - } + encoder.EmitLEAQ(encoder.TargetRegister.Arg2, factory.TypeThreadStaticIndex(target)); // First arg: address of the TypeManager slot that provides the helper with // information about module index and the type manager instance (which is used // for initialization on first access). - encoder.EmitLEAQ(encoder.TargetRegister.Arg0, factory.TypeManagerIndirection); + AddrMode loadFromArg2 = new AddrMode(encoder.TargetRegister.Arg2, null, 0, 0, AddrModeSize.Int64); + encoder.EmitMOV(encoder.TargetRegister.Arg0, ref loadFromArg2); + // Second arg: index of the type in the ThreadStatic section of the modules - encoder.EmitMOV(encoder.TargetRegister.Arg1, typeTlsIndex); + AddrMode loadFromArg2AndDelta = new AddrMode(encoder.TargetRegister.Arg2, null, factory.Target.PointerSize, 0, AddrModeSize.Int64); + encoder.EmitMOV(encoder.TargetRegister.Arg1, ref loadFromArg2AndDelta); if (!factory.TypeSystemContext.HasLazyStaticConstructor(target)) { diff --git a/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/TypeThreadStaticIndexNode.cs b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/TypeThreadStaticIndexNode.cs new file mode 100644 index 000000000..9949961b2 --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/DependencyAnalysis/TypeThreadStaticIndexNode.cs @@ -0,0 +1,61 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using Internal.Text; +using Internal.TypeSystem; + +namespace ILCompiler.DependencyAnalysis +{ + /// <summary> + /// Represents a node containing information necessary at runtime to locate type's thread static base. + /// </summary> + internal class TypeThreadStaticIndexNode : ObjectNode, ISymbolNode + { + private MetadataType _type; + + public TypeThreadStaticIndexNode(MetadataType type) + { + _type = type; + } + + public void AppendMangledName(NameMangler nameMangler, Utf8StringBuilder sb) + { + sb.Append("__TypeThreadStaticIndex_") + .Append(NodeFactory.NameMangler.GetMangledTypeName(_type)); + } + public int Offset => 0; + protected override string GetName() => this.GetMangledName(); + public override ObjectNodeSection Section => ObjectNodeSection.ReadOnlyDataSection; + public override bool IsShareable => true; + public override bool StaticDependenciesAreComputed => true; + + protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory) + { + return new DependencyList + { + new DependencyListEntry(factory.TypeThreadStaticsSymbol(_type), "Thread static storage") + }; + } + + public override ObjectData GetData(NodeFactory factory, bool relocsOnly = false) + { + ObjectDataBuilder objData = new ObjectDataBuilder(factory); + + objData.Alignment = objData.TargetPointerSize; + objData.DefinedSymbols.Add(this); + + int typeTlsIndex = 0; + if (!relocsOnly) + { + var node = factory.TypeThreadStaticsSymbol(_type); + typeTlsIndex = factory.ThreadStaticsRegion.IndexOfEmbeddedObject(node); + } + + objData.EmitPointerReloc(factory.TypeManagerIndirection); + objData.EmitNaturalInt(typeTlsIndex); + + return objData.ToObjectData(); + } + } +} diff --git a/src/ILCompiler.Compiler/src/Compiler/LibraryInitializers.cs b/src/ILCompiler.Compiler/src/Compiler/LibraryInitializers.cs new file mode 100644 index 000000000..42b942ce2 --- /dev/null +++ b/src/ILCompiler.Compiler/src/Compiler/LibraryInitializers.cs @@ -0,0 +1,103 @@ +// Licensed to the .NET 
Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.Collections.Generic; + +using Internal.TypeSystem; + +using AssemblyName = System.Reflection.AssemblyName; +using Debug = System.Diagnostics.Debug; + +namespace ILCompiler +{ + /// <summary> + /// Encapsulates a list of class constructors that must be run in a prescribed order during start-up + /// </summary> + public sealed class LibraryInitializers + { + private const string ClassLibraryPlaceHolderString = "*ClassLibrary*"; + private const string LibraryInitializerContainerNamespaceName = "Internal.Runtime.CompilerHelpers"; + private const string LibraryInitializerContainerTypeName = "LibraryInitializer"; + private const string LibraryInitializerMethodName = "InitializeLibrary"; + + private static readonly LibraryInitializerInfo[] s_assembliesWithLibraryInitializers = + { + new LibraryInitializerInfo(ClassLibraryPlaceHolderString), + new LibraryInitializerInfo("System.Private.TypeLoader", false), + new LibraryInitializerInfo("System.Private.Reflection.Execution", false), + new LibraryInitializerInfo("System.Private.DeveloperExperience.Console"), + }; + + private List<MethodDesc> _libraryInitializerMethods; + + private readonly TypeSystemContext _context; + private readonly bool _isCppCodeGen; + + public LibraryInitializers(TypeSystemContext context, bool isCppCodeGen) + { + _context = context; + // + // We should not care which code-gen is being used, however currently CppCodeGen cannot + // handle code pulled in by all explicit cctors. 
+ // + // See https://github.com/dotnet/corert/issues/2518 + // + _isCppCodeGen = isCppCodeGen; + } + + public IList<MethodDesc> LibraryInitializerMethods + { + get + { + if (_libraryInitializerMethods == null) + InitLibraryInitializers(); + + return _libraryInitializerMethods; + } + } + + private void InitLibraryInitializers() + { + Debug.Assert(_libraryInitializerMethods == null); + + _libraryInitializerMethods = new List<MethodDesc>(); + + foreach (var entry in s_assembliesWithLibraryInitializers) + { + if (_isCppCodeGen && !entry.UseWithCppCodeGen) + continue; + + ModuleDesc assembly = entry.Assembly == ClassLibraryPlaceHolderString + ? _context.SystemModule + : _context.ResolveAssembly(new AssemblyName(entry.Assembly), false); + + if (assembly == null) + continue; + + TypeDesc containingType = assembly.GetType(LibraryInitializerContainerNamespaceName, LibraryInitializerContainerTypeName, false); + if (containingType == null) + continue; + + MethodDesc initializerMethod = containingType.GetMethod(LibraryInitializerMethodName, null); + if (initializerMethod == null) + continue; + + _libraryInitializerMethods.Add(initializerMethod); + } + } + + private sealed class LibraryInitializerInfo + { + public string Assembly { get; } + public bool UseWithCppCodeGen { get; } + + public LibraryInitializerInfo(string assembly, bool useWithCppCodeGen = true) + { + Assembly = assembly; + UseWithCppCodeGen = useWithCppCodeGen; + } + } + } +} diff --git a/src/ILCompiler.Compiler/src/Compiler/MainMethodRootProvider.cs b/src/ILCompiler.Compiler/src/Compiler/MainMethodRootProvider.cs index ebcfc24e7..4df659ad8 100644 --- a/src/ILCompiler.Compiler/src/Compiler/MainMethodRootProvider.cs +++ b/src/ILCompiler.Compiler/src/Compiler/MainMethodRootProvider.cs @@ -3,6 +3,7 @@ // See the LICENSE file in the project root for more information. 
using System; +using System.Collections.Generic; using Internal.TypeSystem; using Internal.TypeSystem.Ecma; @@ -21,10 +22,12 @@ namespace ILCompiler public const string ManagedEntryPointMethodName = "__managed__Main"; private EcmaModule _module; + private IList<MethodDesc> _libraryInitializers; - public MainMethodRootProvider(EcmaModule module) + public MainMethodRootProvider(EcmaModule module, IList<MethodDesc> libraryInitializers) { _module = module; + _libraryInitializers = libraryInitializers; } public void AddCompilationRoots(IRootingServiceProvider rootProvider) @@ -34,7 +37,7 @@ namespace ILCompiler throw new Exception("No managed entrypoint defined for executable module"); TypeDesc owningType = _module.GetGlobalModuleType(); - var startupCodeMain = new StartupCodeMainMethod(owningType, mainMethod); + var startupCodeMain = new StartupCodeMainMethod(owningType, mainMethod, _libraryInitializers); rootProvider.AddCompilationRoot(startupCodeMain, "Startup Code Main Method", ManagedEntryPointMethodName); } diff --git a/src/ILCompiler.Compiler/src/Compiler/MetadataGeneration.cs b/src/ILCompiler.Compiler/src/Compiler/MetadataGeneration.cs index bad225bff..ff39e1b51 100644 --- a/src/ILCompiler.Compiler/src/Compiler/MetadataGeneration.cs +++ b/src/ILCompiler.Compiler/src/Compiler/MetadataGeneration.cs @@ -47,7 +47,6 @@ namespace ILCompiler private Dictionary<DynamicInvokeMethodSignature, MethodDesc> _dynamicInvokeThunks = new Dictionary<DynamicInvokeMethodSignature, MethodDesc>(); internal NativeLayoutInfoNode NativeLayoutInfo { get; private set; } - internal GenericsHashtableNode GenericsHashtable { get; private set; } public MetadataGeneration(NodeFactory factory) { @@ -72,8 +71,16 @@ namespace ILCompiler header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.EmbeddedMetadata), metadataNode, metadataNode, metadataNode.EndSymbol); var commonFixupsTableNode = new ExternalReferencesTableNode("CommonFixupsTable"); + var nativeReferencesTableNode = new 
ExternalReferencesTableNode("NativeReferences"); + var resourceDataNode = new ResourceDataNode(); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.BlobIdResourceData), resourceDataNode, resourceDataNode, resourceDataNode.EndSymbol); + + var resourceIndexNode = new ResourceIndexNode(resourceDataNode); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.BlobIdResourceIndex), resourceIndexNode, resourceIndexNode, resourceIndexNode.EndSymbol); + var typeMapNode = new TypeMetadataMapNode(commonFixupsTableNode); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.TypeMap), typeMapNode, typeMapNode, typeMapNode.EndSymbol); var cctorContextMapNode = new ClassConstructorContextMap(commonFixupsTableNode); @@ -88,18 +95,18 @@ namespace ILCompiler var fieldMapNode = new ReflectionFieldMapNode(commonFixupsTableNode); header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.FieldAccessMap), fieldMapNode, fieldMapNode, fieldMapNode.EndSymbol); - var externalNativeReferencesTableNode = new ExternalReferencesTableNode("NativeReferences"); - header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.NativeReferences), externalNativeReferencesTableNode, externalNativeReferencesTableNode, externalNativeReferencesTableNode.EndSymbol); - - NativeLayoutInfo = new NativeLayoutInfoNode(externalNativeReferencesTableNode); + NativeLayoutInfo = new NativeLayoutInfoNode(nativeReferencesTableNode); header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.NativeLayoutInfo), NativeLayoutInfo, NativeLayoutInfo, NativeLayoutInfo.EndSymbol); - GenericsHashtable = new GenericsHashtableNode(externalNativeReferencesTableNode); - header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.GenericsHashtable), GenericsHashtable, GenericsHashtable, GenericsHashtable.EndSymbol); + var exactMethodInstantiations = new ExactMethodInstantiationsNode(nativeReferencesTableNode); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.ExactMethodInstantiationsHashtable), exactMethodInstantiations, 
exactMethodInstantiations, exactMethodInstantiations.EndSymbol); + + var genericsHashtable = new GenericsHashtableNode(nativeReferencesTableNode); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.GenericsHashtable), genericsHashtable, genericsHashtable, genericsHashtable.EndSymbol); // This one should go last - header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.CommonFixupsTable), - commonFixupsTableNode, commonFixupsTableNode, commonFixupsTableNode.EndSymbol); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.CommonFixupsTable), commonFixupsTableNode, commonFixupsTableNode, commonFixupsTableNode.EndSymbol); + header.Add(BlobIdToReadyToRunSection(ReflectionMapBlob.NativeReferences), nativeReferencesTableNode, nativeReferencesTableNode, nativeReferencesTableNode.EndSymbol); } private void Graph_NewMarkedNode(DependencyNodeCore<NodeFactory> obj) @@ -109,14 +116,6 @@ namespace ILCompiler { _typesWithEETypesGenerated.Add(eetypeNode.Type); AddGeneratedType(eetypeNode.Type); - - // If this is an instantiated non-canonical generic type, add it to the generic instantiations hashtable - if (eetypeNode.Type.HasInstantiation && !eetypeNode.Type.IsGenericDefinition) - { - if (!eetypeNode.Type.IsCanonicalSubtype(CanonicalFormKind.Any)) - GenericsHashtable.AddInstantiatedTypeEntry(_nodeFactory, eetypeNode.Type); - } - return; } @@ -343,6 +342,14 @@ namespace ILCompiler } } + /// <summary> + /// Returns a set of modules that will get some metadata emitted into the output module + /// </summary> + public HashSet<ModuleDesc> GetModulesWithMetadata() + { + return _modulesSeen; + } + public byte[] GetMetadataBlob() { EnsureMetadataGenerated(); @@ -377,11 +384,21 @@ namespace ILCompiler return _arrayTypesGenerated; } + internal IEnumerable<MethodDesc> GetCompiledMethods() + { + return _methodsGenerated; + } + internal bool TypeGeneratesEEType(TypeDesc type) { return _typesWithEETypesGenerated.Contains(type); } + internal IEnumerable<TypeDesc> GetTypesWithEETypes() + 
{ + return _typesWithEETypesGenerated; + } + private struct DummyMetadataPolicy : IMetadataPolicy { private MetadataGeneration _parent; diff --git a/src/ILCompiler.Compiler/src/Compiler/NameMangler.cs b/src/ILCompiler.Compiler/src/Compiler/NameMangler.cs index 4ebb2df61..c6f2e6335 100644 --- a/src/ILCompiler.Compiler/src/Compiler/NameMangler.cs +++ b/src/ILCompiler.Compiler/src/Compiler/NameMangler.cs @@ -87,14 +87,14 @@ namespace ILCompiler sb.Append("_"); } - string santizedName = (sb != null) ? sb.ToString() : s; + string sanitizedName = (sb != null) ? sb.ToString() : s; // The character sequences denoting generic instantiations, arrays, byrefs, or pointers must be // restricted to that use only. Replace them if they happened to be used in any identifiers in // the compilation input. return _mangleForCplusPlus - ? santizedName.Replace(EnterNameScopeSequence, "_AA_").Replace(ExitNameScopeSequence, "_VV_") - : santizedName; + ? sanitizedName.Replace(EnterNameScopeSequence, "_AA_").Replace(ExitNameScopeSequence, "_VV_") + : sanitizedName; } /// <summary> diff --git a/src/ILCompiler.Compiler/src/CppCodeGen/CppWriter.cs b/src/ILCompiler.Compiler/src/CppCodeGen/CppWriter.cs index c9bdff5c9..413f5db33 100644 --- a/src/ILCompiler.Compiler/src/CppCodeGen/CppWriter.cs +++ b/src/ILCompiler.Compiler/src/CppCodeGen/CppWriter.cs @@ -268,7 +268,7 @@ namespace ILCompiler.CppCodeGen /// <returns>C++ declaration name for <param name="methodName"/>.</returns> public string GetCppMethodDeclarationName(TypeDesc owningType, string methodName) { - var s = GetCppTypeName(owningType); + var s = _compilation.NameMangler.GetMangledTypeName(owningType); if (s.StartsWith("::")) { // For a Method declaration we do not need the starting :: @@ -297,10 +297,10 @@ namespace ILCompiler.CppCodeGen public string SanitizeCppVarName(string varName) { // TODO: name mangling robustness - if (varName == "errno") // some names collide with CRT headers - varName += "_"; + if (varName == "errno" || varName 
== "environ" || varName == "template" || varName == "typename") // some names collide with CRT headers + return "_" + varName + "_"; - return varName; + return _compilation.NameMangler.SanitizeName(varName); } private void CompileExternMethod(CppMethodCodeNode methodCodeNodeNeedingCode, string importName) @@ -378,20 +378,6 @@ namespace ILCompiler.CppCodeGen if (methodIL == null) return; - // TODO: Remove this code once CppCodegen is able to generate code for the reflection startup path. - // The startup path runs before any user code is executed. - // For now we replace the startup path with a simple "ret". Reflection won't work, but - // programs not using reflection will. - if (method.Name == ".cctor") - { - MetadataType owningType = method.OwningType as MetadataType; - if (owningType != null && - owningType.Name == "ReflectionExecution" && owningType.Namespace == "Internal.Reflection.Execution") - { - methodIL = new Internal.IL.Stubs.ILStubMethodIL(method, new byte[] { (byte)ILOpcode.ret }, Array.Empty<LocalVariableDefinition>(), null); - } - } - try { // TODO: hacky special-case @@ -1037,15 +1023,14 @@ namespace ILCompiler.CppCodeGen { _emittedTypes = new HashSet<TypeDesc>(); } + TypeDesc nodeType = typeNode.Type; - if (nodeType.IsPointer || nodeType.IsByRef || _emittedTypes.Contains(nodeType)) + if (_emittedTypes.Contains(nodeType)) return; - _emittedTypes.Add(nodeType); - // Create Namespaces - string mangledName = GetCppTypeName(nodeType); + string mangledName = _compilation.NameMangler.GetMangledTypeName(nodeType); int nesting = 0; int current = 0; @@ -1088,7 +1073,6 @@ namespace ILCompiler.CppCodeGen // TODO: Enable once the dependencies are tracked for arrays // if (((DependencyNode)_compilation.NodeFactory.ConstructedTypeSymbol(t)).Marked) - if (!nodeType.IsPointer && !nodeType.IsByRef) { typeDefinitions.AppendLine(); typeDefinitions.Append("static MethodTable * __getMethodTable();"); @@ -1154,11 +1138,8 @@ namespace ILCompiler.CppCodeGen 
typeDefinitions.AppendEmptyLine(); // declare method table - if (!nodeType.IsPointer && !nodeType.IsByRef) - { - methodTable.Append(GetCodeForObjectNode(typeNode as ObjectNode, factory)); - methodTable.AppendEmptyLine(); - } + methodTable.Append(GetCodeForObjectNode(typeNode as ObjectNode, factory)); + methodTable.AppendEmptyLine(); } private String GetCodeForReadyToRunHeader(ReadyToRunHeaderNode headerNode, NodeFactory factory) diff --git a/src/ILCompiler.Compiler/src/CppCodeGen/ILToCppImporter.cs b/src/ILCompiler.Compiler/src/CppCodeGen/ILToCppImporter.cs index 78e225be6..7aa132b61 100644 --- a/src/ILCompiler.Compiler/src/CppCodeGen/ILToCppImporter.cs +++ b/src/ILCompiler.Compiler/src/CppCodeGen/ILToCppImporter.cs @@ -149,10 +149,10 @@ namespace Internal.IL var localSlotToInfoMap = new Dictionary<int, ILLocalVariable>(); foreach (var v in localVariables) { - string sanitizedName = _compilation.NameMangler.SanitizeName(v.Name); - if (!names.Add(v.Name)) + string sanitizedName = _writer.SanitizeCppVarName(v.Name); + if (!names.Add(sanitizedName)) { - sanitizedName = string.Format("{0}_local{1}", v.Name, v.Slot); + sanitizedName = string.Format("{0}_local{1}", sanitizedName, v.Slot); names.Add(sanitizedName); } @@ -172,7 +172,7 @@ namespace Internal.IL int index = 0; foreach (var p in parameters) { - parameterIndexToNameMap[index] = p; + parameterIndexToNameMap[index] = _writer.SanitizeCppVarName(p); ++index; } @@ -422,7 +422,7 @@ namespace Internal.IL if (_parameterIndexToNameMap != null && argument && _parameterIndexToNameMap.ContainsKey(index)) { - return _writer.SanitizeCppVarName(_parameterIndexToNameMap[index]); + return _parameterIndexToNameMap[index]; } return (argument ? 
"_a" : "_l") + index.ToStringInvariant(); @@ -2417,7 +2417,7 @@ namespace Internal.IL "::", _writer.GetCppMethodName(helper), "((intptr_t)", - _writer.GetCppTypeName((TypeDesc)ldtokenValue), + _compilation.NameMangler.GetMangledTypeName((TypeDesc)ldtokenValue), "::__getMethodTable())"); value = new LdTokenEntry<TypeDesc>(StackValueKind.ValueType, name, (TypeDesc)ldtokenValue, GetWellKnownType(ldtokenKind)); @@ -2493,7 +2493,7 @@ namespace Internal.IL GetSignatureTypeNameAndAddReference(type); - PushExpression(StackValueKind.Int32, "sizeof(" + _writer.GetCppTypeName(type) + ")"); + PushExpression(StackValueKind.Int32, "(int32_t)sizeof(" + _writer.GetCppTypeName(type) + ")"); } private void ImportRefAnyType() @@ -2575,9 +2575,12 @@ namespace Internal.IL AddTypeDependency(type, constructed); - foreach (var field in type.GetFields()) + if (!type.IsGenericDefinition) { - AddTypeDependency(field.FieldType, false); + foreach (var field in type.GetFields()) + { + AddTypeDependency(field.FieldType, false); + } } } private void AddTypeDependency(TypeDesc type, bool constructed) @@ -2586,14 +2589,8 @@ namespace Internal.IL { return; } - else if (type.IsPointer || type.IsByRef) - { - Debug.Assert(type is ParameterizedType); - AddTypeDependency((type as ParameterizedType).ParameterType, constructed); - return; - } - Object node; + Object node; if (constructed) node = _nodeFactory.ConstructedTypeSymbol(type); else diff --git a/src/ILCompiler.Compiler/src/IL/Stubs/StartupCode/StartupCodeMainMethod.cs b/src/ILCompiler.Compiler/src/IL/Stubs/StartupCode/StartupCodeMainMethod.cs index afb38ad6c..251dd3af3 100644 --- a/src/ILCompiler.Compiler/src/IL/Stubs/StartupCode/StartupCodeMainMethod.cs +++ b/src/ILCompiler.Compiler/src/IL/Stubs/StartupCode/StartupCodeMainMethod.cs @@ -3,6 +3,7 @@ // See the LICENSE file in the project root for more information. 
using System; +using System.Collections.Generic; using Internal.TypeSystem; @@ -20,11 +21,13 @@ namespace Internal.IL.Stubs.StartupCode private TypeDesc _owningType; private MainMethodWrapper _mainMethod; private MethodSignature _signature; + private IList<MethodDesc> _libraryInitializers; - public StartupCodeMainMethod(TypeDesc owningType, MethodDesc mainMethod) + public StartupCodeMainMethod(TypeDesc owningType, MethodDesc mainMethod, IList<MethodDesc> libraryInitializers) { _owningType = owningType; _mainMethod = new MainMethodWrapper(owningType, mainMethod); + _libraryInitializers = libraryInitializers; } public override TypeSystemContext Context @@ -56,14 +59,15 @@ namespace Internal.IL.Stubs.StartupCode ILEmitter emitter = new ILEmitter(); ILCodeStream codeStream = emitter.NewCodeStream(); - ModuleDesc developerExperience = Context.ResolveAssembly(new AssemblyName("System.Private.DeveloperExperience.Console"), false); - if (developerExperience != null) + // Allow the class library to run explicitly ordered class constructors first thing in start-up. 
+ if (_libraryInitializers != null) { - TypeDesc connectorType = developerExperience.GetKnownType("Internal.DeveloperExperience", "DeveloperExperienceConnectorConsole"); - MethodDesc initializeMethod = connectorType.GetKnownMethod("Initialize", null); - codeStream.Emit(ILOpcode.call, emitter.NewToken(initializeMethod)); + foreach (MethodDesc method in _libraryInitializers) + { + codeStream.Emit(ILOpcode.call, emitter.NewToken(method)); + } } - + MetadataType startup = Context.GetHelperType("StartupCodeHelpers"); // Initialize command line args if the class library supports this diff --git a/src/ILCompiler.Compiler/src/ILCompiler.Compiler.csproj b/src/ILCompiler.Compiler/src/ILCompiler.Compiler.csproj index f5f2f3f3d..2330e0a02 100644 --- a/src/ILCompiler.Compiler/src/ILCompiler.Compiler.csproj +++ b/src/ILCompiler.Compiler/src/ILCompiler.Compiler.csproj @@ -90,6 +90,7 @@ <Link>JitInterface\JitConfigProvider.cs</Link> </Compile> <Compile Include="Compiler\CompilerGeneratedType.cs" /> + <Compile Include="Compiler\LibraryInitializers.cs" /> <Compile Include="Compiler\ICompilationRootProvider.cs" /> <Compile Include="Compiler\Compilation.cs" /> <Compile Include="Compiler\CompilationBuilder.cs" /> @@ -103,7 +104,11 @@ <Compile Include="Compiler\DependencyAnalysis\ArrayMapNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ReflectionFieldMapNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\NativeLayoutInfoNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\NativeLayoutVertexNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\GenericsHashtableNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\ExactMethodInstantiationsNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\RuntimeMethodHandleNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\NativeLayoutSignatureNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ReadyToRunGenericHelperNode.cs" /> <Compile 
Include="Compiler\DependencyAnalysis\ArrayOfFrozenObjectsNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ClassConstructorContextMap.cs" /> @@ -125,9 +130,10 @@ <Compile Include="Compiler\DependencyAnalysis\GenericLookupResult.cs" /> <Compile Include="Compiler\DependencyAnalysis\IEETypeNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\INodeWithRuntimeDeterminedDependencies.cs" /> - <Compile Include="Compiler\DependencyAnalysis\NodeFactory.GenericLookups.cs" /> <Compile Include="Compiler\DependencyAnalysis\PInvokeMethodFixupNode.cs" /> - <Compile Include="Compiler\DependencyAnalysis\PInvokeModuleFixupNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\PInvokeModuleFixupNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\ResourceDataNode.cs" /> + <Compile Include="Compiler\DependencyAnalysis\ResourceIndexNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\RyuJitNodeFactory.cs" /> <Compile Include="Compiler\DependencyAnalysis\CppMethodCodeNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\RuntimeImportMethodNode.cs" /> @@ -158,6 +164,8 @@ <Compile Include="Compiler\DependencyAnalysis\INodeWithDebugInfo.cs" /> <Compile Include="Compiler\DependencyAnalysis\ISymbolNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\NodeFactory.cs" /> + <Compile Include="Compiler\DependencyAnalysis\NodeFactory.GenericLookups.cs" /> + <Compile Include="Compiler\DependencyAnalysis\NodeFactory.NativeLayout.cs" /> <Compile Include="Compiler\DependencyAnalysis\NonGCStaticsNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ObjectAndOffsetSymbolNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ObjectDataBuilder.cs" /> @@ -168,6 +176,7 @@ <Compile Include="Compiler\DependencyAnalysis\Target_X64\TargetRegisterMap.cs" /> <Compile Include="Compiler\DependencyAnalysis\Target_X64\X64UnboxingStubNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\ThreadStaticsNode.cs" /> + <Compile 
Include="Compiler\DependencyAnalysis\TypeThreadStaticIndexNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\TypeMetadataMapNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\UnboxingStubNode.cs" /> <Compile Include="Compiler\DependencyAnalysis\VirtualMethodUseNode.cs" /> @@ -266,7 +275,7 @@ </Compile> <Compile Include="..\..\Common\src\TypeSystem\Interop\IL\PInvokeMethodData.cs"> <Link>TypeSystem\Interop\IL\PInvokeMethodData.cs</Link> - </Compile> + </Compile> <Compile Include="..\..\Common\src\TypeSystem\IL\Stubs\UnsafeIntrinsics.cs"> <Link>IL\Stubs\UnsafeIntrinsics.cs</Link> </Compile> diff --git a/src/ILCompiler.TypeSystem/src/ILCompiler.TypeSystem.csproj b/src/ILCompiler.TypeSystem/src/ILCompiler.TypeSystem.csproj index aac62c84e..e2ea5f3cd 100644 --- a/src/ILCompiler.TypeSystem/src/ILCompiler.TypeSystem.csproj +++ b/src/ILCompiler.TypeSystem/src/ILCompiler.TypeSystem.csproj @@ -212,6 +212,9 @@ <Compile Include="..\..\Common\src\TypeSystem\Common\TypeSystemHelpers.cs"> <Link>TypeSystem\Common\TypeSystemHelpers.cs</Link> </Compile> + <Compile Include="..\..\Common\src\TypeSystem\Common\TypeSystemConstaintsHelpers.cs"> + <Link>TypeSystem\Common\TypeSystemConstaintsHelpers.cs</Link> + </Compile> + <Compile Include="..\..\Common\src\TypeSystem\Common\Utilities\TypeNameFormatter.cs"> <Link>Utilities\TypeNameFormatter.cs</Link> </Compile> diff --git a/src/ILCompiler.TypeSystem/tests/ConstraintsValidationTest.cs b/src/ILCompiler.TypeSystem/tests/ConstraintsValidationTest.cs new file mode 100644 index 000000000..3b4d12395 --- /dev/null +++ b/src/ILCompiler.TypeSystem/tests/ConstraintsValidationTest.cs @@ -0,0 +1,254 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using Internal.TypeSystem; + +using Xunit; + +namespace TypeSystemTests +{ + public class ConstraintsValidationTest + { + private TestTypeSystemContext _context; + private ModuleDesc _testModule; + + private MetadataType _iNonGenType; + private MetadataType _iGenType; + private MetadataType _arg1Type; + private MetadataType _arg2Type; + private MetadataType _arg3Type; + private MetadataType _structArgWithDefaultCtorType; + private MetadataType _structArgWithoutDefaultCtorType; + private MetadataType _classArgWithDefaultCtorType; + private MetadataType _classArgWithoutDefaultCtorType; + private MetadataType _referenceTypeConstraintType; + private MetadataType _defaultConstructorConstraintType; + private MetadataType _notNullableValueTypeConstraintType; + private MetadataType _simpleTypeConstraintType; + private MetadataType _doubleSimpleTypeConstraintType; + private MetadataType _simpleGenericConstraintType; + private MetadataType _complexGenericConstraint1Type; + private MetadataType _complexGenericConstraint2Type; + private MetadataType _complexGenericConstraint3Type; + private MetadataType _multipleConstraintsType; + + public ConstraintsValidationTest() + { + _context = new TestTypeSystemContext(TargetArchitecture.Unknown); + var systemModule = _context.CreateModuleForSimpleName("CoreTestAssembly"); + _context.SetSystemModule(systemModule); + + _testModule = systemModule; + + _iNonGenType = _testModule.GetType("GenericConstraints", "INonGen"); + _iGenType = _testModule.GetType("GenericConstraints", "IGen`1"); + _arg1Type = _testModule.GetType("GenericConstraints", "Arg1"); + _arg2Type = _testModule.GetType("GenericConstraints", "Arg2`1"); + _arg3Type = _testModule.GetType("GenericConstraints", "Arg3`1"); + _structArgWithDefaultCtorType = _testModule.GetType("GenericConstraints", "StructArgWithDefaultCtor"); + _structArgWithoutDefaultCtorType = _testModule.GetType("GenericConstraints", "StructArgWithoutDefaultCtor"); + _classArgWithDefaultCtorType = 
_testModule.GetType("GenericConstraints", "ClassArgWithDefaultCtor"); + _classArgWithoutDefaultCtorType = _testModule.GetType("GenericConstraints", "ClassArgWithoutDefaultCtor"); + + _referenceTypeConstraintType = _testModule.GetType("GenericConstraints", "ReferenceTypeConstraint`1"); + _defaultConstructorConstraintType = _testModule.GetType("GenericConstraints", "DefaultConstructorConstraint`1"); + _notNullableValueTypeConstraintType = _testModule.GetType("GenericConstraints", "NotNullableValueTypeConstraint`1"); + _simpleTypeConstraintType = _testModule.GetType("GenericConstraints", "SimpleTypeConstraint`1"); + _doubleSimpleTypeConstraintType = _testModule.GetType("GenericConstraints", "DoubleSimpleTypeConstraint`1"); + _simpleGenericConstraintType = _testModule.GetType("GenericConstraints", "SimpleGenericConstraint`2"); + _complexGenericConstraint1Type = _testModule.GetType("GenericConstraints", "ComplexGenericConstraint1`2"); + _complexGenericConstraint2Type = _testModule.GetType("GenericConstraints", "ComplexGenericConstraint2`2"); + _complexGenericConstraint3Type = _testModule.GetType("GenericConstraints", "ComplexGenericConstraint3`2"); + _multipleConstraintsType = _testModule.GetType("GenericConstraints", "MultipleConstraints`2"); + } + + [Fact] + public void TestTypeConstraints() + { + MetadataType instantiatedType; + + MetadataType arg2OfInt = _arg2Type.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Int32)); + MetadataType arg2OfBool = _arg2Type.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Boolean)); + MetadataType arg2OfObject = _arg2Type.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Object)); + + // ReferenceTypeConstraint + { + instantiatedType = _referenceTypeConstraintType.MakeInstantiatedType(_arg1Type); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _referenceTypeConstraintType.MakeInstantiatedType(_iNonGenType); + Assert.True(instantiatedType.CheckConstraints()); + + 
instantiatedType = _referenceTypeConstraintType.MakeInstantiatedType(_structArgWithDefaultCtorType); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _referenceTypeConstraintType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Int32)); + Assert.False(instantiatedType.CheckConstraints()); + } + + // DefaultConstructorConstraint + { + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_arg1Type); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_classArgWithDefaultCtorType); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_classArgWithoutDefaultCtorType); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Int32)); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_structArgWithDefaultCtorType); + Assert.True(instantiatedType.CheckConstraints()); + + // Structs always have implicit default constructors + instantiatedType = _defaultConstructorConstraintType.MakeInstantiatedType(_structArgWithoutDefaultCtorType); + Assert.True(instantiatedType.CheckConstraints()); + } + + // NotNullableValueTypeConstraint + { + instantiatedType = _notNullableValueTypeConstraintType.MakeInstantiatedType(_arg1Type); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _notNullableValueTypeConstraintType.MakeInstantiatedType(_structArgWithDefaultCtorType); + Assert.True(instantiatedType.CheckConstraints()); + + MetadataType nullable = (MetadataType)_context.GetWellKnownType(WellKnownType.Nullable); + MetadataType nullableOfInt = nullable.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Int32)); + + instantiatedType = 
_notNullableValueTypeConstraintType.MakeInstantiatedType(nullableOfInt); + Assert.False(instantiatedType.CheckConstraints()); + } + + // SimpleTypeConstraint and DoubleSimpleTypeConstraint + foreach(var genType in new MetadataType[] { _simpleTypeConstraintType , _doubleSimpleTypeConstraintType }) + { + instantiatedType = genType.MakeInstantiatedType(_arg1Type); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = genType.MakeInstantiatedType(_iNonGenType); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = genType.MakeInstantiatedType(_classArgWithDefaultCtorType); + Assert.False(instantiatedType.CheckConstraints()); + } + + // SimpleGenericConstraint + { + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_arg1Type, _arg1Type); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_arg1Type, _iNonGenType); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_classArgWithDefaultCtorType, _classArgWithoutDefaultCtorType); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_arg1Type, _context.GetWellKnownType(WellKnownType.Object)); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_structArgWithDefaultCtorType, _context.GetWellKnownType(WellKnownType.ValueType)); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_arg1Type, _context.GetWellKnownType(WellKnownType.ValueType)); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.UInt16), _context.GetWellKnownType(WellKnownType.UInt32)); + 
Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _simpleGenericConstraintType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.UInt16), _context.GetWellKnownType(WellKnownType.ValueType)); + Assert.True(instantiatedType.CheckConstraints()); + } + + // ComplexGenericConstraint1 + { + instantiatedType = _complexGenericConstraint1Type.MakeInstantiatedType(_arg1Type, _arg1Type /* uninteresting */); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint1Type.MakeInstantiatedType(arg2OfInt, _arg1Type /* uninteresting */); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint1Type.MakeInstantiatedType(arg2OfBool, _arg1Type /* uninteresting */); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint1Type.MakeInstantiatedType(arg2OfObject, _arg1Type /* uninteresting */); + Assert.False(instantiatedType.CheckConstraints()); + } + + // ComplexGenericConstraint2 + { + MetadataType arg2OfArg2OfInt = _arg2Type.MakeInstantiatedType(arg2OfInt); + MetadataType arg2OfArg2OfBool = _arg2Type.MakeInstantiatedType(arg2OfBool); + MetadataType arg2OfArg2OfObject = _arg2Type.MakeInstantiatedType(arg2OfObject); + + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(_arg1Type, _context.GetWellKnownType(WellKnownType.Int32)); + Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfInt, _context.GetWellKnownType(WellKnownType.Int32)); + Assert.True(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfBool, _context.GetWellKnownType(WellKnownType.Int32)); + Assert.False(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfObject, _context.GetWellKnownType(WellKnownType.Int32)); + 
Assert.False(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfInt, _context.GetWellKnownType(WellKnownType.Object)); + Assert.False(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfBool, _context.GetWellKnownType(WellKnownType.Object)); + Assert.False(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfObject, _context.GetWellKnownType(WellKnownType.Object)); + Assert.True(instantiatedType.CheckConstraints()); + + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfInt, _context.GetWellKnownType(WellKnownType.Boolean)); + Assert.False(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfBool, _context.GetWellKnownType(WellKnownType.Boolean)); + Assert.True(instantiatedType.CheckConstraints()); + instantiatedType = _complexGenericConstraint2Type.MakeInstantiatedType(arg2OfArg2OfObject, _context.GetWellKnownType(WellKnownType.Boolean)); + Assert.False(instantiatedType.CheckConstraints()); + } + + // ComplexGenericConstraint3 + { + MetadataType igenOfObject = _iGenType.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Object)); + + instantiatedType = _complexGenericConstraint3Type.MakeInstantiatedType(igenOfObject, _context.GetWellKnownType(WellKnownType.Object)); + Assert.True(instantiatedType.CheckConstraints()); + + // Variance-compatible instantiation argument + instantiatedType = _complexGenericConstraint3Type.MakeInstantiatedType(igenOfObject, _context.GetWellKnownType(WellKnownType.String)); + Assert.True(instantiatedType.CheckConstraints()); + + // Type that implements the interface + var arg3OfObject = _arg3Type.MakeInstantiatedType(_context.GetWellKnownType(WellKnownType.Object)); + instantiatedType = 
_complexGenericConstraint3Type.MakeInstantiatedType(arg3OfObject, _context.GetWellKnownType(WellKnownType.Object)); + Assert.True(instantiatedType.CheckConstraints()); + + // Type that implements a variant compatible interface + instantiatedType = _complexGenericConstraint3Type.MakeInstantiatedType(arg3OfObject, _context.GetWellKnownType(WellKnownType.String)); + Assert.True(instantiatedType.CheckConstraints()); + } + + // MultipleConstraints + { + // Violate the class constraint + instantiatedType = _multipleConstraintsType.MakeInstantiatedType(_structArgWithDefaultCtorType, _context.GetWellKnownType(WellKnownType.Object)); + Assert.False(instantiatedType.CheckConstraints()); + + // Violate the new() constraint + instantiatedType = _multipleConstraintsType.MakeInstantiatedType(_classArgWithoutDefaultCtorType, _context.GetWellKnownType(WellKnownType.Object)); + Assert.False(instantiatedType.CheckConstraints()); + + // Violate the IGen<U> constraint + instantiatedType = _multipleConstraintsType.MakeInstantiatedType(_arg1Type, _context.GetWellKnownType(WellKnownType.Object)); + Assert.False(instantiatedType.CheckConstraints()); + + // Satisfy all constraints + instantiatedType = _multipleConstraintsType.MakeInstantiatedType(_classArgWithDefaultCtorType, _context.GetWellKnownType(WellKnownType.Object)); + Assert.True(instantiatedType.CheckConstraints()); + } + } + } +} diff --git a/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/CoreTestAssembly.csproj b/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/CoreTestAssembly.csproj index 056b0534a..74380612a 100644 --- a/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/CoreTestAssembly.csproj +++ b/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/CoreTestAssembly.csproj @@ -29,6 +29,7 @@ <Compile Include="Canonicalization.cs" /> <Compile Include="Casting.cs" /> <Compile Include="GCPointerMap.cs" /> + <Compile Include="GenericConstraints.cs" /> <Compile Include="Hashcode.cs" /> <Compile Include="InterfaceArrangements.cs" /> 
<Compile Include="GenericTypes.cs" /> diff --git a/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/GenericConstraints.cs b/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/GenericConstraints.cs new file mode 100644 index 000000000..8a9f7d394 --- /dev/null +++ b/src/ILCompiler.TypeSystem/tests/CoreTestAssembly/GenericConstraints.cs @@ -0,0 +1,53 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +namespace GenericConstraints +{ + public interface INonGen { } + + public interface IGen<in T> { } + + public class Arg1 : INonGen { } + + public class Arg2<T> { } + + public class Arg3<T> : IGen<T> { } + + public struct StructArgWithDefaultCtor { } + + public struct StructArgWithoutDefaultCtor + { + public StructArgWithoutDefaultCtor(int argument) { } + } + + public class ClassArgWithDefaultCtor : IGen<object> + { + public ClassArgWithDefaultCtor() { } + } + + public class ClassArgWithoutDefaultCtor : IGen<object> + { + public ClassArgWithoutDefaultCtor(int argument) { } + } + + public class ReferenceTypeConstraint<T> where T : class { } + + public class DefaultConstructorConstraint<T> where T : new() { } + + public class NotNullableValueTypeConstraint<T> where T : struct { } + + public class SimpleTypeConstraint<T> where T : Arg1 { } + + public class DoubleSimpleTypeConstraint<T> where T : Arg1, INonGen { } + + public class SimpleGenericConstraint<T, U> where T : U { } + + public class ComplexGenericConstraint1<T, U> where T : Arg2<int> { } + + public class ComplexGenericConstraint2<T, U> where T : Arg2<Arg2<U>> { } + + public class ComplexGenericConstraint3<T, U> where T : IGen<U> { } + + public class MultipleConstraints<T, U> where T : class, IGen<U>, new() { } +} diff --git a/src/ILCompiler.TypeSystem/tests/TypeSystem.Tests.csproj b/src/ILCompiler.TypeSystem/tests/TypeSystem.Tests.csproj index 
dc84f7306..ec379925b 100644 --- a/src/ILCompiler.TypeSystem/tests/TypeSystem.Tests.csproj +++ b/src/ILCompiler.TypeSystem/tests/TypeSystem.Tests.csproj @@ -38,6 +38,7 @@ <ItemGroup> <Compile Include="ArchitectureSpecificFieldLayoutTests.cs" /> <Compile Include="CanonicalizationTests.cs" /> + <Compile Include="ConstraintsValidationTest.cs" /> <Compile Include="GCPointerMapTests.cs" /> <Compile Include="GenericTypeAndMethodTests.cs" /> <Compile Include="CastingTests.cs" /> diff --git a/src/ILCompiler/reproNative/reproNativeCpp.vcxproj b/src/ILCompiler/reproNative/reproNativeCpp.vcxproj index a6ff763be..eb8789560 100644 --- a/src/ILCompiler/reproNative/reproNativeCpp.vcxproj +++ b/src/ILCompiler/reproNative/reproNativeCpp.vcxproj @@ -59,6 +59,7 @@ <AdditionalIncludeDirectories>..\..\Native\gc;..\..\Native\gc\env;..\..\Native\Bootstrap</AdditionalIncludeDirectories> <DisableSpecificWarnings>4477</DisableSpecificWarnings> <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary> + <AdditionalOptions>/bigobj</AdditionalOptions> </ClCompile> <Link> <SubSystem>Console</SubSystem> @@ -79,6 +80,7 @@ <AdditionalIncludeDirectories>..\..\Native\gc;..\..\Native\gc\env</AdditionalIncludeDirectories> <DisableSpecificWarnings>4477</DisableSpecificWarnings> <RuntimeLibrary>MultiThreaded</RuntimeLibrary> + <AdditionalOptions>/bigobj</AdditionalOptions> </ClCompile> <Link> <SubSystem>Console</SubSystem> diff --git a/src/ILCompiler/src/Program.cs b/src/ILCompiler/src/Program.cs index 04e265623..16793a5ac 100644 --- a/src/ILCompiler/src/Program.cs +++ b/src/ILCompiler/src/Program.cs @@ -209,7 +209,9 @@ namespace ILCompiler if (entrypointModule != null) { - compilationRoots.Add(new MainMethodRootProvider(entrypointModule)); + LibraryInitializers libraryInitializers = + new LibraryInitializers(typeSystemContext, _isCppCodegen); + compilationRoots.Add(new MainMethodRootProvider(entrypointModule, libraryInitializers.LibraryInitializerMethods)); } if (_multiFile) @@ -240,20 +242,6 @@ namespace 
ILCompiler compilationRoots.Add(new ExportedMethodsRootProvider((EcmaModule)typeSystemContext.SystemModule)); - // System.Private.Reflection.Execution needs to establish a communication channel with System.Private.CoreLib - // at process startup. This is done through an eager constructor that calls into CoreLib and passes it - // a callback object. - // - // Since CoreLib cannot reference anything, the type and it's eager constructor won't be added to the compilation - // unless we explictly add it. - - var refExec = typeSystemContext.GetModuleForSimpleName("System.Private.Reflection.Execution", false); - if (refExec != null) - { - var exec = refExec.GetType("Internal.Reflection.Execution", "ReflectionExecution"); - compilationRoots.Add(new SingleMethodRootProvider(exec.GetStaticConstructor())); - } - compilationGroup = new SingleFileCompilationModuleGroup(typeSystemContext); } diff --git a/src/JitInterface/src/CorInfoImpl.cs b/src/JitInterface/src/CorInfoImpl.cs index 8e3c1859a..2064ed4de 100644 --- a/src/JitInterface/src/CorInfoImpl.cs +++ b/src/JitInterface/src/CorInfoImpl.cs @@ -1661,7 +1661,7 @@ namespace Internal.JitInterface // Find out what kind of base do we need to look up. 
if (field.IsThreadStatic) { - throw new NotImplementedException(); + helperId = ReadyToRunHelperId.GetThreadStaticBase; } else if (field.HasGCStaticBase) { diff --git a/src/Native/Runtime/coreclr/gcinfodecoder.cpp b/src/Native/Runtime/coreclr/gcinfodecoder.cpp index fceee6662..1a8eb7eca 100644 --- a/src/Native/Runtime/coreclr/gcinfodecoder.cpp +++ b/src/Native/Runtime/coreclr/gcinfodecoder.cpp @@ -283,7 +283,7 @@ GcInfoDecoder::GcInfoDecoder( if (hasReversePInvokeFrame) { - m_ReversePInvokeFrameStackSlot = (INT32)m_Reader.DecodeVarLengthSigned(REVERSE_PINVOKE_FRAME_ENCBASE); + m_ReversePInvokeFrameStackSlot = (INT32)DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(REVERSE_PINVOKE_FRAME_ENCBASE)); } else { diff --git a/src/Native/Runtime/eventtrace.h b/src/Native/Runtime/eventtrace.h index 77ab912e9..ae88847ab 100644 --- a/src/Native/Runtime/eventtrace.h +++ b/src/Native/Runtime/eventtrace.h @@ -29,7 +29,18 @@ #define _VMEVENTTRACE_H_ #include "eventtracebase.h" +#include "gcinterface.h" +#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) +struct ProfilingScanContext : ScanContext +{ + BOOL fProfilerPinned; + void * pvEtwContext; + void *pHeapId; + + ProfilingScanContext(BOOL fProfilerPinnedParam); +}; +#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) namespace ETW { diff --git a/src/Native/Runtime/gcheaputilities.cpp b/src/Native/Runtime/gcheaputilities.cpp index 0ff2910fe..dd328fc46 100644 --- a/src/Native/Runtime/gcheaputilities.cpp +++ b/src/Native/Runtime/gcheaputilities.cpp @@ -7,4 +7,14 @@ #include "gcheaputilities.h" // This is the global GC heap, maintained by the VM. -GPTR_IMPL(IGCHeap, g_pGCHeap);
\ No newline at end of file +GPTR_IMPL(IGCHeap, g_pGCHeap); + +// These globals are variables used within the GC and maintained +// by the EE for use in write barriers. It is the responsibility +// of the GC to communicate updates to these globals to the EE through +// GCToEEInterface::StompWriteBarrier. +GPTR_IMPL_INIT(uint32_t, g_card_table, nullptr); +GPTR_IMPL_INIT(uint8_t, g_lowest_address, nullptr); +GPTR_IMPL_INIT(uint8_t, g_highest_address, nullptr); +uint8_t* g_ephemeral_low = (uint8_t*)1; +uint8_t* g_ephemeral_high = (uint8_t*)~0; diff --git a/src/Native/Runtime/gcheaputilities.h b/src/Native/Runtime/gcheaputilities.h index 6e08472a2..5aec56bb2 100644 --- a/src/Native/Runtime/gcheaputilities.h +++ b/src/Native/Runtime/gcheaputilities.h @@ -10,6 +10,19 @@ // The singular heap instance. GPTR_DECL(IGCHeap, g_pGCHeap); +#ifndef DACCESS_COMPILE +extern "C" { +#endif // !DACCESS_COMPILE +GPTR_DECL(uint8_t,g_lowest_address); +GPTR_DECL(uint8_t,g_highest_address); +GPTR_DECL(uint32_t,g_card_table); +#ifndef DACCESS_COMPILE +} +#endif // !DACCESS_COMPILE + +extern "C" uint8_t* g_ephemeral_low; +extern "C" uint8_t* g_ephemeral_high; + // GCHeapUtilities provides a number of static methods // that operate on the global heap instance. It can't be // instantiated. 
diff --git a/src/Native/Runtime/gcrhenv.cpp b/src/Native/Runtime/gcrhenv.cpp index 2ff736292..2dfb7fcf1 100644 --- a/src/Native/Runtime/gcrhenv.cpp +++ b/src/Native/Runtime/gcrhenv.cpp @@ -51,6 +51,25 @@ #include "holder.h" +#ifdef FEATURE_ETW + #ifndef _INC_WINDOWS + typedef void* LPVOID; + typedef uint32_t UINT; + typedef void* PVOID; + typedef uint64_t ULONGLONG; + typedef uint32_t ULONG; + typedef int64_t LONGLONG; + typedef uint8_t BYTE; + typedef uint16_t UINT16; + #endif // _INC_WINDOWS + + #include "etwevents.h" + #include "eventtrace.h" +#else // FEATURE_ETW + #include "etmdummy.h" + #define ETW_EVENT_ENABLED(e,f) false +#endif // FEATURE_ETW + GPTR_IMPL(EEType, g_pFreeObjectEEType); #define USE_CLR_CACHE_SIZE_BEHAVIOR @@ -119,7 +138,7 @@ UInt32 EtwCallback(UInt32 IsEnabled, RH_ETW_CONTEXT * pContext) FireEtwGCSettings(GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(FALSE), GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(TRUE), GCHeapUtilities::IsServerHeap()); - GCHeapUtilities::GetGCHeap()->TraceGCSegments(); + GCHeapUtilities::GetGCHeap()->DiagTraceGCSegments(); } // Special check for the runtime provider's GCHeapCollectKeyword. 
Profilers @@ -686,8 +705,8 @@ void RedhawkGCInterface::ScanHeap(GcScanObjectFunction pfnScanCallback, void *pC // static void RedhawkGCInterface::ScanObject(void *pObject, GcScanObjectFunction pfnScanCallback, void *pContext) { -#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)) - GCHeapUtilities::GetGCHeap()->WalkObject((Object*)pObject, (walk_fn)pfnScanCallback, pContext); +#if !defined(DACCESS_COMPILE) && defined(FEATURE_EVENT_TRACE) + GCHeapUtilities::GetGCHeap()->DiagWalkObject((Object*)pObject, (walk_fn)pfnScanCallback, pContext); #else UNREFERENCED_PARAMETER(pObject); UNREFERENCED_PARAMETER(pfnScanCallback); @@ -759,7 +778,7 @@ void RedhawkGCInterface::ScanStaticRoots(GcScanRootFunction pfnScanCallback, voi // static void RedhawkGCInterface::ScanHandleTableRoots(GcScanRootFunction pfnScanCallback, void *pContext) { -#if !defined(DACCESS_COMPILE) && (defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)) +#if !defined(DACCESS_COMPILE) && defined(FEATURE_EVENT_TRACE) ScanRootsContext sContext; sContext.m_pfnCallback = pfnScanCallback; sContext.m_pContext = pContext; @@ -1148,6 +1167,166 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa return threadStubArgs.m_pThread; } +void GCToEEInterface::DiagGCStart(int gen, bool isInduced) +{ + UNREFERENCED_PARAMETER(gen); + UNREFERENCED_PARAMETER(isInduced); +} + +void GCToEEInterface::DiagUpdateGenerationBounds() +{ +} + +void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) +{ + UNREFERENCED_PARAMETER(gcContext); +} + +void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) +{ + UNREFERENCED_PARAMETER(index); + UNREFERENCED_PARAMETER(gen); + UNREFERENCED_PARAMETER(reason); + UNREFERENCED_PARAMETER(fConcurrent); +} + +// Note on last parameter: when calling this for bgc, only ETW +// should be sending these events so that existing profapi profilers +// don't get confused. 
+void WalkMovedReferences(uint8_t* begin, uint8_t* end, + ptrdiff_t reloc, + size_t context, + BOOL fCompacting, + BOOL fBGC) +{ + UNREFERENCED_PARAMETER(begin); + UNREFERENCED_PARAMETER(end); + UNREFERENCED_PARAMETER(reloc); + UNREFERENCED_PARAMETER(context); + UNREFERENCED_PARAMETER(fCompacting); + UNREFERENCED_PARAMETER(fBGC); +} + +// +// Diagnostics code +// + +#ifdef FEATURE_EVENT_TRACE +inline BOOL ShouldTrackMovementForProfilerOrEtw() +{ + if (ETW::GCLog::ShouldTrackMovementForEtw()) + return true; + + return false; +} +#endif // FEATURE_EVENT_TRACE + +void GCToEEInterface::DiagWalkSurvivors(void* gcContext) +{ +#ifdef FEATURE_EVENT_TRACE + if (ShouldTrackMovementForProfilerOrEtw()) + { + size_t context = 0; + ETW::GCLog::BeginMovedReferences(&context); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_gc); + ETW::GCLog::EndMovedReferences(context); + } +#else + UNREFERENCED_PARAMETER(gcContext); +#endif // FEATURE_EVENT_TRACE +} + +void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext) +{ +#ifdef FEATURE_EVENT_TRACE + if (ShouldTrackMovementForProfilerOrEtw()) + { + size_t context = 0; + ETW::GCLog::BeginMovedReferences(&context); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_loh); + ETW::GCLog::EndMovedReferences(context); + } +#else + UNREFERENCED_PARAMETER(gcContext); +#endif // FEATURE_EVENT_TRACE +} + +void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext) +{ +#ifdef FEATURE_EVENT_TRACE + if (ShouldTrackMovementForProfilerOrEtw()) + { + size_t context = 0; + ETW::GCLog::BeginMovedReferences(&context); + GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_bgc); + ETW::GCLog::EndMovedReferences(context); + } +#else + UNREFERENCED_PARAMETER(gcContext); +#endif // FEATURE_EVENT_TRACE +} + +void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) +{ + // 
CoreRT doesn't patch the write barrier like CoreCLR does, but it + // still needs to record the changes in the GC heap. + assert(args != nullptr); + switch (args->operation) + { + case WriteBarrierOp::StompResize: + // StompResize requires a new card table, a new lowest address, and + // a new highest address + assert(args->card_table != nullptr); + assert(args->lowest_address != nullptr); + assert(args->highest_address != nullptr); + g_card_table = args->card_table; + + // We need to make sure that other threads executing checked write barriers + // will see the g_card_table update before g_lowest/highest_address updates. + // Otherwise, the checked write barrier may AV accessing the old card table + // with address that it does not cover. Write barriers access card table + // without memory barriers for performance reasons, so we need to flush + // the store buffers here. + FlushProcessWriteBuffers(); + + g_lowest_address = args->lowest_address; + VolatileStore(&g_highest_address, args->highest_address); + return; + case WriteBarrierOp::StompEphemeral: + // StompEphemeral requires a new ephemeral low and a new ephemeral high + assert(args->ephemeral_low != nullptr); + assert(args->ephemeral_high != nullptr); + g_ephemeral_low = args->ephemeral_low; + g_ephemeral_high = args->ephemeral_high; + return; + case WriteBarrierOp::Initialize: + // This operation should only be invoked once, upon initialization. 
+ assert(g_card_table == nullptr); + assert(g_lowest_address == nullptr); + assert(g_highest_address == nullptr); + assert(args->card_table != nullptr); + assert(args->lowest_address != nullptr); + assert(args->highest_address != nullptr); + assert(args->ephemeral_low != nullptr); + assert(args->ephemeral_high != nullptr); + assert(args->is_runtime_suspended && "the runtime must be suspended here!"); + + g_card_table = args->card_table; + g_lowest_address = args->lowest_address; + g_highest_address = args->highest_address; + g_ephemeral_low = args->ephemeral_low; + g_ephemeral_high = args->ephemeral_high; + return; + case WriteBarrierOp::SwitchToWriteWatch: + case WriteBarrierOp::SwitchToNonWriteWatch: + assert(!"CoreRT does not have an implementation of non-OS WriteWatch"); + return; + default: + assert(!"Unknokwn WriteBarrierOp enum"); + return; + } +} + #endif // !DACCESS_COMPILE // NOTE: this method is not in thread.cpp because it needs access to the layout of alloc_context for DAC to know the @@ -1339,14 +1518,6 @@ MethodTable * g_pFreeObjectMethodTable; int32_t g_TrapReturningThreads; bool g_fFinalizerRunOnShutDown; -void StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */) -{ -} - -void StompWriteBarrierResize(bool /* isRuntimeSuspended */, bool /*bReqUpperBoundsCheck*/) -{ -} - bool IsGCThread() { return false; @@ -1425,3 +1596,17 @@ void CPUGroupInfo::GetGroupForProcessor(uint16_t /*processor_number*/, uint16_t { ASSERT_UNCONDITIONALLY("NYI: CPUGroupInfo::GetGroupForProcessor"); } + +#if defined(FEATURE_EVENT_TRACE) && !defined(DACCESS_COMPILE) +ProfilingScanContext::ProfilingScanContext(BOOL fProfilerPinnedParam) + : ScanContext() +{ + pHeapId = NULL; + fProfilerPinned = fProfilerPinnedParam; + pvEtwContext = NULL; +#ifdef FEATURE_CONSERVATIVE_GC + // To not confuse GCScan::GcScanRoots + promotion = g_pConfig->GetGCConservative(); +#endif +} +#endif // defined(FEATURE_EVENT_TRACE) && !defined(DACCESS_COMPILE) diff --git 
a/src/Native/Runtime/profheapwalkhelper.cpp b/src/Native/Runtime/profheapwalkhelper.cpp index b4dddca18..038e99ee0 100644 --- a/src/Native/Runtime/profheapwalkhelper.cpp +++ b/src/Native/Runtime/profheapwalkhelper.cpp @@ -141,7 +141,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext) //if (pMT->ContainsPointersOrCollectible()) { // First round through calculates the number of object refs for this class - GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs); + GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs); if (cNumRefs > 0) { @@ -166,7 +166,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext) // Second round saves off all of the ref values OBJECTREF * pCurObjRef = arrObjRef; - GCHeapUtilities::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef); + GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef); } } diff --git a/src/Native/Runtime/unix/PalRedhawkUnix.cpp b/src/Native/Runtime/unix/PalRedhawkUnix.cpp index 8b699c07c..91c5842cc 100644 --- a/src/Native/Runtime/unix/PalRedhawkUnix.cpp +++ b/src/Native/Runtime/unix/PalRedhawkUnix.cpp @@ -1457,13 +1457,12 @@ void GCToOSInterface::YieldThread(uint32_t switchCount) // Reserve virtual memory range. 
// Parameters: -// address - starting virtual address, it can be NULL to let the function choose the starting address // size - size of the virtual memory range // alignment - requested memory alignment, 0 means no specific alignment requested // flags - flags to control special settings like write watching // Return: // Starting virtual address of the reserved range -void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags) +void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags) { ASSERT_MSG(!(flags & VirtualReserveFlags::WriteWatch), "WriteWatch not supported on Unix"); @@ -1474,7 +1473,7 @@ void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignme size_t alignedSize = size + (alignment - OS_PAGE_SIZE); - void * pRetVal = mmap(address, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0); + void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0); if (pRetVal != NULL) { diff --git a/src/Native/Runtime/unix/UnixNativeCodeManager.cpp b/src/Native/Runtime/unix/UnixNativeCodeManager.cpp index 4138cc8b5..927755238 100644 --- a/src/Native/Runtime/unix/UnixNativeCodeManager.cpp +++ b/src/Native/Runtime/unix/UnixNativeCodeManager.cpp @@ -144,12 +144,20 @@ bool UnixNativeCodeManager::UnwindStackFrame(MethodInfo * pMethodInfo, p += sizeof(int32_t); GcInfoDecoder decoder(GCInfoToken(p), DECODE_REVERSE_PINVOKE_VAR); + INT32 slot = decoder.GetReversePInvokeFrameStackSlot(); + assert(slot != NO_REVERSE_PINVOKE_FRAME); - // @TODO: CORERT: Encode reverse PInvoke frame slot in GCInfo: https://github.com/dotnet/corert/issues/2115 - // INT32 slot = decoder.GetReversePInvokeFrameStackSlot(); - // assert(slot != NO_REVERSE_PINVOKE_FRAME); - - *ppPreviousTransitionFrame = (PTR_VOID)-1; + TADDR basePointer = NULL; + UINT32 stackBasedRegister = decoder.GetStackBaseRegister(); + if (stackBasedRegister == NO_STACK_BASE_REGISTER) + { + basePointer = 
dac_cast<TADDR>(pRegisterSet->GetSP()); + } + else + { + basePointer = dac_cast<TADDR>(pRegisterSet->GetFP()); + } + *ppPreviousTransitionFrame = *(void**)(basePointer + slot); return true; } diff --git a/src/Native/Runtime/windows/CoffNativeCodeManager.cpp b/src/Native/Runtime/windows/CoffNativeCodeManager.cpp index 6692d298b..076a57539 100644 --- a/src/Native/Runtime/windows/CoffNativeCodeManager.cpp +++ b/src/Native/Runtime/windows/CoffNativeCodeManager.cpp @@ -321,12 +321,20 @@ bool CoffNativeCodeManager::UnwindStackFrame(MethodInfo * pMethodInfo, p += sizeof(int32_t); GcInfoDecoder decoder(GCInfoToken(p), DECODE_REVERSE_PINVOKE_VAR); + INT32 slot = decoder.GetReversePInvokeFrameStackSlot(); + assert(slot != NO_REVERSE_PINVOKE_FRAME); - // @TODO: CORERT: Encode reverse PInvoke frame slot in GCInfo: https://github.com/dotnet/corert/issues/2115 - // INT32 slot = decoder.GetReversePInvokeFrameStackSlot(); - // assert(slot != NO_REVERSE_PINVOKE_FRAME); - - *ppPreviousTransitionFrame = (PTR_VOID)-1; + TADDR basePointer = NULL; + UINT32 stackBasedRegister = decoder.GetStackBaseRegister(); + if (stackBasedRegister == NO_STACK_BASE_REGISTER) + { + basePointer = dac_cast<TADDR>(pRegisterSet->GetSP()); + } + else + { + basePointer = dac_cast<TADDR>(pRegisterSet->GetFP()); + } + *ppPreviousTransitionFrame = *(void**)(basePointer + slot); return true; } diff --git a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp index 49097edd3..87b52a1ec 100644 --- a/src/Native/Runtime/windows/PalRedhawkMinWin.cpp +++ b/src/Native/Runtime/windows/PalRedhawkMinWin.cpp @@ -1482,7 +1482,7 @@ void GCToOSInterface::YieldThread(uint32_t /*switchCount*/) // flags - flags to control special settings like write watching // Return: // Starting virtual address of the reserved range -void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags) +void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, 
uint32_t flags) { DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE; return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE); diff --git a/src/Native/System.Private.CoreLib.Native/pal_errno.cpp b/src/Native/System.Private.CoreLib.Native/pal_errno.cpp index e7e7bb6ba..9a8ee708b 100644 --- a/src/Native/System.Private.CoreLib.Native/pal_errno.cpp +++ b/src/Native/System.Private.CoreLib.Native/pal_errno.cpp @@ -5,12 +5,12 @@ #include <stdint.h> #include <errno.h> -extern "C" int32_t CoreLibNative_GetLastErrNo() +extern "C" int32_t CoreLibNative_GetErrNo() { return errno; } -extern "C" void CoreLibNative_SetLastErrNo(int32_t error) +extern "C" void CoreLibNative_ClearErrNo() { - errno = error; + errno = 0; } diff --git a/src/Native/gc/env/gcenv.base.h b/src/Native/gc/env/gcenv.base.h index 94f73762f..0a0de73ee 100644 --- a/src/Native/gc/env/gcenv.base.h +++ b/src/Native/gc/env/gcenv.base.h @@ -96,7 +96,7 @@ inline HRESULT HRESULT_FROM_WIN32(unsigned long x) #define UNREFERENCED_PARAMETER(P) (void)(P) #ifdef PLATFORM_UNIX -#define _vsnprintf vsnprintf +#define _vsnprintf_s(string, sizeInBytes, count, format, args) vsnprintf(string, sizeInBytes, format, args) #define sprintf_s snprintf #define swprintf_s swprintf #endif diff --git a/src/Native/gc/env/gcenv.ee.h b/src/Native/gc/env/gcenv.ee.h index dc6c1d84b..beb0c1a98 100644 --- a/src/Native/gc/env/gcenv.ee.h +++ b/src/Native/gc/env/gcenv.ee.h @@ -56,6 +56,16 @@ public: static void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param); static Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg); + + // Diagnostics methods. 
+ static void DiagGCStart(int gen, bool isInduced); + static void DiagUpdateGenerationBounds(); + static void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent); + static void DiagWalkFReachableObjects(void* gcContext); + static void DiagWalkSurvivors(void* gcContext); + static void DiagWalkLOHSurvivors(void* gcContext); + static void DiagWalkBGCSurvivors(void* gcContext); + static void StompWriteBarrier(WriteBarrierParameters* args); }; #endif // __GCENV_EE_H__ diff --git a/src/Native/gc/env/gcenv.os.h b/src/Native/gc/env/gcenv.os.h index bb0153f11..6a126f29e 100644 --- a/src/Native/gc/env/gcenv.os.h +++ b/src/Native/gc/env/gcenv.os.h @@ -73,13 +73,12 @@ public: // Reserve virtual memory range. // Parameters: - // address - starting virtual address, it can be NULL to let the function choose the starting address // size - size of the virtual memory range // alignment - requested memory alignment // flags - flags to control special settings like write watching // Return: // Starting virtual address of the reserved range - static void* VirtualReserve(void *address, size_t size, size_t alignment, uint32_t flags); + static void* VirtualReserve(size_t size, size_t alignment, uint32_t flags); // Release virtual memory range previously reserved using VirtualReserve // Parameters: diff --git a/src/Native/gc/gc.cpp b/src/Native/gc/gc.cpp index 3ba5369a7..99e646c50 100644 --- a/src/Native/gc/gc.cpp +++ b/src/Native/gc/gc.cpp @@ -21,22 +21,6 @@ #define USE_INTROSORT -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -inline BOOL ShouldTrackMovementForProfilerOrEtw() -{ -#ifdef GC_PROFILING - if (CORProfilerTrackGC()) - return true; -#endif - -#ifdef FEATURE_EVENT_TRACE - if (ETW::GCLog::ShouldTrackMovementForEtw()) - return true; -#endif - - return false; -} -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) BOOL bgc_heap_walk_for_etw_p = FALSE; @@ -349,8 +333,8 @@ void 
gc_heap::add_to_history_per_heap() #endif //BACKGROUND_GC current_hist->fgc_lowest = lowest_address; current_hist->fgc_highest = highest_address; - current_hist->g_lowest = g_lowest_address; - current_hist->g_highest = g_highest_address; + current_hist->g_lowest = g_gc_lowest_address; + current_hist->g_highest = g_gc_highest_address; gchist_index_per_heap++; if (gchist_index_per_heap == max_history_count) @@ -405,7 +389,7 @@ void log_va_msg(const char *fmt, va_list args) int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", (uint32_t)GCToOSInterface::GetCurrentThreadIdForLogging()); buffer_start += pid_len; memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start); - int msg_len = _vsnprintf(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, fmt, args ); + int msg_len = _vsnprintf_s(&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args ); if (msg_len == -1) { msg_len = BUFFERSIZE - buffer_start; @@ -1418,9 +1402,6 @@ int mark_time, plan_time, sweep_time, reloc_time, compact_time; #ifndef MULTIPLE_HEAPS -#define ephemeral_low g_ephemeral_low -#define ephemeral_high g_ephemeral_high - #endif // MULTIPLE_HEAPS #ifdef TRACE_GC @@ -2192,6 +2173,52 @@ int log2(unsigned int n) return pos; } +#ifndef DACCESS_COMPILE + +void stomp_write_barrier_resize(bool is_runtime_suspended, bool requires_upper_bounds_check) +{ + WriteBarrierParameters args = {}; + args.operation = WriteBarrierOp::StompResize; + args.is_runtime_suspended = is_runtime_suspended; + args.requires_upper_bounds_check = requires_upper_bounds_check; + args.card_table = g_gc_card_table; + args.lowest_address = g_gc_lowest_address; + args.highest_address = g_gc_highest_address; +#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + if (SoftwareWriteWatch::IsEnabledForGCHeap()) + { + args.write_watch_table = g_gc_sw_ww_table; + } +#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + GCToEEInterface::StompWriteBarrier(&args); +} + +void 
stomp_write_barrier_ephemeral(uint8_t* ephemeral_low, uint8_t* ephemeral_high) +{ + WriteBarrierParameters args = {}; + args.operation = WriteBarrierOp::StompEphemeral; + args.is_runtime_suspended = true; + args.ephemeral_low = ephemeral_low; + args.ephemeral_high = ephemeral_high; + GCToEEInterface::StompWriteBarrier(&args); +} + +void stomp_write_barrier_initialize() +{ + WriteBarrierParameters args = {}; + args.operation = WriteBarrierOp::Initialize; + args.is_runtime_suspended = true; + args.requires_upper_bounds_check = false; + args.card_table = g_gc_card_table; + args.lowest_address = g_gc_lowest_address; + args.highest_address = g_gc_highest_address; + args.ephemeral_low = reinterpret_cast<uint8_t*>(1); + args.ephemeral_high = reinterpret_cast<uint8_t*>(~0); + GCToEEInterface::StompWriteBarrier(&args); +} + +#endif // DACCESS_COMPILE + //extract the low bits [0,low[ of a uint32_t #define lowbits(wrd, bits) ((wrd) & ((1 << (bits))-1)) //extract the high bits [high, 32] of a uint32_t @@ -2397,6 +2424,10 @@ BOOL gc_heap::ro_segments_in_range; size_t gc_heap::gen0_big_free_spaces = 0; +uint8_t* gc_heap::ephemeral_low; + +uint8_t* gc_heap::ephemeral_high; + uint8_t* gc_heap::lowest_address; uint8_t* gc_heap::highest_address; @@ -3422,7 +3453,7 @@ inline size_t ro_seg_begin_index (heap_segment* seg) { size_t begin_index = (size_t)seg / gc_heap::min_segment_size; - begin_index = max (begin_index, (size_t)g_lowest_address / gc_heap::min_segment_size); + begin_index = max (begin_index, (size_t)g_gc_lowest_address / gc_heap::min_segment_size); return begin_index; } @@ -3430,14 +3461,14 @@ inline size_t ro_seg_end_index (heap_segment* seg) { size_t end_index = (size_t)(heap_segment_reserved (seg) - 1) / gc_heap::min_segment_size; - end_index = min (end_index, (size_t)g_highest_address / gc_heap::min_segment_size); + end_index = min (end_index, (size_t)g_gc_highest_address / gc_heap::min_segment_size); return end_index; } void seg_mapping_table_add_ro_segment 
(heap_segment* seg) { #ifdef GROWABLE_SEG_MAPPING_TABLE - if ((heap_segment_reserved (seg) <= g_lowest_address) || (heap_segment_mem (seg) >= g_highest_address)) + if ((heap_segment_reserved (seg) <= g_gc_lowest_address) || (heap_segment_mem (seg) >= g_gc_highest_address)) return; #endif //GROWABLE_SEG_MAPPING_TABLE @@ -3621,7 +3652,7 @@ gc_heap* seg_mapping_table_heap_of_worker (uint8_t* o) gc_heap* seg_mapping_table_heap_of (uint8_t* o) { #ifdef GROWABLE_SEG_MAPPING_TABLE - if ((o < g_lowest_address) || (o >= g_highest_address)) + if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; #endif //GROWABLE_SEG_MAPPING_TABLE @@ -3631,7 +3662,7 @@ gc_heap* seg_mapping_table_heap_of (uint8_t* o) gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o) { #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE) - if ((o < g_lowest_address) || (o >= g_highest_address)) + if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) return 0; #endif //FEATURE_BASICFREEZE || GROWABLE_SEG_MAPPING_TABLE @@ -3643,7 +3674,7 @@ gc_heap* seg_mapping_table_heap_of_gc (uint8_t* o) heap_segment* seg_mapping_table_segment_of (uint8_t* o) { #if defined(FEATURE_BASICFREEZE) && defined(GROWABLE_SEG_MAPPING_TABLE) - if ((o < g_lowest_address) || (o >= g_highest_address)) + if ((o < g_gc_lowest_address) || (o >= g_gc_highest_address)) #ifdef FEATURE_BASICFREEZE return ro_segment_lookup (o); #else @@ -3686,7 +3717,7 @@ heap_segment* seg_mapping_table_segment_of (uint8_t* o) #ifdef FEATURE_BASICFREEZE // TODO: This was originally written assuming that the seg_mapping_table would always contain entries for ro - // segments whenever the ro segment falls into the [g_lowest_address,g_highest_address) range. I.e., it had an + // segments whenever the ro segment falls into the [g_gc_lowest_address,g_gc_highest_address) range. I.e., it had an // extra "&& (size_t)(entry->seg1) & ro_in_entry" expression. 
However, at the moment, grow_brick_card_table does // not correctly go through the ro segments and add them back to the seg_mapping_table when the [lowest,highest) // range changes. We should probably go ahead and modify grow_brick_card_table and put back the @@ -4086,8 +4117,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h memory_details.current_block_normal = 0; memory_details.current_block_large = 0; - g_lowest_address = MAX_PTR; - g_highest_address = 0; + g_gc_lowest_address = MAX_PTR; + g_gc_highest_address = 0; if (((size_t)MAX_PTR - large_size) < normal_size) { @@ -4107,8 +4138,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory); if (allatonce_block) { - g_lowest_address = allatonce_block; - g_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size)); + g_gc_lowest_address = allatonce_block; + g_gc_highest_address = allatonce_block + (memory_details.block_count * (large_size + normal_size)); memory_details.allocation_pattern = initial_memory_details::ALLATONCE; for(size_t i = 0; i < memory_details.block_count; i++) @@ -4131,8 +4162,8 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h if (b2) { memory_details.allocation_pattern = initial_memory_details::TWO_STAGE; - g_lowest_address = min(b1,b2); - g_highest_address = max(b1 + memory_details.block_count*normal_size, + g_gc_lowest_address = min(b1,b2); + g_gc_highest_address = max(b1 + memory_details.block_count*normal_size, b2 + memory_details.block_count*large_size); for(size_t i = 0; i < memory_details.block_count; i++) { @@ -4178,10 +4209,10 @@ BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_h } else { - if (current_block->memory_base < g_lowest_address) - g_lowest_address = current_block->memory_base; - if (((uint8_t *) current_block->memory_base + block_size) > 
g_highest_address) - g_highest_address = (current_block->memory_base + block_size); + if (current_block->memory_base < g_gc_lowest_address) + g_gc_lowest_address = current_block->memory_base; + if (((uint8_t *) current_block->memory_base + block_size) > g_gc_highest_address) + g_gc_highest_address = (current_block->memory_base + block_size); } reserve_success = TRUE; } @@ -4288,7 +4319,7 @@ void* virtual_alloc (size_t size) flags = VirtualReserveFlags::WriteWatch; } #endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - void* prgmem = GCToOSInterface::VirtualReserve (0, requested_size, card_size * card_word_width, flags); + void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags); void *aligned_mem = prgmem; // We don't want (prgmem + size) to be right at the end of the address space @@ -4623,22 +4654,22 @@ gc_heap::get_segment (size_t size, BOOL loh_p) { uint8_t* start; uint8_t* end; - if (mem < g_lowest_address) + if (mem < g_gc_lowest_address) { start = (uint8_t*)mem; } else { - start = (uint8_t*)g_lowest_address; + start = (uint8_t*)g_gc_lowest_address; } - if (((uint8_t*)mem + size) > g_highest_address) + if (((uint8_t*)mem + size) > g_gc_highest_address) { end = (uint8_t*)mem + size; } else { - end = (uint8_t*)g_highest_address; + end = (uint8_t*)g_gc_highest_address; } if (gc_heap::grow_brick_card_tables (start, end, size, result, __this, loh_p) != 0) @@ -4703,10 +4734,7 @@ heap_segment* gc_heap::get_segment_for_loh (size_t size FireEtwGCCreateSegment_V1((size_t)heap_segment_mem(res), (size_t)(heap_segment_reserved (res) - heap_segment_mem(res)), ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP, GetClrInstanceId()); -#ifdef GC_PROFILING - if (CORProfilerTrackGC()) - UpdateGenerationBounds(); -#endif // GC_PROFILING + GCToEEInterface::DiagUpdateGenerationBounds(); #ifdef MULTIPLE_HEAPS hp->thread_loh_segment (res); @@ -5340,7 +5368,7 @@ heap_segment* gc_heap::segment_of (uint8_t* add, ptrdiff_t& delta, BOOL verify_p 
uint8_t* sadd = add; heap_segment* hs = 0; heap_segment* hs1 = 0; - if (!((add >= g_lowest_address) && (add < g_highest_address))) + if (!((add >= g_gc_lowest_address) && (add < g_gc_highest_address))) { delta = 0; return 0; @@ -5523,7 +5551,6 @@ public: saved_post_plug_reloc = temp; } -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) void swap_pre_plug_and_saved_for_profiler() { gap_reloc_pair temp; @@ -5539,7 +5566,6 @@ public: memcpy (saved_post_plug_info_start, &saved_post_plug, sizeof (saved_post_plug)); saved_post_plug = temp; } -#endif //GC_PROFILING || //FEATURE_EVENT_TRACE // We should think about whether it's really necessary to have to copy back the pre plug // info since it was already copied during compacting plugs. But if a plug doesn't move @@ -6399,7 +6425,7 @@ void gc_heap::set_card (size_t card) inline void gset_card (size_t card) { - g_card_table [card_word (card)] |= (1 << card_bit (card)); + g_gc_card_table [card_word (card)] |= (1 << card_bit (card)); } inline @@ -6510,7 +6536,7 @@ size_t size_card_bundle_of (uint8_t* from, uint8_t* end) uint32_t* translate_card_bundle_table (uint32_t* cb) { - return (uint32_t*)((uint8_t*)cb - ((((size_t)g_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t))); + return (uint32_t*)((uint8_t*)cb - ((((size_t)g_gc_lowest_address) / (card_size*card_word_width*card_bundle_size*card_bundle_word_width)) * sizeof (uint32_t))); } void gc_heap::enable_card_bundles () @@ -6722,7 +6748,7 @@ size_t size_mark_array_of (uint8_t* from, uint8_t* end) // according to the lowest_address. uint32_t* translate_mark_array (uint32_t* ma) { - return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_lowest_address)); + return (uint32_t*)((uint8_t*)ma - size_mark_array_of (0, g_gc_lowest_address)); } // from and end must be page aligned addresses. 
@@ -6850,16 +6876,16 @@ void release_card_table (uint32_t* c_table) { destroy_card_table (c_table); // sever the link from the parent - if (&g_card_table[card_word (gcard_of(g_lowest_address))] == c_table) + if (&g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))] == c_table) { - g_card_table = 0; + g_gc_card_table = 0; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::StaticClose(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP } else { - uint32_t* p_table = &g_card_table[card_word (gcard_of(g_lowest_address))]; + uint32_t* p_table = &g_gc_card_table[card_word (gcard_of(g_gc_lowest_address))]; if (p_table) { while (p_table && (card_table_next (p_table) != c_table)) @@ -6881,8 +6907,8 @@ void destroy_card_table (uint32_t* c_table) uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) { - assert (g_lowest_address == start); - assert (g_highest_address == end); + assert (g_gc_lowest_address == start); + assert (g_gc_highest_address == end); uint32_t virtual_reserve_flags = VirtualReserveFlags::None; @@ -6902,7 +6928,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) if (can_use_write_watch_for_card_table()) { virtual_reserve_flags |= VirtualReserveFlags::WriteWatch; - cb = size_card_bundle_of (g_lowest_address, g_highest_address); + cb = size_card_bundle_of (g_gc_lowest_address, g_gc_highest_address); } #endif //CARD_BUNDLE @@ -6918,7 +6944,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifdef GROWABLE_SEG_MAPPING_TABLE - size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address); + size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address); size_t st_table_offset = sizeof(card_table_info) + cs + bs + cb + wws; size_t st_table_offset_aligned = align_for_seg_mapping_table (st_table_offset); @@ -6932,7 +6958,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) 
size_t alloc_size = sizeof (uint8_t)*(sizeof(card_table_info) + cs + bs + cb + wws + st + ms); size_t alloc_size_aligned = Align (alloc_size, g_SystemInfo.dwAllocationGranularity-1); - uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags); + uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags); if (!mem) return 0; @@ -6973,7 +6999,7 @@ uint32_t* gc_heap::make_card_table (uint8_t* start, uint8_t* end) #ifdef GROWABLE_SEG_MAPPING_TABLE seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned); seg_mapping_table = (seg_mapping*)((uint8_t*)seg_mapping_table - - size_seg_mapping_table_of (0, (align_lower_segment (g_lowest_address)))); + size_seg_mapping_table_of (0, (align_lower_segment (g_gc_lowest_address)))); #endif //GROWABLE_SEG_MAPPING_TABLE #ifdef MARK_ARRAY @@ -7012,10 +7038,10 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, gc_heap* hp, BOOL loh_p) { - uint8_t* la = g_lowest_address; - uint8_t* ha = g_highest_address; - uint8_t* saved_g_lowest_address = min (start, g_lowest_address); - uint8_t* saved_g_highest_address = max (end, g_highest_address); + uint8_t* la = g_gc_lowest_address; + uint8_t* ha = g_gc_highest_address; + uint8_t* saved_g_lowest_address = min (start, g_gc_lowest_address); + uint8_t* saved_g_highest_address = max (end, g_gc_highest_address); #ifdef BACKGROUND_GC // This value is only for logging purpose - it's not necessarily exactly what we // would commit for mark array but close enough for diagnostics purpose. 
@@ -7045,18 +7071,18 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, #endif // BIT64 ps *= 2; - if (saved_g_lowest_address < g_lowest_address) + if (saved_g_lowest_address < g_gc_lowest_address) { - if (ps > (size_t)g_lowest_address) + if (ps > (size_t)g_gc_lowest_address) saved_g_lowest_address = (uint8_t*)OS_PAGE_SIZE; else { - assert (((size_t)g_lowest_address - ps) >= OS_PAGE_SIZE); - saved_g_lowest_address = min (saved_g_lowest_address, (g_lowest_address - ps)); + assert (((size_t)g_gc_lowest_address - ps) >= OS_PAGE_SIZE); + saved_g_lowest_address = min (saved_g_lowest_address, (g_gc_lowest_address - ps)); } } - if (saved_g_highest_address > g_highest_address) + if (saved_g_highest_address > g_gc_highest_address) { saved_g_highest_address = max ((saved_g_lowest_address + ps), saved_g_highest_address); if (saved_g_highest_address > top) @@ -7069,7 +7095,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, bool write_barrier_updated = false; uint32_t virtual_reserve_flags = VirtualReserveFlags::None; - uint32_t* saved_g_card_table = g_card_table; + uint32_t* saved_g_card_table = g_gc_card_table; uint32_t* ct = 0; uint32_t* translated_ct = 0; short* bt = 0; @@ -7125,7 +7151,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, dprintf (GC_TABLE_LOG, ("card table: %Id; brick table: %Id; card bundle: %Id; sw ww table: %Id; seg table: %Id; mark array: %Id", cs, bs, cb, wws, st, ms)); - uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (0, alloc_size_aligned, 0, virtual_reserve_flags); + uint8_t* mem = (uint8_t*)GCToOSInterface::VirtualReserve (alloc_size_aligned, 0, virtual_reserve_flags); if (!mem) { @@ -7152,7 +7178,7 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, card_table_refcount (ct) = 0; card_table_lowest_address (ct) = saved_g_lowest_address; card_table_highest_address (ct) = saved_g_highest_address; - card_table_next (ct) = &g_card_table[card_word (gcard_of (la))]; + card_table_next (ct) = &g_gc_card_table[card_word 
(gcard_of (la))]; //clear the card table /* @@ -7179,9 +7205,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, seg_mapping* new_seg_mapping_table = (seg_mapping*)(mem + st_table_offset_aligned); new_seg_mapping_table = (seg_mapping*)((uint8_t*)new_seg_mapping_table - size_seg_mapping_table_of (0, (align_lower_segment (saved_g_lowest_address)))); - memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_lowest_address)], - &seg_mapping_table[seg_mapping_word_of(g_lowest_address)], - size_seg_mapping_table_of(g_lowest_address, g_highest_address)); + memcpy(&new_seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], + &seg_mapping_table[seg_mapping_word_of(g_gc_lowest_address)], + size_seg_mapping_table_of(g_gc_lowest_address, g_gc_highest_address)); seg_mapping_table = new_seg_mapping_table; } @@ -7243,13 +7269,12 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, // Note on points where the runtime is suspended anywhere in this function. Upon an attempt to suspend the // runtime, a different thread may suspend first, causing this thread to block at the point of the suspend call. // So, at any suspend point, externally visible state needs to be consistent, as code that depends on that state - // may run while this thread is blocked. This includes updates to g_card_table, g_lowest_address, and - // g_highest_address. + // may run while this thread is blocked. This includes updates to g_gc_card_table, g_gc_lowest_address, and + // g_gc_highest_address. suspend_EE(); } - g_card_table = translated_ct; - + g_gc_card_table = translated_ct; SoftwareWriteWatch::SetResizedUntranslatedTable( mem + sw_ww_table_offset, saved_g_lowest_address, @@ -7260,7 +7285,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, // grow version of the write barrier. This test tells us if the new // segment was allocated at a lower address than the old, requiring // that we start doing an upper bounds check in the write barrier. 
- StompWriteBarrierResize(true, la != saved_g_lowest_address); + g_gc_lowest_address = saved_g_lowest_address; + g_gc_highest_address = saved_g_highest_address; + stomp_write_barrier_resize(true, la != saved_g_lowest_address); write_barrier_updated = true; if (!is_runtime_suspended) @@ -7271,9 +7298,12 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, else #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP { - g_card_table = translated_ct; + g_gc_card_table = translated_ct; } + g_gc_lowest_address = saved_g_lowest_address; + g_gc_highest_address = saved_g_highest_address; + if (!write_barrier_updated) { // This passes a bool telling whether we need to switch to the post @@ -7284,19 +7314,9 @@ int gc_heap::grow_brick_card_tables (uint8_t* start, // to be changed, so we are doing this after all global state has // been updated. See the comment above suspend_EE() above for more // info. - StompWriteBarrierResize(!!IsGCThread(), la != saved_g_lowest_address); + stomp_write_barrier_resize(!!IsGCThread(), la != saved_g_lowest_address); } - // We need to make sure that other threads executing checked write barriers - // will see the g_card_table update before g_lowest/highest_address updates. - // Otherwise, the checked write barrier may AV accessing the old card table - // with address that it does not cover. Write barriers access card table - // without memory barriers for performance reasons, so we need to flush - // the store buffers here. 
- GCToOSInterface::FlushProcessWriteBuffers(); - - g_lowest_address = saved_g_lowest_address; - VolatileStore(&g_highest_address, saved_g_highest_address); return 0; @@ -7305,7 +7325,7 @@ fail: if (mem) { - assert(g_card_table == saved_g_card_table); + assert(g_gc_card_table == saved_g_card_table); //delete (uint32_t*)((uint8_t*)ct - sizeof(card_table_info)); if (!GCToOSInterface::VirtualRelease (mem, alloc_size_aligned)) @@ -7463,7 +7483,7 @@ void gc_heap::copy_brick_card_table() assert (ha == card_table_highest_address (&old_card_table[card_word (card_of (la))])); /* todo: Need a global lock for this */ - uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))]; + uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); /* End of global lock */ @@ -7476,8 +7496,8 @@ void gc_heap::copy_brick_card_table() if (gc_can_use_concurrent) { mark_array = translate_mark_array (card_table_mark_array (ct)); - assert (mark_word_of (g_highest_address) == - mark_word_of (align_on_mark_word (g_highest_address))); + assert (mark_word_of (g_gc_highest_address) == + mark_word_of (align_on_mark_word (g_gc_highest_address))); } else mark_array = NULL; @@ -7486,13 +7506,13 @@ void gc_heap::copy_brick_card_table() #ifdef CARD_BUNDLE #if defined(MARK_ARRAY) && defined(_DEBUG) #ifdef GROWABLE_SEG_MAPPING_TABLE - size_t st = size_seg_mapping_table_of (g_lowest_address, g_highest_address); + size_t st = size_seg_mapping_table_of (g_gc_lowest_address, g_gc_highest_address); #else //GROWABLE_SEG_MAPPING_TABLE size_t st = 0; #endif //GROWABLE_SEG_MAPPING_TABLE #endif //MARK_ARRAY && _DEBUG card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct)); - assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] == + assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == 
card_table_card_bundle_table (ct)); //set the card table if we are in a heap growth scenario @@ -9330,13 +9350,13 @@ void gc_heap::update_card_table_bundle() bool success = GCToOSInterface::GetWriteWatch (false /* resetState */ , base_address, region_size, (void**)g_addresses, &bcount); - assert (success); + assert (success && "GetWriteWatch failed!"); dprintf (3,("Found %d pages written", bcount)); for (unsigned i = 0; i < bcount; i++) { size_t bcardw = (uint32_t*)(max(g_addresses[i],base_address)) - &card_table[0]; size_t ecardw = (uint32_t*)(min(g_addresses[i]+OS_PAGE_SIZE, high_address)) - &card_table[0]; - assert (bcardw >= card_word (card_of (g_lowest_address))); + assert (bcardw >= card_word (card_of (g_gc_lowest_address))); card_bundles_set (cardw_card_bundle (bcardw), cardw_card_bundle (align_cardw_on_bundle (ecardw))); @@ -9639,7 +9659,7 @@ void gc_heap::make_generation (generation& gen, heap_segment* seg, uint8_t* star #endif //FREE_USAGE_STATS } -void gc_heap::adjust_ephemeral_limits (bool is_runtime_suspended) +void gc_heap::adjust_ephemeral_limits () { ephemeral_low = generation_allocation_start (generation_of (max_generation - 1)); ephemeral_high = heap_segment_reserved (ephemeral_heap_segment); @@ -9647,8 +9667,10 @@ void gc_heap::adjust_ephemeral_limits (bool is_runtime_suspended) dprintf (3, ("new ephemeral low: %Ix new ephemeral high: %Ix", (size_t)ephemeral_low, (size_t)ephemeral_high)) +#ifndef MULTIPLE_HEAPS // This updates the write barrier helpers with the new info. 
- StompWriteBarrierEphemeral(is_runtime_suspended); + stomp_write_barrier_ephemeral(ephemeral_low, ephemeral_high); +#endif // MULTIPLE_HEAPS } #if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN) @@ -9821,9 +9843,9 @@ HRESULT gc_heap::initialize_gc (size_t segment_size, settings.first_init(); - g_card_table = make_card_table (g_lowest_address, g_highest_address); + g_gc_card_table = make_card_table (g_gc_lowest_address, g_gc_highest_address); - if (!g_card_table) + if (!g_gc_card_table) return E_OUTOFMEMORY; gc_started = FALSE; @@ -10306,7 +10328,7 @@ gc_heap::init_gc_heap (int h_number) #endif //MULTIPLE_HEAPS /* todo: Need a global lock for this */ - uint32_t* ct = &g_card_table [card_word (card_of (g_lowest_address))]; + uint32_t* ct = &g_gc_card_table [card_word (card_of (g_gc_lowest_address))]; own_card_table (ct); card_table = translate_card_table (ct); /* End of global lock */ @@ -10317,13 +10339,13 @@ gc_heap::init_gc_heap (int h_number) #ifdef CARD_BUNDLE card_bundle_table = translate_card_bundle_table (card_table_card_bundle_table (ct)); - assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_lowest_address))))] == + assert (&card_bundle_table [card_bundle_word (cardw_card_bundle (card_word (card_of (g_gc_lowest_address))))] == card_table_card_bundle_table (ct)); #endif //CARD_BUNDLE #ifdef MARK_ARRAY if (gc_can_use_concurrent) - mark_array = translate_mark_array (card_table_mark_array (&g_card_table[card_word (card_of (g_lowest_address))])); + mark_array = translate_mark_array (card_table_mark_array (&g_gc_card_table[card_word (card_of (g_gc_lowest_address))])); else mark_array = NULL; #endif //MARK_ARRAY @@ -10360,6 +10382,7 @@ gc_heap::init_gc_heap (int h_number) (size_t)(heap_segment_reserved (lseg) - heap_segment_mem(lseg)), ETW::GCLog::ETW_GC_INFO::LARGE_OBJECT_HEAP, GetClrInstanceId()); + #ifdef SEG_MAPPING_TABLE seg_mapping_table_add_segment (lseg, __this); #else //SEG_MAPPING_TABLE @@ -10442,7 +10465,7 @@ 
gc_heap::init_gc_heap (int h_number) make_background_mark_stack (b_arr); #endif //BACKGROUND_GC - adjust_ephemeral_limits(true); + adjust_ephemeral_limits(); #ifdef MARK_ARRAY // why would we clear the mark array for this page? it should be cleared.. @@ -13043,12 +13066,12 @@ int gc_heap::try_allocate_more_space (alloc_context* acontext, size_t size, if (can_allocate) { - //ETW trace for allocation tick size_t alloc_context_bytes = acontext->alloc_limit + Align (min_obj_size, align_const) - acontext->alloc_ptr; int etw_allocation_index = ((gen_number == 0) ? 0 : 1); etw_allocation_running_amount[etw_allocation_index] += alloc_context_bytes; + if (etw_allocation_running_amount[etw_allocation_index] > etw_allocation_tick) { #ifdef FEATURE_REDHAWK @@ -14785,6 +14808,9 @@ int gc_heap::generation_to_condemn (int n_initial, dprintf (GTC_LOG, ("h%d: alloc full - BLOCK", heap_number)); n = max_generation; *blocking_collection_p = TRUE; + if ((local_settings->reason == reason_oos_loh) || + (local_settings->reason == reason_alloc_loh)) + evaluate_elevation = FALSE; local_condemn_reasons->set_condition (gen_before_oom); } @@ -15183,7 +15209,7 @@ void gc_heap::gc1() vm_heap->GcCondemnedGeneration = settings.condemned_generation; - assert (g_card_table == card_table); + assert (g_gc_card_table == card_table); { if (n == max_generation) @@ -15337,7 +15363,11 @@ void gc_heap::gc1() if (!settings.concurrent) #endif //BACKGROUND_GC { - adjust_ephemeral_limits(!!IsGCThread()); +#ifndef FEATURE_REDHAWK + // IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR. 
+ assert(!!IsGCThread()); +#endif // FEATURE_REDHAWK + adjust_ephemeral_limits(); } #ifdef BACKGROUND_GC @@ -15472,7 +15502,15 @@ void gc_heap::gc1() #ifdef FEATURE_EVENT_TRACE if (bgc_heap_walk_for_etw_p && settings.concurrent) { - make_free_lists_for_profiler_for_bgc(); + GCToEEInterface::DiagWalkBGCSurvivors(__this); + +#ifdef MULTIPLE_HEAPS + bgc_t_join.join(this, gc_join_after_profiler_heap_walk); + if (bgc_t_join.joined()) + { + bgc_t_join.restart(); + } +#endif // MULTIPLE_HEAPS } #endif // FEATURE_EVENT_TRACE #endif //BACKGROUND_GC @@ -16169,7 +16207,11 @@ BOOL gc_heap::expand_soh_with_minimal_gc() dd_gc_new_allocation (dynamic_data_of (max_generation)) -= ephemeral_size; dd_new_allocation (dynamic_data_of (max_generation)) = dd_gc_new_allocation (dynamic_data_of (max_generation)); - adjust_ephemeral_limits(!!IsGCThread()); +#ifndef FEATURE_REDHAWK + // IsGCThread() always returns false on CoreRT, but this assert is useful in CoreCLR. + assert(!!IsGCThread()); +#endif // FEATURE_REDHAWK + adjust_ephemeral_limits(); return TRUE; } else @@ -16382,7 +16424,7 @@ int gc_heap::garbage_collect (int n) for (int i = 0; i < n_heaps; i++) { //copy the card and brick tables - if (g_card_table != g_heaps[i]->card_table) + if (g_gc_card_table != g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } @@ -16406,100 +16448,67 @@ int gc_heap::garbage_collect (int n) } #endif //BACKGROUND_GC // check for card table growth - if (g_card_table != card_table) + if (g_gc_card_table != card_table) copy_brick_card_table(); #endif //MULTIPLE_HEAPS - BOOL should_evaluate_elevation = FALSE; - BOOL should_do_blocking_collection = FALSE; + BOOL should_evaluate_elevation = FALSE; + BOOL should_do_blocking_collection = FALSE; #ifdef MULTIPLE_HEAPS - int gen_max = condemned_generation_num; - for (int i = 0; i < n_heaps; i++) - { - if (gen_max < g_heaps[i]->condemned_generation_num) - gen_max = g_heaps[i]->condemned_generation_num; - if ((!should_evaluate_elevation) && 
(g_heaps[i]->elevation_requested)) - should_evaluate_elevation = TRUE; - if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection)) - should_do_blocking_collection = TRUE; - } + int gen_max = condemned_generation_num; + for (int i = 0; i < n_heaps; i++) + { + if (gen_max < g_heaps[i]->condemned_generation_num) + gen_max = g_heaps[i]->condemned_generation_num; + if ((!should_evaluate_elevation) && (g_heaps[i]->elevation_requested)) + should_evaluate_elevation = TRUE; + if ((!should_do_blocking_collection) && (g_heaps[i]->blocking_collection)) + should_do_blocking_collection = TRUE; + } - settings.condemned_generation = gen_max; -//logically continues after GC_PROFILING. + settings.condemned_generation = gen_max; #else //MULTIPLE_HEAPS - settings.condemned_generation = generation_to_condemn (n, - &blocking_collection, - &elevation_requested, - FALSE); - should_evaluate_elevation = elevation_requested; - should_do_blocking_collection = blocking_collection; -#endif //MULTIPLE_HEAPS - - settings.condemned_generation = joined_generation_to_condemn ( - should_evaluate_elevation, - settings.condemned_generation, - &should_do_blocking_collection - STRESS_HEAP_ARG(n) - ); + settings.condemned_generation = generation_to_condemn (n, + &blocking_collection, + &elevation_requested, + FALSE); + should_evaluate_elevation = elevation_requested; + should_do_blocking_collection = blocking_collection; +#endif //MULTIPLE_HEAPS + + settings.condemned_generation = joined_generation_to_condemn ( + should_evaluate_elevation, + settings.condemned_generation, + &should_do_blocking_collection + STRESS_HEAP_ARG(n) + ); - STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, - "condemned generation num: %d\n", settings.condemned_generation); + STRESS_LOG1(LF_GCROOTS|LF_GC|LF_GCALLOC, LL_INFO10, + "condemned generation num: %d\n", settings.condemned_generation); - record_gcs_during_no_gc(); + record_gcs_during_no_gc(); - if (settings.condemned_generation > 1) - settings.promotion = 
TRUE; + if (settings.condemned_generation > 1) + settings.promotion = TRUE; #ifdef HEAP_ANALYZE - // At this point we've decided what generation is condemned - // See if we've been requested to analyze survivors after the mark phase - if (AnalyzeSurvivorsRequested(settings.condemned_generation)) - { - heap_analyze_enabled = TRUE; - } -#endif // HEAP_ANALYZE - -#ifdef GC_PROFILING - - // If we're tracking GCs, then we need to walk the first generation - // before collection to track how many items of each class has been - // allocated. - UpdateGenerationBounds(); - GarbageCollectionStartedCallback(settings.condemned_generation, settings.reason == reason_induced); + // At this point we've decided what generation is condemned + // See if we've been requested to analyze survivors after the mark phase + if (AnalyzeSurvivorsRequested(settings.condemned_generation)) { - BEGIN_PIN_PROFILER(CORProfilerTrackGC()); - size_t profiling_context = 0; - -#ifdef MULTIPLE_HEAPS - int hn = 0; - for (hn = 0; hn < gc_heap::n_heaps; hn++) - { - gc_heap* hp = gc_heap::g_heaps [hn]; - - // When we're walking objects allocated by class, then we don't want to walk the large - // object heap because then it would count things that may have been around for a while. - hp->walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE); - } -#else - // When we're walking objects allocated by class, then we don't want to walk the large - // object heap because then it would count things that may have been around for a while. 
- gc_heap::walk_heap (&AllocByClassHelper, (void *)&profiling_context, 0, FALSE); -#endif //MULTIPLE_HEAPS - - // Notify that we've reached the end of the Gen 0 scan - g_profControlBlock.pProfInterface->EndAllocByClass(&profiling_context); - END_PIN_PROFILER(); + heap_analyze_enabled = TRUE; } +#endif // HEAP_ANALYZE -#endif // GC_PROFILING + GCToEEInterface::DiagGCStart(settings.condemned_generation, settings.reason == reason_induced); #ifdef BACKGROUND_GC if ((settings.condemned_generation == max_generation) && (recursive_gc_sync::background_running_p())) { - //TODO BACKGROUND_GC If we just wait for the end of gc, it won't woork + //TODO BACKGROUND_GC If we just wait for the end of gc, it won't work // because we have to collect 0 and 1 properly // in particular, the allocation contexts are gone. // For now, it is simpler to collect max_generation-1 @@ -19625,12 +19634,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p) dprintf (3, ("Finalize marking")); finalize_queue->ScanForFinalization (GCHeap::Promote, condemned_gen_number, mark_only_p, __this); -#ifdef GC_PROFILING - if (CORProfilerTrackGC()) - { - finalize_queue->WalkFReachableObjects (__this); - } -#endif //GC_PROFILING + GCToEEInterface::DiagWalkFReachableObjects(__this); #endif // FEATURE_PREMORTEM_FINALIZATION // Scan dependent handles again to promote any secondaries associated with primaries that were promoted @@ -21105,8 +21109,7 @@ void gc_heap::relocate_in_loh_compact() generation_free_obj_space (gen))); } -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -void gc_heap::walk_relocation_loh (size_t profiling_context) +void gc_heap::walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn) { generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); @@ -21136,14 +21139,7 @@ void gc_heap::walk_relocation_loh (size_t profiling_context) STRESS_LOG_PLUG_MOVE(o, (o + size), -reloc); - { - 
ETW::GCLog::MovedReference( - o, - (o + size), - reloc, - profiling_context, - settings.compaction); - } + fn (o, (o + size), reloc, profiling_context, settings.compaction, FALSE); o = o + size; if (o < heap_segment_allocated (seg)) @@ -21160,7 +21156,6 @@ void gc_heap::walk_relocation_loh (size_t profiling_context) } } } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) BOOL gc_heap::loh_object_p (uint8_t* o) { @@ -22318,10 +22313,7 @@ void gc_heap::plan_phase (int condemned_gen_number) if (!loh_compacted_p) #endif //FEATURE_LOH_COMPACTION { -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - if (ShouldTrackMovementForProfilerOrEtw()) - notify_profiler_of_surviving_large_objects(); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + GCToEEInterface::DiagWalkLOHSurvivors(__this); sweep_large_objects(); } } @@ -22432,7 +22424,7 @@ void gc_heap::plan_phase (int condemned_gen_number) for (i = 0; i < n_heaps; i++) { //copy the card and brick tables - if (g_card_table!= g_heaps[i]->card_table) + if (g_gc_card_table!= g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } @@ -22523,12 +22515,7 @@ void gc_heap::plan_phase (int condemned_gen_number) assert (generation_allocation_segment (consing_gen) == ephemeral_heap_segment); -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - if (ShouldTrackMovementForProfilerOrEtw()) - { - record_survived_for_profiler(condemned_gen_number, first_condemned_address); - } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + GCToEEInterface::DiagWalkSurvivors(__this); relocate_phase (condemned_gen_number, first_condemned_address); compact_phase (condemned_gen_number, first_condemned_address, @@ -22738,12 +22725,7 @@ void gc_heap::plan_phase (int condemned_gen_number) fix_older_allocation_area (older_gen); } -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - if (ShouldTrackMovementForProfilerOrEtw()) - { - record_survived_for_profiler(condemned_gen_number, 
first_condemned_address); - } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + GCToEEInterface::DiagWalkSurvivors(__this); gen0_big_free_spaces = 0; make_free_lists (condemned_gen_number); @@ -23949,8 +23931,7 @@ void gc_heap::relocate_survivors (int condemned_gen_number, } } -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args, size_t profiling_context) +void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, walk_relocate_args* args) { if (check_last_object_p) { @@ -23970,15 +23951,10 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w } ptrdiff_t last_plug_relocation = node_relocation_distance (plug); - ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0; - STRESS_LOG_PLUG_MOVE(plug, (plug + size), -last_plug_relocation); + ptrdiff_t reloc = settings.compaction ? last_plug_relocation : 0; - ETW::GCLog::MovedReference(plug, - (plug + size), - reloc, - profiling_context, - settings.compaction); + (args->fn) (plug, (plug + size), reloc, args->profiling_context, settings.compaction, FALSE); if (check_last_object_p) { @@ -23995,12 +23971,12 @@ void gc_heap::walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, w } } -void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context) +void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args) { assert ((tree != NULL)); if (node_left_child (tree)) { - walk_relocation_in_brick (tree + node_left_child (tree), args, profiling_context); + walk_relocation_in_brick (tree + node_left_child (tree), args); } uint8_t* plug = tree; @@ -24029,7 +24005,7 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, assert (last_plug_size >= Align (min_obj_size)); } - walk_plug (args->last_plug, last_plug_size, check_last_object_p, args, 
profiling_context); + walk_plug (args->last_plug, last_plug_size, check_last_object_p, args); } else { @@ -24042,18 +24018,14 @@ void gc_heap::walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, if (node_right_child (tree)) { - walk_relocation_in_brick (tree + node_right_child (tree), args, profiling_context); - + walk_relocation_in_brick (tree + node_right_child (tree), args); } } -void gc_heap::walk_relocation (int condemned_gen_number, - uint8_t* first_condemned_address, - size_t profiling_context) - +void gc_heap::walk_relocation (size_t profiling_context, record_surv_fn fn) { - generation* condemned_gen = generation_of (condemned_gen_number); - uint8_t* start_address = first_condemned_address; + generation* condemned_gen = generation_of (settings.condemned_generation); + uint8_t* start_address = generation_allocation_start (condemned_gen); size_t current_brick = brick_of (start_address); heap_segment* current_heap_segment = heap_segment_rw (generation_start_segment (condemned_gen)); @@ -24066,6 +24038,8 @@ void gc_heap::walk_relocation (int condemned_gen_number, args.is_shortened = FALSE; args.pinned_plug_entry = 0; args.last_plug = 0; + args.profiling_context = profiling_context; + args.fn = fn; while (1) { @@ -24075,8 +24049,8 @@ void gc_heap::walk_relocation (int condemned_gen_number, { walk_plug (args.last_plug, (heap_segment_allocated (current_heap_segment) - args.last_plug), - args.is_shortened, - &args, profiling_context); + args.is_shortened, + &args); args.last_plug = 0; } if (heap_segment_next_rw (current_heap_segment)) @@ -24097,16 +24071,29 @@ void gc_heap::walk_relocation (int condemned_gen_number, { walk_relocation_in_brick (brick_address (current_brick) + brick_entry - 1, - &args, - profiling_context); + &args); } } current_brick++; } } +void gc_heap::walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type) +{ + if (type == walk_for_gc) + walk_survivors_relocation (context, fn); #if defined(BACKGROUND_GC) && 
defined(FEATURE_EVENT_TRACE) -void gc_heap::walk_relocation_for_bgc(size_t profiling_context) + else if (type == walk_for_bgc) + walk_survivors_for_bgc (context, fn); +#endif //BACKGROUND_GC && FEATURE_EVENT_TRACE + else if (type == walk_for_loh) + walk_survivors_for_loh (context, fn); + else + assert (!"unknown type!"); +} + +#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) +void gc_heap::walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn) { // This should only be called for BGCs assert(settings.concurrent); @@ -24140,8 +24127,7 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context) uint8_t* end = heap_segment_allocated (seg); while (o < end) - { - + { if (method_table(o) == g_pFreeObjectMethodTable) { o += Align (size (o), align_const); @@ -24164,51 +24150,18 @@ void gc_heap::walk_relocation_for_bgc(size_t profiling_context) uint8_t* plug_end = o; - // Note on last parameter: since this is for bgc, only ETW - // should be sending these events so that existing profapi profilers - // don't get confused. - ETW::GCLog::MovedReference( - plug_start, + fn (plug_start, plug_end, 0, // Reloc distance == 0 as this is non-compacting profiling_context, FALSE, // Non-compacting - FALSE); // fAllowProfApiNotification + TRUE); // BGC } seg = heap_segment_next (seg); } } - -void gc_heap::make_free_lists_for_profiler_for_bgc () -{ - assert(settings.concurrent); - - size_t profiling_context = 0; - ETW::GCLog::BeginMovedReferences(&profiling_context); - - // This provides the profiler with information on what blocks of - // memory are moved during a gc. - - walk_relocation_for_bgc(profiling_context); - - // Notify the EE-side profiling code that all the references have been traced for - // this heap, and that it needs to flush all cached data it hasn't sent to the - // profiler and release resources it no longer needs. Since this is for bgc, only - // ETW should be sending these events so that existing profapi profilers don't get confused. 
- ETW::GCLog::EndMovedReferences(profiling_context, FALSE /* fAllowProfApiNotification */); - -#ifdef MULTIPLE_HEAPS - bgc_t_join.join(this, gc_join_after_profiler_heap_walk); - if (bgc_t_join.joined()) - { - bgc_t_join.restart(); - } -#endif // MULTIPLE_HEAPS -} - #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) void gc_heap::relocate_phase (int condemned_gen_number, uint8_t* first_condemned_address) @@ -24809,7 +24762,7 @@ void gc_heap::compact_phase (int condemned_gen_number, #pragma warning(push) #pragma warning(disable:4702) // C4702: unreachable code: gc_thread_function may not return #endif //_MSC_VER -void __stdcall gc_heap::gc_thread_stub (void* arg) +void gc_heap::gc_thread_stub (void* arg) { ClrFlsSetThreadType (ThreadType_GC); STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY); @@ -25177,14 +25130,14 @@ BOOL gc_heap::commit_mark_array_new_seg (gc_heap* hp, if (new_card_table == 0) { - new_card_table = g_card_table; + new_card_table = g_gc_card_table; } if (hp->card_table != new_card_table) { if (new_lowest_address == 0) { - new_lowest_address = g_lowest_address; + new_lowest_address = g_gc_lowest_address; } uint32_t* ct = &new_card_table[card_word (gcard_of (new_lowest_address))]; @@ -29174,7 +29127,7 @@ generation* gc_heap::expand_heap (int condemned_generation, return consing_gen; //copy the card and brick tables - if (g_card_table!= card_table) + if (g_gc_card_table!= card_table) copy_brick_card_table(); BOOL new_segment_p = (heap_segment_next (new_seg) == 0); @@ -30619,35 +30572,21 @@ BOOL gc_heap::large_object_marked (uint8_t* o, BOOL clearp) return m; } -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -void gc_heap::record_survived_for_profiler(int condemned_gen_number, uint8_t * start_address) +void gc_heap::walk_survivors_relocation (size_t profiling_context, record_surv_fn fn) { - size_t profiling_context = 0; - - 
ETW::GCLog::BeginMovedReferences(&profiling_context); - // Now walk the portion of memory that is actually being relocated. - walk_relocation(condemned_gen_number, start_address, profiling_context); + walk_relocation (profiling_context, fn); #ifdef FEATURE_LOH_COMPACTION if (loh_compacted_p) { - walk_relocation_loh (profiling_context); + walk_relocation_for_loh (profiling_context, fn); } #endif //FEATURE_LOH_COMPACTION - - // Notify the EE-side profiling code that all the references have been traced for - // this heap, and that it needs to flush all cached data it hasn't sent to the - // profiler and release resources it no longer needs. - ETW::GCLog::EndMovedReferences(profiling_context); } -void gc_heap::notify_profiler_of_surviving_large_objects () +void gc_heap::walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn) { - size_t profiling_context = 0; - - ETW::GCLog::BeginMovedReferences(&profiling_context); - generation* gen = large_object_generation; heap_segment* seg = heap_segment_rw (generation_start_segment (gen));; @@ -30657,13 +30596,6 @@ void gc_heap::notify_profiler_of_surviving_large_objects () uint8_t* plug_end = o; uint8_t* plug_start = o; - // Generally, we can only get here if this is TRUE: - // (CORProfilerTrackGC() || ETW::GCLog::ShouldTrackMovementForEtw()) - // But we can't always assert that, as races could theoretically cause GC profiling - // or ETW to turn off just before we get here. This is harmless (we do checks later - // on, under appropriate locks, before actually calling into profilers), though it's - // a slowdown to determine these plugs for nothing. 
- while (1) { if (o >= heap_segment_allocated (seg)) @@ -30691,12 +30623,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects () plug_end = o; - ETW::GCLog::MovedReference( - plug_start, - plug_end, - 0, - profiling_context, - FALSE); + fn (plug_start, plug_end, 0, profiling_context, FALSE, FALSE); } else { @@ -30706,9 +30633,7 @@ void gc_heap::notify_profiler_of_surviving_large_objects () } } } - ETW::GCLog::EndMovedReferences(profiling_context); } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) #ifdef BACKGROUND_GC @@ -31940,7 +31865,6 @@ void gc_heap::descr_card_table () void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context) { -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) #ifdef MULTIPLE_HEAPS int n_heaps = g_theGCHeap->GetNumberOfHeaps (); for (int i = 0; i < n_heaps; i++) @@ -32018,7 +31942,6 @@ void gc_heap::descr_generations_to_profiler (gen_walk_fn fn, void *context) curr_gen_number0--; } } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) } #ifdef TRACE_GC @@ -32514,7 +32437,7 @@ void gc_heap::clear_all_mark_array() void gc_heap::verify_mark_array_cleared (heap_segment* seg) { #if defined (VERIFY_HEAP) && defined (MARK_ARRAY) - assert (card_table == g_card_table); + assert (card_table == g_gc_card_table); size_t markw = mark_word_of (heap_segment_mem (seg)); size_t markw_end = mark_word_of (heap_segment_reserved (seg)); @@ -32862,8 +32785,8 @@ gc_heap::verify_heap (BOOL begin_gc_p) #endif //BACKGROUND_GC #ifndef MULTIPLE_HEAPS - if ((g_ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) || - (g_ephemeral_high != heap_segment_reserved (ephemeral_heap_segment))) + if ((ephemeral_low != generation_allocation_start (generation_of (max_generation - 1))) || + (ephemeral_high != heap_segment_reserved (ephemeral_heap_segment))) { FATAL_GC_ERROR(); } @@ -32922,7 +32845,7 @@ gc_heap::verify_heap (BOOL begin_gc_p) for (int i = 0; i < n_heaps; i++) { //copy the 
card and brick tables - if (g_card_table != g_heaps[i]->card_table) + if (g_gc_card_table != g_heaps[i]->card_table) { g_heaps[i]->copy_brick_card_table(); } @@ -32931,7 +32854,7 @@ gc_heap::verify_heap (BOOL begin_gc_p) current_join->restart(); } #else - if (g_card_table != card_table) + if (g_gc_card_table != card_table) copy_brick_card_table(); #endif //MULTIPLE_HEAPS @@ -33356,11 +33279,11 @@ HRESULT GCHeap::Shutdown () //CloseHandle (WaitForGCEvent); //find out if the global card table hasn't been used yet - uint32_t* ct = &g_card_table[card_word (gcard_of (g_lowest_address))]; + uint32_t* ct = &g_gc_card_table[card_word (gcard_of (g_gc_lowest_address))]; if (card_table_refcount (ct) == 0) { destroy_card_table (ct); - g_card_table = 0; + g_gc_card_table = 0; #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP SoftwareWriteWatch::StaticClose(); #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP @@ -33520,7 +33443,7 @@ HRESULT GCHeap::Initialize () return E_FAIL; } - StompWriteBarrierResize(true, false); + stomp_write_barrier_initialize(); #ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way #if defined (STRESS_HEAP) && !defined (MULTIPLE_HEAPS) @@ -33557,10 +33480,7 @@ HRESULT GCHeap::Initialize () { GCScan::GcRuntimeStructuresValid (TRUE); -#ifdef GC_PROFILING - if (CORProfilerTrackGC()) - UpdateGenerationBounds(); -#endif // GC_PROFILING + GCToEEInterface::DiagUpdateGenerationBounds(); } return hr; @@ -33644,7 +33564,7 @@ Object * GCHeap::NextObj (Object * object) uint8_t* o = (uint8_t*)object; #ifndef FEATURE_BASICFREEZE - if (!((o < g_highest_address) && (o >= g_lowest_address))) + if (!((o < g_gc_highest_address) && (o >= g_gc_lowest_address))) { return NULL; } @@ -33715,7 +33635,7 @@ BOOL GCHeap::IsHeapPointer (void* vpObject, BOOL small_heap_only) uint8_t* object = (uint8_t*) vpObject; #ifndef FEATURE_BASICFREEZE - if (!((object < g_highest_address) && (object >= g_lowest_address))) + if (!((object < g_gc_highest_address) && (object 
>= g_gc_lowest_address))) return FALSE; #endif //!FEATURE_BASICFREEZE @@ -34969,7 +34889,6 @@ void gc_heap::do_post_gc() { if (!settings.concurrent) { - GCProfileWalkHeap(); initGCShadow(); } @@ -34989,13 +34908,10 @@ void gc_heap::do_post_gc() GCToEEInterface::GcDone(settings.condemned_generation); -#ifdef GC_PROFILING - if (!settings.concurrent) - { - UpdateGenerationBounds(); - GarbageCollectionFinishedCallback(); - } -#endif // GC_PROFILING + GCToEEInterface::DiagGCEnd(VolatileLoad(&settings.gc_index), + (uint32_t)settings.condemned_generation, + (uint32_t)settings.reason, + !!settings.concurrent); //dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)", dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)", @@ -35772,85 +35688,6 @@ void GCHeap::SetFinalizationRun (Object* obj) #endif // FEATURE_PREMORTEM_FINALIZATION -//---------------------------------------------------------------------------- -// -// Write Barrier Support for bulk copy ("Clone") operations -// -// StartPoint is the target bulk copy start point -// len is the length of the bulk copy (in bytes) -// -// -// Performance Note: -// -// This is implemented somewhat "conservatively", that is we -// assume that all the contents of the bulk copy are object -// references. If they are not, and the value lies in the -// ephemeral range, we will set false positives in the card table. -// -// We could use the pointer maps and do this more accurately if necessary - -#if defined(_MSC_VER) && defined(_TARGET_X86_) -#pragma optimize("y", on) // Small critical routines, don't put in EBP frame -#endif //_MSC_VER && _TARGET_X86_ - -void -GCHeap::SetCardsAfterBulkCopy( Object **StartPoint, size_t len ) -{ - Object **rover; - Object **end; - - // Target should aligned - assert(Aligned ((size_t)StartPoint)); - - - // Don't optimize the Generation 0 case if we are checking for write barrier voilations - // since we need to update the shadow heap even in the generation 0 case. 
-#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC) - if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK) - for(unsigned i=0; i < len / sizeof(Object*); i++) - updateGCShadow(&StartPoint[i], StartPoint[i]); -#endif //WRITE_BARRIER_CHECK && !SERVER_GC - -#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - if (SoftwareWriteWatch::IsEnabledForGCHeap()) - { - SoftwareWriteWatch::SetDirtyRegion(StartPoint, len); - } -#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - - // If destination is in Gen 0 don't bother - if ( -#ifdef BACKGROUND_GC - (!gc_heap::settings.concurrent) && -#endif //BACKGROUND_GC - (g_theGCHeap->WhichGeneration( (Object*) StartPoint ) == 0)) - return; - - rover = StartPoint; - end = StartPoint + (len/sizeof(Object*)); - while (rover < end) - { - if ( (((uint8_t*)*rover) >= g_ephemeral_low) && (((uint8_t*)*rover) < g_ephemeral_high) ) - { - // Set Bit For Card and advance to next card - size_t card = gcard_of ((uint8_t*)rover); - - Interlocked::Or (&g_card_table[card/card_word_width], (1U << (card % card_word_width))); - // Skip to next card for the object - rover = (Object**)align_on_card ((uint8_t*)(rover+1)); - } - else - { - rover++; - } - } -} - -#if defined(_MSC_VER) && defined(_TARGET_X86_) -#pragma optimize("", on) // Go back to command line default optimizations -#endif //_MSC_VER && _TARGET_X86_ - - #ifdef FEATURE_PREMORTEM_FINALIZATION //-------------------------------------------------------------------- @@ -36278,21 +36115,17 @@ CFinalize::GcScanRoots (promote_func* fn, int hn, ScanContext *pSC) } } -#ifdef GC_PROFILING -void CFinalize::WalkFReachableObjects (gc_heap* hp) +void CFinalize::WalkFReachableObjects (fq_walk_fn fn) { - BEGIN_PIN_PROFILER(CORProfilerPresent()); Object** startIndex = SegQueue (CriticalFinalizerListSeg); Object** stopCriticalIndex = SegQueueLimit (CriticalFinalizerListSeg); Object** stopIndex = SegQueueLimit (FinalizerListSeg); for (Object** po = startIndex; po < stopIndex; 
po++) { //report *po - g_profControlBlock.pProfInterface->FinalizeableObjectQueued(po < stopCriticalIndex, (ObjectID)*po); + fn(po < stopCriticalIndex, *po); } - END_PIN_PROFILER(); } -#endif //GC_PROFILING BOOL CFinalize::ScanForFinalization (promote_func* pfn, int gen, BOOL mark_only_p, @@ -36528,8 +36361,7 @@ void CFinalize::CheckFinalizerObjects() // End of VM specific support // //------------------------------------------------------------------------------ - -void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) +void gc_heap::walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) { generation* gen = gc_heap::generation_of (gen_number); heap_segment* seg = generation_start_segment (gen); @@ -36585,9 +36417,29 @@ void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_la } } -void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context) +void gc_heap::walk_finalize_queue (fq_walk_fn fn) +{ +#ifdef FEATURE_PREMORTEM_FINALIZATION + finalize_queue->WalkFReachableObjects (fn); +#endif //FEATURE_PREMORTEM_FINALIZATION +} + +void gc_heap::walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) +{ +#ifdef MULTIPLE_HEAPS + for (int hn = 0; hn < gc_heap::n_heaps; hn++) + { + gc_heap* hp = gc_heap::g_heaps [hn]; + + hp->walk_heap_per_heap (fn, context, gen_number, walk_large_object_heap_p); + } +#else + walk_heap_per_heap(fn, context, gen_number, walk_large_object_heap_p); +#endif //MULTIPLE_HEAPS +} + +void GCHeap::DiagWalkObject (Object* obj, walk_fn fn, void* context) { -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) uint8_t* o = (uint8_t*)obj; if (o) { @@ -36602,7 +36454,48 @@ void GCHeap::WalkObject (Object* obj, walk_fn fn, void* context) } ); } -#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) +} + +void GCHeap::DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, 
walk_surv_type type) +{ + gc_heap* hp = (gc_heap*)gc_context; + hp->walk_survivors (fn, diag_context, type); +} + +void GCHeap::DiagWalkHeap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) +{ + gc_heap::walk_heap (fn, context, gen_number, walk_large_object_heap_p); +} + +void GCHeap::DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn) +{ + gc_heap* hp = (gc_heap*)gc_context; + hp->walk_finalize_queue (fn); +} + +void GCHeap::DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* sc) +{ +#ifdef MULTIPLE_HEAPS + for (int hn = 0; hn < gc_heap::n_heaps; hn++) + { + gc_heap* hp = gc_heap::g_heaps [hn]; + hp->finalize_queue->GcScanRoots(fn, hn, sc); + } +#else + pGenGCHeap->finalize_queue->GcScanRoots(fn, 0, sc); +#endif //MULTIPLE_HEAPS +} + +void GCHeap::DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context) +{ + UNREFERENCED_PARAMETER(gen_number); + GCScan::GcScanHandlesForProfilerAndETW (max_generation, context, fn); +} + +void GCHeap::DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context) +{ + UNREFERENCED_PARAMETER(gen_number); + GCScan::GcScanDependentHandlesForProfilerAndETW (max_generation, context, fn); } // Go through and touch (read) each page straddled by a memory block. 
@@ -36649,11 +36542,11 @@ void initGCShadow() if (!(g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)) return; - size_t len = g_highest_address - g_lowest_address; + size_t len = g_gc_highest_address - g_gc_lowest_address; if (len > (size_t)(g_GCShadowEnd - g_GCShadow)) { deleteGCShadow(); - g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(0, len, 0, VirtualReserveFlags::None); + g_GCShadowEnd = g_GCShadow = (uint8_t *)GCToOSInterface::VirtualReserve(len, 0, VirtualReserveFlags::None); if (g_GCShadow == NULL || !GCToOSInterface::VirtualCommit(g_GCShadow, len)) { _ASSERTE(!"Not enough memory to run HeapVerify level 2"); @@ -36668,10 +36561,10 @@ void initGCShadow() g_GCShadowEnd += len; } - // save the value of g_lowest_address at this time. If this value changes before + // save the value of g_gc_lowest_address at this time. If this value changes before // the next call to checkGCWriteBarrier() it means we extended the heap (with a // large object segment most probably), and the whole shadow segment is inconsistent. - g_shadow_lowest_address = g_lowest_address; + g_shadow_lowest_address = g_gc_lowest_address; //****** Copy the whole GC heap ****** // @@ -36681,7 +36574,7 @@ void initGCShadow() generation* gen = gc_heap::generation_of (max_generation); heap_segment* seg = heap_segment_rw (generation_start_segment (gen)); - ptrdiff_t delta = g_GCShadow - g_lowest_address; + ptrdiff_t delta = g_GCShadow - g_gc_lowest_address; BOOL small_object_segments = TRUE; while(1) { @@ -36709,7 +36602,7 @@ void initGCShadow() // test to see if 'ptr' was only updated via the write barrier. 
inline void testGCShadow(Object** ptr) { - Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_lowest_address)]; + Object** shadow = (Object**) &g_GCShadow[((uint8_t*) ptr - g_gc_lowest_address)]; if (*ptr != 0 && (uint8_t*) shadow < g_GCShadowEnd && *ptr != *shadow) { @@ -36768,9 +36661,9 @@ void testGCShadowHelper (uint8_t* x) // Walk the whole heap, looking for pointers that were not updated with the write barrier. void checkGCWriteBarrier() { - // g_shadow_lowest_address != g_lowest_address means the GC heap was extended by a segment + // g_shadow_lowest_address != g_gc_lowest_address means the GC heap was extended by a segment // and the GC shadow segment did not track that change! - if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_lowest_address) + if (g_GCShadowEnd <= g_GCShadow || g_shadow_lowest_address != g_gc_lowest_address) { // No shadow stack, nothing to check. return; diff --git a/src/Native/gc/gc.h b/src/Native/gc/gc.h index ca9c28d8f..6f8626a3d 100644 --- a/src/Native/gc/gc.h +++ b/src/Native/gc/gc.h @@ -14,7 +14,19 @@ Module Name: #ifndef __GC_H #define __GC_H +#ifdef Sleep +// This is a funny workaround for the fact that "common.h" defines Sleep to be +// Dont_Use_Sleep, with the hope of causing linker errors whenever someone tries to use sleep. +// +// However, GCToOSInterface defines a function called Sleep, which (due to this define) becomes +// "Dont_Use_Sleep", which the GC in turn happily uses. The symbol that GCToOSInterface actually +// exported was called "GCToOSInterface::Dont_Use_Sleep". While we progress in making the GC standalone, +// we'll need to break the dependency on common.h (the VM header) and this problem will become moot. 
+#undef Sleep +#endif // Sleep + #include "gcinterface.h" +#include "env/gcenv.os.h" #include "env/gcenv.ee.h" #ifdef FEATURE_STANDALONE_GC @@ -125,6 +137,10 @@ class DacHeapWalker; #define MP_LOCKS +extern "C" uint32_t* g_gc_card_table; +extern "C" uint8_t* g_gc_lowest_address; +extern "C" uint8_t* g_gc_highest_address; + namespace WKS { ::IGCHeapInternal* CreateGCHeap(); class GCHeap; diff --git a/src/Native/gc/gccommon.cpp b/src/Native/gc/gccommon.cpp index 2e6bfce83..0292705a1 100644 --- a/src/Native/gc/gccommon.cpp +++ b/src/Native/gc/gccommon.cpp @@ -26,28 +26,22 @@ IGCHeapInternal* g_theGCHeap; IGCToCLR* g_theGCToCLR; #endif // FEATURE_STANDALONE_GC -/* global versions of the card table and brick table */ -GPTR_IMPL(uint32_t,g_card_table); - -/* absolute bounds of the GC memory */ -GPTR_IMPL_INIT(uint8_t,g_lowest_address,0); -GPTR_IMPL_INIT(uint8_t,g_highest_address,0); - #ifdef GC_CONFIG_DRIVEN GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT); #endif //GC_CONFIG_DRIVEN #ifndef DACCESS_COMPILE -uint8_t* g_ephemeral_low = (uint8_t*)1; -uint8_t* g_ephemeral_high = (uint8_t*)~0; - #ifdef WRITE_BARRIER_CHECK uint8_t* g_GCShadow; uint8_t* g_GCShadowEnd; uint8_t* g_shadow_lowest_address = NULL; #endif +uint32_t* g_gc_card_table; +uint8_t* g_gc_lowest_address = 0; +uint8_t* g_gc_highest_address = 0; + VOLATILE(int32_t) m_GCLock = -1; #ifdef GC_CONFIG_DRIVEN diff --git a/src/Native/gc/gcee.cpp b/src/Native/gc/gcee.cpp index 58a553661..c93cc91b5 100644 --- a/src/Native/gc/gcee.cpp +++ b/src/Native/gc/gcee.cpp @@ -381,209 +381,6 @@ size_t GCHeap::GetNow() return GetHighPrecisionTimeStamp(); } -void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags) -{ -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - Object *pObj = *ppObject; -#ifdef INTERIOR_POINTERS - if (dwFlags & GC_CALL_INTERIOR) - { - uint8_t *o = (uint8_t*)pObj; - gc_heap* hp = gc_heap::heap_of (o); - - if ((o < hp->gc_low) || (o >= hp->gc_high)) - { - 
return; - } - pObj = (Object*) hp->find_object(o, hp->gc_low); - } -#endif //INTERIOR_POINTERS - ScanRootsHelper(pObj, ppObject, pSC, dwFlags); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -} - -// TODO - at some point we would like to completely decouple profiling -// from ETW tracing using a pattern similar to this, where the -// ProfilingScanContext has flags about whether or not certain things -// should be tracked, and each one of these ProfilerShouldXYZ functions -// will check these flags and determine what to do based upon that. -// GCProfileWalkHeapWorker can, in turn, call those methods without fear -// of things being ifdef'd out. - -// Returns TRUE if GC profiling is enabled and the profiler -// should scan dependent handles, FALSE otherwise. -BOOL ProfilerShouldTrackConditionalWeakTableElements() -{ -#if defined(GC_PROFILING) - return CORProfilerTrackConditionalWeakTableElements(); -#else - return FALSE; -#endif // defined (GC_PROFILING) -} - -// If GC profiling is enabled, informs the profiler that we are done -// tracing dependent handles. -void ProfilerEndConditionalWeakTableElementReferences(void* heapId) -{ -#if defined (GC_PROFILING) - g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId); -#else - UNREFERENCED_PARAMETER(heapId); -#endif // defined (GC_PROFILING) -} - -// If GC profiling is enabled, informs the profiler that we are done -// tracing root references. 
-void ProfilerEndRootReferences2(void* heapId) -{ -#if defined (GC_PROFILING) - g_profControlBlock.pProfInterface->EndRootReferences2(heapId); -#else - UNREFERENCED_PARAMETER(heapId); -#endif // defined (GC_PROFILING) -} - -// This is called only if we've determined that either: -// a) The Profiling API wants to do a walk of the heap, and it has pinned the -// profiler in place (so it cannot be detached), and it's thus safe to call into the -// profiler, OR -// b) ETW infrastructure wants to do a walk of the heap either to log roots, -// objects, or both. -// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since -// ETW can ask for roots, but not objects -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - -void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw) -{ - { - ProfilingScanContext SC(fProfilerPinned); - - // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them. - if (fProfilerPinned || fShouldWalkHeapRootsForEtw) - { -#ifdef MULTIPLE_HEAPS - int hn; - - // Must emulate each GC thread number so we can hit each - // heap for enumerating the roots. - for (hn = 0; hn < gc_heap::n_heaps; hn++) - { - // Ask the vm to go over all of the roots for this specific - // heap. 
- gc_heap* hp = gc_heap::g_heaps [hn]; - SC.thread_number = hn; - GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC); - - // The finalizer queue is also a source of roots - SC.dwEtwRootKind = kEtwGCRootKindFinalizer; - hp->finalize_queue->GcScanRoots(&ProfScanRootsHelper, hn, &SC); - } -#else - // Ask the vm to go over all of the roots - GCScan::GcScanRoots(&ProfScanRootsHelper, max_generation, max_generation, &SC); - - // The finalizer queue is also a source of roots - SC.dwEtwRootKind = kEtwGCRootKindFinalizer; - pGenGCHeap->finalize_queue->GcScanRoots(&ProfScanRootsHelper, 0, &SC); - -#endif // MULTIPLE_HEAPS - // Handles are kept independent of wks/svr/concurrent builds - SC.dwEtwRootKind = kEtwGCRootKindHandle; - GCScan::GcScanHandlesForProfilerAndETW(max_generation, &SC); - - // indicate that regular handle scanning is over, so we can flush the buffered roots - // to the profiler. (This is for profapi only. ETW will flush after the - // entire heap was is complete, via ETW::GCLog::EndHeapDump.) - if (fProfilerPinned) - { - ProfilerEndRootReferences2(&SC.pHeapId); - } - } - - // **** Scan dependent handles: only if the profiler supports it or ETW wants roots - if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) || - fShouldWalkHeapRootsForEtw) - { - // GcScanDependentHandlesForProfiler double-checks - // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler - - GCScan::GcScanDependentHandlesForProfilerAndETW(max_generation, &SC); - - // indicate that dependent handle scanning is over, so we can flush the buffered roots - // to the profiler. (This is for profapi only. ETW will flush after the - // entire heap was is complete, via ETW::GCLog::EndHeapDump.) 
- if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) - { - ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId); - } - } - - ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext); - - // **** Walk objects on heap: only if profiling API wants them or ETW wants them. - if (fProfilerPinned || fShouldWalkHeapObjectsForEtw) - { -#ifdef MULTIPLE_HEAPS - int hn; - - // Walk the heap and provide the objref to the profiler - for (hn = 0; hn < gc_heap::n_heaps; hn++) - { - gc_heap* hp = gc_heap::g_heaps [hn]; - hp->walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */); - } -#else - gc_heap::walk_heap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE); -#endif //MULTIPLE_HEAPS - } - -#ifdef FEATURE_EVENT_TRACE - // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers - // should be flushed into the ETW stream - if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw) - { - ETW::GCLog::EndHeapDump(&profilerWalkHeapContext); - } -#endif // FEATURE_EVENT_TRACE - } -} -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - -void GCProfileWalkHeap() -{ - BOOL fWalkedHeapForProfiler = FALSE; - -#ifdef FEATURE_EVENT_TRACE - if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw()) - ETW::GCLog::WalkStaticsAndCOMForETW(); - - BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw(); - BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw(); -#else // !FEATURE_EVENT_TRACE - BOOL fShouldWalkHeapRootsForEtw = FALSE; - BOOL fShouldWalkHeapObjectsForEtw = FALSE; -#endif // FEATURE_EVENT_TRACE - -#if defined (GC_PROFILING) - { - BEGIN_PIN_PROFILER(CORProfilerTrackGC()); - GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw); - fWalkedHeapForProfiler = TRUE; - END_PIN_PROFILER(); - } -#endif // defined (GC_PROFILING) - -#if defined 
(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE - // is defined, since both of them make use of the walk heap worker. - if (!fWalkedHeapForProfiler && - (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw)) - { - GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw); - } -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -} - BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart) { return GcInProgress || (bConsiderGCStart? VolatileLoad(&gc_heap::gc_started) : FALSE); @@ -786,7 +583,7 @@ IGCHeapInternal* CreateGCHeap() { return new(nothrow) GCHeap(); // we return wks or svr } -void GCHeap::TraceGCSegments() +void GCHeap::DiagTraceGCSegments() { #ifdef FEATURE_EVENT_TRACE heap_segment* seg = 0; @@ -823,7 +620,7 @@ void GCHeap::TraceGCSegments() #endif // FEATURE_EVENT_TRACE } -void GCHeap::DescrGenerationsToProfiler (gen_walk_fn fn, void *context) +void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context) { #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) pGenGCHeap->descr_generations_to_profiler(fn, context); diff --git a/src/Native/gc/gcenv.ee.standalone.inl b/src/Native/gc/gcenv.ee.standalone.inl index 2ecc6fc83..3b64586d7 100644 --- a/src/Native/gc/gcenv.ee.standalone.inl +++ b/src/Native/gc/gcenv.ee.standalone.inl @@ -125,4 +125,52 @@ inline Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunctio return g_theGCToCLR->CreateBackgroundThread(threadStart, arg); } +inline void GCToEEInterface::DiagGCStart(int gen, bool isInduced) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->DiagGCStart(gen, isInduced); +} + +inline void GCToEEInterface::DiagUpdateGenerationBounds() +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->DiagUpdateGenerationBounds(); +} + +inline void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) +{ + assert(g_theGCToCLR 
!= nullptr); + g_theGCToCLR->DiagGCEnd(index, gen, reason, fConcurrent); +} + +inline void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->DiagWalkFReachableObjects(gcContext); +} + +inline void GCToEEInterface::DiagWalkSurvivors(void* gcContext) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->DiagWalkSurvivors(gcContext); +} + +inline void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->DiagWalkLOHSurvivors(gcContext); +} + +inline void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext) +{ + assert(g_theGCToCLR != nullptr); + return g_theGCToCLR->DiagWalkBGCSurvivors(gcContext); +} + +inline void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) +{ + assert(g_theGCToCLR != nullptr); + g_theGCToCLR->StompWriteBarrier(args); +} + #endif // __GCTOENV_EE_STANDALONE_INL__ diff --git a/src/Native/gc/sample/gcenv.windows.cpp b/src/Native/gc/gcenv.unix.cpp index 76187f218..0235952e2 100644 --- a/src/Native/gc/sample/gcenv.windows.cpp +++ b/src/Native/gc/gcenv.unix.cpp @@ -2,67 +2,37 @@ // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. 
-// -// Implementation of the GC environment -// - -#include "common.h" - -#include "windows.h" - -#include "gcenv.h" -#include "gc.h" - -MethodTable * g_pFreeObjectMethodTable; - -int32_t g_TrapReturningThreads; - -bool g_fFinalizerRunOnShutDown; - -GCSystemInfo g_SystemInfo; - -static LARGE_INTEGER g_performanceFrequency; +#include "env/gcenv.structs.h" +#include "env/gcenv.base.h" +#include "env/gcenv.os.h" // Initialize the interface implementation // Return: // true if it has succeeded, false if it has failed bool GCToOSInterface::Initialize() { - if (!::QueryPerformanceFrequency(&g_performanceFrequency)) - { - return false; - } - - SYSTEM_INFO systemInfo; - GetSystemInfo(&systemInfo); - - g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors; - g_SystemInfo.dwPageSize = systemInfo.dwPageSize; - g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity; - - return true; + throw nullptr; } // Shutdown the interface implementation void GCToOSInterface::Shutdown() { + throw nullptr; } -// Get numeric id of the current thread if possible on the +// Get numeric id of the current thread if possible on the // current platform. It is indended for logging purposes only. 
// Return: // Numeric id of the current thread or 0 if the uint64_t GCToOSInterface::GetCurrentThreadIdForLogging() { - return ::GetCurrentThreadId(); + throw nullptr; } // Get id of the process -// Return: -// Id of the current process uint32_t GCToOSInterface::GetCurrentProcessId() { - return ::GetCurrentProcessId(); + throw nullptr; } // Set ideal affinity for the current thread @@ -72,63 +42,37 @@ uint32_t GCToOSInterface::GetCurrentProcessId() // true if it has succeeded, false if it has failed bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity) { - bool success = true; - -#if !defined(FEATURE_CORESYSTEM) - SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor); -#else - PROCESSOR_NUMBER proc; - - if (affinity->Group != -1) - { - proc.Group = (WORD)affinity->Group; - proc.Number = (BYTE)affinity->Processor; - proc.Reserved = 0; - - success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL); - } - else - { - if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc)) - { - proc.Number = affinity->Processor; - success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL); - } - } -#endif - - return success; + throw nullptr; } // Get the number of the current processor uint32_t GCToOSInterface::GetCurrentProcessorNumber() { - _ASSERTE(GCToOSInterface::CanGetCurrentProcessorNumber()); - return ::GetCurrentProcessorNumber(); + throw nullptr; } // Check if the OS supports getting current processor number bool GCToOSInterface::CanGetCurrentProcessorNumber() { - return true; + throw nullptr; } // Flush write buffers of processors that are executing threads of the current process void GCToOSInterface::FlushProcessWriteBuffers() { - ::FlushProcessWriteBuffers(); + throw nullptr; } // Break into a debugger void GCToOSInterface::DebugBreak() { - ::DebugBreak(); + throw nullptr; } // Get number of logical processors uint32_t GCToOSInterface::GetLogicalCpuCount() { - return g_SystemInfo.dwNumberOfProcessors; + 
throw nullptr; } // Causes the calling thread to sleep for the specified number of milliseconds @@ -136,7 +80,7 @@ uint32_t GCToOSInterface::GetLogicalCpuCount() // sleepMSec - time to sleep before switching to another thread void GCToOSInterface::Sleep(uint32_t sleepMSec) { - ::Sleep(sleepMSec); + throw nullptr; } // Causes the calling thread to yield execution to another thread that is ready to run on the current processor. @@ -144,21 +88,19 @@ void GCToOSInterface::Sleep(uint32_t sleepMSec) // switchCount - number of times the YieldThread was called in a loop void GCToOSInterface::YieldThread(uint32_t switchCount) { - SwitchToThread(); + throw nullptr; } // Reserve virtual memory range. // Parameters: -// address - starting virtual address, it can be NULL to let the function choose the starting address // size - size of the virtual memory range -// alignment - requested memory alignment +// alignment - requested memory alignment, 0 means no specific alignment requested // flags - flags to control special settings like write watching // Return: // Starting virtual address of the reserved range -void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags) +void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags) { - DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? (MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE; - return ::VirtualAlloc(0, size, memFlags, PAGE_READWRITE); + throw nullptr; } // Release virtual memory range previously reserved using VirtualReserve @@ -169,8 +111,7 @@ void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignme // true if it has succeeded, false if it has failed bool GCToOSInterface::VirtualRelease(void* address, size_t size) { - UNREFERENCED_PARAMETER(size); - return !!::VirtualFree(address, 0, MEM_RELEASE); + throw nullptr; } // Commit virtual memory range. It must be part of a range reserved using VirtualReserve. 
@@ -181,7 +122,7 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size) // true if it has succeeded, false if it has failed bool GCToOSInterface::VirtualCommit(void* address, size_t size) { - return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL; + throw nullptr; } // Decomit virtual memory range. @@ -192,10 +133,10 @@ bool GCToOSInterface::VirtualCommit(void* address, size_t size) // true if it has succeeded, false if it has failed bool GCToOSInterface::VirtualDecommit(void* address, size_t size) { - return !!::VirtualFree(address, size, MEM_DECOMMIT); + throw nullptr; } -// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no +// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no // longer of interest, but it should not be decommitted. // Parameters: // address - starting virtual address @@ -205,20 +146,13 @@ bool GCToOSInterface::VirtualDecommit(void* address, size_t size) // true if it has succeeded, false if it has failed bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock) { - bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != NULL; - if (success && unlock) - { - // Remove the page range from the working set - ::VirtualUnlock(address, size); - } - - return success; + throw nullptr; } // Check if the OS supports write watching bool GCToOSInterface::SupportsWriteWatch() { - return false; + throw nullptr; } // Reset the write tracking state for the specified virtual memory range. 
@@ -227,6 +161,7 @@ bool GCToOSInterface::SupportsWriteWatch() // size - size of the virtual memory range void GCToOSInterface::ResetWriteWatch(void* address, size_t size) { + throw nullptr; } // Retrieve addresses of the pages that are written to in a region of virtual memory @@ -241,7 +176,7 @@ void GCToOSInterface::ResetWriteWatch(void* address, size_t size) // true if it has succeeded, false if it has failed bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount) { - return false; + throw nullptr; } // Get size of the largest cache on the processor die @@ -252,8 +187,7 @@ bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, // Size of the cache size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize) { - // TODO: implement - return 0; + throw nullptr; } // Get affinity mask of the current process @@ -271,7 +205,7 @@ size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize) // specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor. bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask) { - return false; + throw nullptr; } // Get number of processors assigned to the current process @@ -279,7 +213,7 @@ bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uint // The number of processors uint32_t GCToOSInterface::GetCurrentProcessCpuCount() { - return g_SystemInfo.dwNumberOfProcessors; + throw nullptr; } // Return the size of the user-mode portion of the virtual address space of this process. 
@@ -287,27 +221,18 @@ uint32_t GCToOSInterface::GetCurrentProcessCpuCount() // non zero if it has succeeded, 0 if it has failed size_t GCToOSInterface::GetVirtualMemoryLimit() { - MEMORYSTATUSEX memStatus; - - memStatus.dwLength = sizeof(MEMORYSTATUSEX); - BOOL fRet = GlobalMemoryStatusEx(&memStatus); - _ASSERTE(fRet); - - return (size_t)memStatus.ullTotalVirtual; + throw nullptr; } // Get the physical memory that this process can use. // Return: // non zero if it has succeeded, 0 if it has failed +// Remarks: +// If a process runs with a restricted memory limit, it returns the limit. If there's no limit +// specified, it returns amount of actual physical memory. uint64_t GCToOSInterface::GetPhysicalMemoryLimit() { - MEMORYSTATUSEX memStatus; - - memStatus.dwLength = sizeof(MEMORYSTATUSEX); - BOOL fRet = GlobalMemoryStatusEx(&memStatus); - _ASSERTE(fRet); - - return memStatus.ullTotalPhys; + throw nullptr; } // Get memory status @@ -318,25 +243,7 @@ uint64_t GCToOSInterface::GetPhysicalMemoryLimit() // available_page_file - The maximum amount of memory the current process can commit, in bytes. void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file) { - MEMORYSTATUSEX memStatus; - - memStatus.dwLength = sizeof(MEMORYSTATUSEX); - BOOL fRet = GlobalMemoryStatusEx(&memStatus); - _ASSERTE (fRet); - - // If the machine has more RAM than virtual address limit, let us cap it. - // The GC can never use more than virtual address limit. 
- if (memStatus.ullAvailPhys > memStatus.ullTotalVirtual) - { - memStatus.ullAvailPhys = memStatus.ullAvailVirtual; - } - - if (memory_load != NULL) - *memory_load = memStatus.dwMemoryLoad; - if (available_physical != NULL) - *available_physical = memStatus.ullAvailPhys; - if (available_page_file != NULL) - *available_page_file = memStatus.ullAvailPageFile; + throw nullptr; } // Get a high precision performance counter @@ -344,14 +251,7 @@ void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available // The counter value int64_t GCToOSInterface::QueryPerformanceCounter() { - LARGE_INTEGER ts; - if (!::QueryPerformanceCounter(&ts)) - { - _ASSERTE(!"Fatal Error - cannot query performance counter."); - abort(); - } - - return ts.QuadPart; + throw nullptr; } // Get a frequency of the high precision performance counter @@ -359,7 +259,7 @@ int64_t GCToOSInterface::QueryPerformanceCounter() // The counter frequency int64_t GCToOSInterface::QueryPerformanceFrequency() { - return g_performanceFrequency.QuadPart; + throw nullptr; } // Get a time stamp with a low precision @@ -367,31 +267,11 @@ int64_t GCToOSInterface::QueryPerformanceFrequency() // Time stamp in milliseconds uint32_t GCToOSInterface::GetLowPrecisionTimeStamp() { - return ::GetTickCount(); + throw nullptr; } -// Parameters of the GC thread stub -struct GCThreadStubParam -{ - GCThreadFunction GCThreadFunction; - void* GCThreadParam; -}; -// GC thread stub to convert GC thread function to an OS specific thread function -static DWORD __stdcall GCThreadStub(void* param) -{ - GCThreadStubParam *stubParam = (GCThreadStubParam*)param; - GCThreadFunction function = stubParam->GCThreadFunction; - void* threadParam = stubParam->GCThreadParam; - - delete stubParam; - - function(threadParam); - - return 0; -} - -// Create a new thread +// Create a new thread for GC use // Parameters: // function - the function to be executed by the thread // param - parameters of the thread @@ -400,54 +280,29 @@ static 
DWORD __stdcall GCThreadStub(void* param) // true if it has succeeded, false if it has failed bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity) { - DWORD thread_id; - - GCThreadStubParam* stubParam = new (nothrow) GCThreadStubParam(); - if (stubParam == NULL) - { - return false; - } - - stubParam->GCThreadFunction = function; - stubParam->GCThreadParam = param; - - HANDLE gc_thread = ::CreateThread(NULL, 0, GCThreadStub, stubParam, CREATE_SUSPENDED, &thread_id); - - if (!gc_thread) - { - delete stubParam; - return false; - } - - SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST ); - - ResumeThread(gc_thread); - - CloseHandle(gc_thread); - - return true; + throw nullptr; } // Initialize the critical section void CLRCriticalSection::Initialize() { - ::InitializeCriticalSection(&m_cs); + throw nullptr; } // Destroy the critical section void CLRCriticalSection::Destroy() { - ::DeleteCriticalSection(&m_cs); + throw nullptr; } // Enter the critical section. Blocks until the section can be entered. void CLRCriticalSection::Enter() { - ::EnterCriticalSection(&m_cs); + throw nullptr; } // Leave the critical section void CLRCriticalSection::Leave() { - ::LeaveCriticalSection(&m_cs); -} + throw nullptr; +}
\ No newline at end of file diff --git a/src/Native/gc/gcenv.windows.cpp b/src/Native/gc/gcenv.windows.cpp new file mode 100644 index 000000000..a63647824 --- /dev/null +++ b/src/Native/gc/gcenv.windows.cpp @@ -0,0 +1,625 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +#include <cstdint> +#include <cassert> +#include <cstddef> +#include <memory> +#include "windows.h" +#include "psapi.h" +#include "env/gcenv.structs.h" +#include "env/gcenv.base.h" +#include "env/gcenv.os.h" + +GCSystemInfo g_SystemInfo; + +typedef BOOL (WINAPI *PGET_PROCESS_MEMORY_INFO)(HANDLE handle, PROCESS_MEMORY_COUNTERS* memCounters, uint32_t cb); +static PGET_PROCESS_MEMORY_INFO GCGetProcessMemoryInfo = 0; + +static size_t g_RestrictedPhysicalMemoryLimit = (size_t)UINTPTR_MAX; + +typedef BOOL (WINAPI *PIS_PROCESS_IN_JOB)(HANDLE processHandle, HANDLE jobHandle, BOOL* result); +typedef BOOL (WINAPI *PQUERY_INFORMATION_JOB_OBJECT)(HANDLE jobHandle, JOBOBJECTINFOCLASS jobObjectInfoClass, void* lpJobObjectInfo, DWORD cbJobObjectInfoLength, LPDWORD lpReturnLength); + +namespace { + +void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX) +{ + pMSEX->dwLength = sizeof(MEMORYSTATUSEX); + BOOL fRet = ::GlobalMemoryStatusEx(pMSEX); + assert(fRet); + + // If the machine has more RAM than virtual address limit, let us cap it. + // Our GC can never use more than virtual address limit. 
+ if (pMSEX->ullAvailPhys > pMSEX->ullTotalVirtual) + { + pMSEX->ullAvailPhys = pMSEX->ullAvailVirtual; + } +} + +static size_t GetRestrictedPhysicalMemoryLimit() +{ + LIMITED_METHOD_CONTRACT; + + // The limit was cached already + if (g_RestrictedPhysicalMemoryLimit != (size_t)UINTPTR_MAX) + return g_RestrictedPhysicalMemoryLimit; + + size_t job_physical_memory_limit = (size_t)UINTPTR_MAX; + BOOL in_job_p = FALSE; + HINSTANCE hinstKernel32 = 0; + + PIS_PROCESS_IN_JOB GCIsProcessInJob = 0; + PQUERY_INFORMATION_JOB_OBJECT GCQueryInformationJobObject = 0; + + hinstKernel32 = LoadLibraryEx(L"kernel32.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32); + if (!hinstKernel32) + goto exit; + + GCIsProcessInJob = (PIS_PROCESS_IN_JOB)GetProcAddress(hinstKernel32, "IsProcessInJob"); + if (!GCIsProcessInJob) + goto exit; + + if (!GCIsProcessInJob(GetCurrentProcess(), NULL, &in_job_p)) + goto exit; + + if (in_job_p) + { + GCGetProcessMemoryInfo = (PGET_PROCESS_MEMORY_INFO)GetProcAddress(hinstKernel32, "K32GetProcessMemoryInfo"); + + if (!GCGetProcessMemoryInfo) + goto exit; + + GCQueryInformationJobObject = (PQUERY_INFORMATION_JOB_OBJECT)GetProcAddress(hinstKernel32, "QueryInformationJobObject"); + + if (!GCQueryInformationJobObject) + goto exit; + + JOBOBJECT_EXTENDED_LIMIT_INFORMATION limit_info; + if (GCQueryInformationJobObject (NULL, JobObjectExtendedLimitInformation, &limit_info, + sizeof(limit_info), NULL)) + { + size_t job_memory_limit = (size_t)UINTPTR_MAX; + size_t job_process_memory_limit = (size_t)UINTPTR_MAX; + size_t job_workingset_limit = (size_t)UINTPTR_MAX; + + // Notes on the NT job object: + // + // You can specific a bigger process commit or working set limit than + // job limit which is pointless so we use the smallest of all 3 as + // to calculate our "physical memory load" or "available physical memory" + // when running inside a job object, ie, we treat this as the amount of physical memory + // our process is allowed to use. 
+ // + // The commit limit is already reflected by default when you run in a + // job but the physical memory load is not. + // + if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_JOB_MEMORY) != 0) + job_memory_limit = limit_info.JobMemoryLimit; + if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_PROCESS_MEMORY) != 0) + job_process_memory_limit = limit_info.ProcessMemoryLimit; + if ((limit_info.BasicLimitInformation.LimitFlags & JOB_OBJECT_LIMIT_WORKINGSET) != 0) + job_workingset_limit = limit_info.BasicLimitInformation.MaximumWorkingSetSize; + + job_physical_memory_limit = min (job_memory_limit, job_process_memory_limit); + job_physical_memory_limit = min (job_physical_memory_limit, job_workingset_limit); + + MEMORYSTATUSEX ms; + ::GetProcessMemoryLoad(&ms); + + // A sanity check in case someone set a larger limit than there is actual physical memory. + job_physical_memory_limit = (size_t) min (job_physical_memory_limit, ms.ullTotalPhys); + } + } + +exit: + if (job_physical_memory_limit == (size_t)UINTPTR_MAX) + { + job_physical_memory_limit = 0; + + FreeLibrary(hinstKernel32); + } + + VolatileStore(&g_RestrictedPhysicalMemoryLimit, job_physical_memory_limit); + return g_RestrictedPhysicalMemoryLimit; +} + +} // anonymous namespace + +// Initialize the interface implementation +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::Initialize() +{ + SYSTEM_INFO systemInfo; + GetSystemInfo(&systemInfo); + + g_SystemInfo.dwNumberOfProcessors = systemInfo.dwNumberOfProcessors; + g_SystemInfo.dwPageSize = systemInfo.dwPageSize; + g_SystemInfo.dwAllocationGranularity = systemInfo.dwAllocationGranularity; + + return true; +} + +// Shutdown the interface implementation +void GCToOSInterface::Shutdown() +{ + // nothing to do. +} + +// Get numeric id of the current thread if possible on the +// current platform. It is indended for logging purposes only. 
+// Return: +// Numeric id of the current thread or 0 if the +uint64_t GCToOSInterface::GetCurrentThreadIdForLogging() +{ + return ::GetCurrentThreadId(); +} + +// Get id of the process +uint32_t GCToOSInterface::GetCurrentProcessId() +{ + return ::GetCurrentThreadId(); +} + +// Set ideal affinity for the current thread +// Parameters: +// affinity - ideal processor affinity for the thread +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::SetCurrentThreadIdealAffinity(GCThreadAffinity* affinity) +{ + bool success = true; + +#if !defined(FEATURE_CORESYSTEM) + SetThreadIdealProcessor(GetCurrentThread(), (DWORD)affinity->Processor); +#else + PROCESSOR_NUMBER proc; + + if (affinity->Group != -1) + { + proc.Group = (WORD)affinity->Group; + proc.Number = (BYTE)affinity->Processor; + proc.Reserved = 0; + + success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL); + } + else + { + if (GetThreadIdealProcessorEx(GetCurrentThread(), &proc)) + { + proc.Number = affinity->Processor; + success = !!SetThreadIdealProcessorEx(GetCurrentThread(), &proc, NULL); + } + } +#endif + + return success; +} + +// Get the number of the current processor +uint32_t GCToOSInterface::GetCurrentProcessorNumber() +{ + assert(GCToOSInterface::CanGetCurrentProcessorNumber()); + return ::GetCurrentProcessorNumber(); +} + +// Check if the OS supports getting current processor number +bool GCToOSInterface::CanGetCurrentProcessorNumber() +{ + // on all Windows platforms we support this API exists + return true; +} + +// Flush write buffers of processors that are executing threads of the current process +void GCToOSInterface::FlushProcessWriteBuffers() +{ + ::FlushProcessWriteBuffers(); +} + +// Break into a debugger +void GCToOSInterface::DebugBreak() +{ + ::DebugBreak(); +} + +// Get number of logical processors +uint32_t GCToOSInterface::GetLogicalCpuCount() +{ + // TODO(segilles) processor detection + return 1; +} + +// Causes the calling thread to 
sleep for the specified number of milliseconds +// Parameters: +// sleepMSec - time to sleep before switching to another thread +void GCToOSInterface::Sleep(uint32_t sleepMSec) +{ + // TODO(segilles) CLR implementation of __SwitchToThread spins for short sleep durations + // to avoid context switches - is that interesting or useful here? + if (sleepMSec > 0) + { + ::SleepEx(sleepMSec, FALSE); + } +} + +// Causes the calling thread to yield execution to another thread that is ready to run on the current processor. +// Parameters: +// switchCount - number of times the YieldThread was called in a loop +void GCToOSInterface::YieldThread(uint32_t switchCount) +{ + UNREFERENCED_PARAMETER(switchCount); + SwitchToThread(); +} + +// Reserve virtual memory range. +// Parameters: +// address - starting virtual address, it can be NULL to let the function choose the starting address +// size - size of the virtual memory range +// alignment - requested memory alignment, 0 means no specific alignment requested +// flags - flags to control special settings like write watching +// Return: +// Starting virtual address of the reserved range +void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags) +{ + // Windows already ensures 64kb alignment on VirtualAlloc. The current CLR + // implementation ignores it on Windows, other than making some sanity checks on it. + UNREFERENCED_PARAMETER(alignment); + assert((alignment & (alignment - 1)) == 0); + assert(alignment <= 0x10000); + DWORD memFlags = (flags & VirtualReserveFlags::WriteWatch) ? 
(MEM_RESERVE | MEM_WRITE_WATCH) : MEM_RESERVE; + return ::VirtualAlloc(nullptr, size, memFlags, PAGE_READWRITE); +} + +// Release virtual memory range previously reserved using VirtualReserve +// Parameters: +// address - starting virtual address +// size - size of the virtual memory range +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::VirtualRelease(void* address, size_t size) +{ + return !!::VirtualFree(address, 0, MEM_RELEASE); +} + +// Commit virtual memory range. It must be part of a range reserved using VirtualReserve. +// Parameters: +// address - starting virtual address +// size - size of the virtual memory range +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::VirtualCommit(void* address, size_t size) +{ + return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr; +} + +// Decomit virtual memory range. +// Parameters: +// address - starting virtual address +// size - size of the virtual memory range +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::VirtualDecommit(void* address, size_t size) +{ + return !!::VirtualFree(address, size, MEM_DECOMMIT); +} + +// Reset virtual memory range. Indicates that data in the memory range specified by address and size is no +// longer of interest, but it should not be decommitted. +// Parameters: +// address - starting virtual address +// size - size of the virtual memory range +// unlock - true if the memory range should also be unlocked +// Return: +// true if it has succeeded, false if it has failed. Returns false also if +// unlocking was requested but the unlock failed. 
+bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock) +{ + bool success = ::VirtualAlloc(address, size, MEM_RESET, PAGE_READWRITE) != nullptr; + if (success && unlock) + { + ::VirtualUnlock(address, size); + } + + return success; +} + +// Check if the OS supports write watching +bool GCToOSInterface::SupportsWriteWatch() +{ + void* mem = GCToOSInterface::VirtualReserve(g_SystemInfo.dwAllocationGranularity, 0, VirtualReserveFlags::WriteWatch); + if (mem != nullptr) + { + GCToOSInterface::VirtualRelease(mem, g_SystemInfo.dwAllocationGranularity); + return true; + } + + return false; +} + +// Reset the write tracking state for the specified virtual memory range. +// Parameters: +// address - starting virtual address +// size - size of the virtual memory range +void GCToOSInterface::ResetWriteWatch(void* address, size_t size) +{ + ::ResetWriteWatch(address, size); +} + +// Retrieve addresses of the pages that are written to in a region of virtual memory +// Parameters: +// resetState - true indicates to reset the write tracking state +// address - starting virtual address +// size - size of the virtual memory range +// pageAddresses - buffer that receives an array of page addresses in the memory region +// pageAddressesCount - on input, size of the lpAddresses array, in array elements +// on output, the number of page addresses that are returned in the array. +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount) +{ + uint32_t flags = resetState ? 
1 : 0; + ULONG granularity; + + bool success = ::GetWriteWatch(flags, address, size, pageAddresses, (ULONG_PTR*)pageAddressesCount, &granularity) == 0; + if (success) + { + assert(granularity == OS_PAGE_SIZE); + } + + return success; +} + +// Get size of the largest cache on the processor die +// Parameters: +// trueSize - true to return true cache size, false to return scaled up size based on +// the processor architecture +// Return: +// Size of the cache +size_t GCToOSInterface::GetLargestOnDieCacheSize(bool trueSize) +{ + // TODO(segilles) processor detection (see src/vm/util.cpp:1935) + return 0; +} + +// Get affinity mask of the current process +// Parameters: +// processMask - affinity mask for the specified process +// systemMask - affinity mask for the system +// Return: +// true if it has succeeded, false if it has failed +// Remarks: +// A process affinity mask is a bit vector in which each bit represents the processors that +// a process is allowed to run on. A system affinity mask is a bit vector in which each bit +// represents the processors that are configured into a system. +// A process affinity mask is a subset of the system affinity mask. A process is only allowed +// to run on the processors configured into a system. Therefore, the process affinity mask cannot +// specify a 1 bit for a processor when the system affinity mask specifies a 0 bit for that processor. +bool GCToOSInterface::GetCurrentProcessAffinityMask(uintptr_t* processMask, uintptr_t* systemMask) +{ + return !!::GetProcessAffinityMask(::GetCurrentProcess(), (PDWORD_PTR)processMask, (PDWORD_PTR)systemMask); +} + +// Get number of processors assigned to the current process +// Return: +// The number of processors +uint32_t GCToOSInterface::GetCurrentProcessCpuCount() +{ + // TODO(segilles) this does not take into account process affinity + return g_SystemInfo.dwNumberOfProcessors; +} + +// Return the size of the user-mode portion of the virtual address space of this process. 
+// Return: +// non zero if it has succeeded, 0 if it has failed +size_t GCToOSInterface::GetVirtualMemoryLimit() +{ + MEMORYSTATUSEX memStatus; + if (::GlobalMemoryStatusEx(&memStatus)) + { + return (size_t)memStatus.ullAvailVirtual; + } + + return 0; +} + +// Get the physical memory that this process can use. +// Return: +// non zero if it has succeeded, 0 if it has failed +// Remarks: +// If a process runs with a restricted memory limit, it returns the limit. If there's no limit +// specified, it returns amount of actual physical memory. +uint64_t GCToOSInterface::GetPhysicalMemoryLimit() +{ + size_t restricted_limit = GetRestrictedPhysicalMemoryLimit(); + if (restricted_limit != 0) + return restricted_limit; + + MEMORYSTATUSEX memStatus; + if (::GlobalMemoryStatusEx(&memStatus)) + { + return memStatus.ullTotalPhys; + } + + return 0; +} + +// Get memory status +// Parameters: +// memory_load - A number between 0 and 100 that specifies the approximate percentage of physical memory +// that is in use (0 indicates no memory use and 100 indicates full memory use). +// available_physical - The amount of physical memory currently available, in bytes. +// available_page_file - The maximum amount of memory the current process can commit, in bytes. 
+void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file) +{ + uint64_t restricted_limit = GetRestrictedPhysicalMemoryLimit(); + if (restricted_limit != 0) + { + PROCESS_MEMORY_COUNTERS pmc; + if (GCGetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) + { + if (memory_load) + *memory_load = (uint32_t)((float)pmc.WorkingSetSize * 100.0 / (float)restricted_limit); + if (available_physical) + *available_physical = restricted_limit - pmc.WorkingSetSize; + // Available page file doesn't mean much when physical memory is restricted since + // we don't know how much of it is available to this process so we are not going to + // bother to make another OS call for it. + if (available_page_file) + *available_page_file = 0; + + return; + } + } + + MEMORYSTATUSEX ms; + ::GetProcessMemoryLoad(&ms); + + if (memory_load != nullptr) + *memory_load = ms.dwMemoryLoad; + if (available_physical != nullptr) + *available_physical = ms.ullAvailPhys; + if (available_page_file != nullptr) + *available_page_file = ms.ullAvailPageFile; +} + +// Get a high precision performance counter +// Return: +// The counter value +int64_t GCToOSInterface::QueryPerformanceCounter() +{ + LARGE_INTEGER ts; + if (!::QueryPerformanceCounter(&ts)) + { + assert(false && "Failed to query performance counter"); + } + + return ts.QuadPart; +} + +// Get a frequency of the high precision performance counter +// Return: +// The counter frequency +int64_t GCToOSInterface::QueryPerformanceFrequency() +{ + LARGE_INTEGER ts; + if (!::QueryPerformanceFrequency(&ts)) + { + assert(false && "Failed to query performance counter"); + } + + return ts.QuadPart; +} + +// Get a time stamp with a low precision +// Return: +// Time stamp in milliseconds +uint32_t GCToOSInterface::GetLowPrecisionTimeStamp() +{ + return ::GetTickCount(); +} + +// Parameters of the GC thread stub +struct GCThreadStubParam +{ + GCThreadFunction GCThreadFunction; + void* 
GCThreadParam; +}; + +// GC thread stub to convert GC thread function to an OS specific thread function +static DWORD GCThreadStub(void* param) +{ + GCThreadStubParam *stubParam = (GCThreadStubParam*)param; + GCThreadFunction function = stubParam->GCThreadFunction; + void* threadParam = stubParam->GCThreadParam; + + delete stubParam; + + function(threadParam); + + return 0; +} + + +// Create a new thread for GC use +// Parameters: +// function - the function to be executed by the thread +// param - parameters of the thread +// affinity - processor affinity of the thread +// Return: +// true if it has succeeded, false if it has failed +bool GCToOSInterface::CreateThread(GCThreadFunction function, void* param, GCThreadAffinity* affinity) +{ + uint32_t thread_id; + + std::unique_ptr<GCThreadStubParam> stubParam(new (std::nothrow) GCThreadStubParam()); + if (!stubParam) + { + return false; + } + + stubParam->GCThreadFunction = function; + stubParam->GCThreadParam = param; + + HANDLE gc_thread = ::CreateThread( + nullptr, + 512 * 1024 /* Thread::StackSize_Medium */, + (LPTHREAD_START_ROUTINE)GCThreadStub, + stubParam.get(), + CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, + (DWORD*)&thread_id); + + if (!gc_thread) + { + return false; + } + + stubParam.release(); + bool result = !!::SetThreadPriority(gc_thread, /* THREAD_PRIORITY_ABOVE_NORMAL );*/ THREAD_PRIORITY_HIGHEST ); + assert(result && "failed to set thread priority"); + + if (affinity->Group != GCThreadAffinity::None) + { + assert(affinity->Processor != GCThreadAffinity::None); + GROUP_AFFINITY ga; + ga.Group = (WORD)affinity->Group; + ga.Reserved[0] = 0; // reserve must be filled with zero + ga.Reserved[1] = 0; // otherwise call may fail + ga.Reserved[2] = 0; + ga.Mask = (size_t)1 << affinity->Processor; + + bool result = !!::SetThreadGroupAffinity(gc_thread, &ga, nullptr); + assert(result && "failed to set thread affinity"); + } + else if (affinity->Processor != GCThreadAffinity::None) + { + 
::SetThreadAffinityMask(gc_thread, (DWORD_PTR)1 << affinity->Processor); + } + + return true; +} + +// Initialize the critical section +void CLRCriticalSection::Initialize() +{ + ::InitializeCriticalSection(&m_cs); +} + +// Destroy the critical section +void CLRCriticalSection::Destroy() +{ + ::DeleteCriticalSection(&m_cs); +} + +// Enter the critical section. Blocks until the section can be entered. +void CLRCriticalSection::Enter() +{ + ::EnterCriticalSection(&m_cs); +} + +// Leave the critical section +void CLRCriticalSection::Leave() +{ + ::LeaveCriticalSection(&m_cs); +} diff --git a/src/Native/gc/gcimpl.h b/src/Native/gc/gcimpl.h index d7393c357..cb91c4dc3 100644 --- a/src/Native/gc/gcimpl.h +++ b/src/Native/gc/gcimpl.h @@ -77,7 +77,7 @@ public: size_t GetLastGCDuration(int generation); size_t GetNow(); - void TraceGCSegments (); + void DiagTraceGCSegments (); void PublishObject(uint8_t* obj); BOOL IsGCInProgressHelper (BOOL bConsiderGCStart = FALSE); @@ -198,8 +198,7 @@ public: BOOL FinalizeAppDomain(AppDomain *pDomain, BOOL fRunFinalizers); BOOL ShouldRestartFinalizerWatchDog(); - void SetCardsAfterBulkCopy( Object**, size_t); - void WalkObject (Object* obj, walk_fn fn, void* context); + void DiagWalkObject (Object* obj, walk_fn fn, void* context); public: // FIX @@ -272,7 +271,19 @@ protected: #endif // STRESS_HEAP #endif // FEATURE_REDHAWK - virtual void DescrGenerationsToProfiler (gen_walk_fn fn, void *context); + virtual void DiagDescrGenerations (gen_walk_fn fn, void *context); + + virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type); + + virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn); + + virtual void DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* context); + + virtual void DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context); + + virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context); + + virtual 
void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); public: Object * NextObj (Object * object); diff --git a/src/Native/gc/gcinterface.ee.h b/src/Native/gc/gcinterface.ee.h index 36d20c719..c5f87ef03 100644 --- a/src/Native/gc/gcinterface.ee.h +++ b/src/Native/gc/gcinterface.ee.h @@ -92,6 +92,42 @@ public: // Creates and returns a new background thread. virtual Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg) = 0; + + // When a GC starts, gives the diagnostics code a chance to run. + virtual + void DiagGCStart(int gen, bool isInduced) = 0; + + // When GC heap segments change, gives the diagnostics code a chance to run. + virtual + void DiagUpdateGenerationBounds() = 0; + + // When a GC ends, gives the diagnostics code a chance to run. + virtual + void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) = 0; + + // During a GC after we discover what objects' finalizers should run, gives the diagnostics code a chance to run. + virtual + void DiagWalkFReachableObjects(void* gcContext) = 0; + + // During a GC after we discover the survivors and the relocation info, + // gives the diagnostics code a chance to run. This includes LOH if we are + // compacting LOH. + virtual + void DiagWalkSurvivors(void* gcContext) = 0; + + // During a full GC after we discover what objects to survive on LOH, + // gives the diagnostics code a chance to run. + virtual + void DiagWalkLOHSurvivors(void* gcContext) = 0; + + // At the end of a background GC, gives the diagnostics code a chance to run. + virtual + void DiagWalkBGCSurvivors(void* gcContext) = 0; + + // Informs the EE of changes to the location of the card table, potentially updating the write + // barrier if it needs to be updated. 
+ virtual + void StompWriteBarrier(WriteBarrierParameters* args) = 0; }; #endif // _GCINTERFACE_EE_H_ diff --git a/src/Native/gc/gcinterface.h b/src/Native/gc/gcinterface.h index ac14332dc..4ba4e0c63 100644 --- a/src/Native/gc/gcinterface.h +++ b/src/Native/gc/gcinterface.h @@ -34,6 +34,70 @@ typedef enum SUSPEND_FOR_GC_PREP = 6 } SUSPEND_REASON; +typedef enum +{ + walk_for_gc = 1, + walk_for_bgc = 2, + walk_for_loh = 3 +} walk_surv_type; + +// Different operations that can be done by GCToEEInterface::StompWriteBarrier +enum class WriteBarrierOp +{ + StompResize, + StompEphemeral, + Initialize, + SwitchToWriteWatch, + SwitchToNonWriteWatch +}; + +// Arguments to GCToEEInterface::StompWriteBarrier +struct WriteBarrierParameters +{ + // The operation that StompWriteBarrier will perform. + WriteBarrierOp operation; + + // Whether or not the runtime is currently suspended. If it is not, + // the EE will need to suspend it before bashing the write barrier. + // Used for all operations. + bool is_runtime_suspended; + + // Whether or not the GC has moved the ephemeral generation to no longer + // be at the top of the heap. When the ephemeral generation is at the top + // of the heap, and the write barrier observes that a pointer is greater than + // g_ephemeral_low, it does not need to check that the pointer is less than + // g_ephemeral_high because there is nothing in the GC heap above the ephemeral + // generation. When this is not the case, however, the GC must inform the EE + // so that the EE can switch to a write barrier that checks that a pointer + // is both greater than g_ephemeral_low and less than g_ephemeral_high. + // Used for WriteBarrierOp::StompResize. + bool requires_upper_bounds_check; + + // The new card table location. May or may not be the same as the previous + // card table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize. + uint32_t* card_table; + + // The heap's new low boundary. 
May or may not be the same as the previous + // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize. + uint8_t* lowest_address; + + // The heap's new high boundary. May or may not be the same as the previous + // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize. + uint8_t* highest_address; + + // The new start of the ephemeral generation. + // Used for WriteBarrierOp::StompEphemeral. + uint8_t* ephemeral_low; + + // The new end of the ephemeral generation. + // Used for WriteBarrierOp::StompEphemeral. + uint8_t* ephemeral_high; + + // The new write watch table, if we are using our own write watch + // implementation. Used for WriteBarrierOp::SwitchToWriteWatch only. + uint8_t* write_watch_table; +}; + #include "gcinterface.ee.h" // The allocation context must be known to the VM for use in the allocation @@ -88,6 +152,12 @@ struct segment_info // one for the object header, and one for the first field in the object. #define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t))) +#define max_generation 2 + +// The bit shift used to convert a memory address into an index into the +// Software Write Watch table. +#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc + class Object; class IGCHeap; @@ -101,19 +171,6 @@ IGCHeap* InitializeGarbageCollector(IGCToCLR* clrToGC); // and the heap is actually recated. 
void InitializeHeapType(bool bServerHeap); -#ifndef DACCESS_COMPILE -extern "C" { -#endif // !DACCESS_COMPILE -GPTR_DECL(uint8_t,g_lowest_address); -GPTR_DECL(uint8_t,g_highest_address); -GPTR_DECL(uint32_t,g_card_table); -#ifndef DACCESS_COMPILE -} -#endif // !DACCESS_COMPILE - -extern "C" uint8_t* g_ephemeral_low; -extern "C" uint8_t* g_ephemeral_high; - #ifdef WRITE_BARRIER_CHECK //always defined, but should be 0 in Server GC extern uint8_t* g_GCShadow; @@ -174,6 +231,10 @@ enum end_no_gc_region_status typedef BOOL (* walk_fn)(Object*, void*); typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved); +typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, size_t context, BOOL compacting_p, BOOL bgc_p); +typedef void (* fq_walk_fn)(BOOL, void*); +typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags); +typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent); // IGCHeap is the interface that the VM will use when interacting with the GC. class IGCHeap { @@ -347,9 +408,6 @@ public: // sanity checks asserting that a GC has not occured. virtual unsigned GetGcCount() = 0; - // Sets cards after an object has been memmoved. - virtual void SetCardsAfterBulkCopy(Object** obj, size_t length) = 0; - // Gets whether or not the home heap of this alloc context matches the heap // associated with this thread. virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0; @@ -413,8 +471,8 @@ public: // with the given size and flags. virtual Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags) = 0; - // If allocating on the LOH, blocks if a BGC is in a position (concurrent mark) - // where the LOH allocator can't allocate. 
+ // This is for the allocator to indicate it's done allocating a large object during a + // background GC as the BGC threads also need to walk LOH. virtual void PublishObject(uint8_t* obj) = 0; // Gets the event that suspended threads will use to wait for the @@ -449,13 +507,31 @@ public: */ // Walks an object, invoking a callback on each member. - virtual void WalkObject(Object* obj, walk_fn fn, void* context) = 0; + virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0; + + // Walk the heap object by object. + virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p) = 0; + + // Walks the survivors and get the relocation information if objects have moved. + virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, size_t diag_context, walk_surv_type type) = 0; + + // Walks the finalization queue. + virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0; + + // Scan roots on finalizer queue. This is a generic function. + virtual void DiagScanFinalizeQueue(fq_scan_fn fn, ScanContext* context) = 0; + + // Scan handles for profiling or ETW. + virtual void DiagScanHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0; + + // Scan dependent handles for profiling or ETW. + virtual void DiagScanDependentHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0; // Describes all generations to the profiler, invoking a callback on each generation. - virtual void DescrGenerationsToProfiler(gen_walk_fn fn, void* context) = 0; + virtual void DiagDescrGenerations(gen_walk_fn fn, void* context) = 0; // Traces all GC segments and fires ETW events with information on them. 
- virtual void TraceGCSegments() = 0; + virtual void DiagTraceGCSegments() = 0; /* =========================================================================== @@ -550,26 +626,4 @@ struct ScanContext } }; -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -struct ProfilingScanContext : ScanContext -{ - BOOL fProfilerPinned; - void * pvEtwContext; - void *pHeapId; - - ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext() - { - LIMITED_METHOD_CONTRACT; - - pHeapId = NULL; - fProfilerPinned = fProfilerPinnedParam; - pvEtwContext = NULL; -#ifdef FEATURE_CONSERVATIVE_GC - // To not confuse GCScan::GcScanRoots - promotion = g_pConfig->GetGCConservative(); -#endif - } -}; -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - #endif // _GC_INTERFACE_H_ diff --git a/src/Native/gc/gcpriv.h b/src/Native/gc/gcpriv.h index e0147c33c..1f97d7f2d 100644 --- a/src/Native/gc/gcpriv.h +++ b/src/Native/gc/gcpriv.h @@ -24,7 +24,9 @@ inline void FATAL_GC_ERROR() { +#ifndef DACCESS_COMPILE GCToOSInterface::DebugBreak(); +#endif // DACCESS_COMPILE _ASSERTE(!"Fatal Error in GC."); EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); } @@ -1073,9 +1075,6 @@ enum interesting_data_point }; //class definition of the internal class -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) -extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) class gc_heap { friend struct ::_DacGlobals; @@ -1225,7 +1224,7 @@ public: static gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size); static - void __stdcall gc_thread_stub (void* arg); + void gc_thread_stub (void* arg); #endif //MULTIPLE_HEAPS CObjectHeader* try_fast_alloc (size_t jsize); @@ -1283,35 +1282,48 @@ public: protected: - PER_HEAP + PER_HEAP_ISOLATED void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); + PER_HEAP + void 
walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); + struct walk_relocate_args { uint8_t* last_plug; BOOL is_shortened; mark* pinned_plug_entry; + size_t profiling_context; + record_surv_fn fn; }; PER_HEAP + void walk_survivors (record_surv_fn fn, size_t context, walk_surv_type type); + + PER_HEAP void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, - walk_relocate_args* args, size_t profiling_context); + walk_relocate_args* args); PER_HEAP - void walk_relocation (int condemned_gen_number, - uint8_t* first_condemned_address, size_t profiling_context); + void walk_relocation (size_t profiling_context, record_surv_fn fn); PER_HEAP - void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args, size_t profiling_context); + void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args); -#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) PER_HEAP - void walk_relocation_for_bgc(size_t profiling_context); + void walk_finalize_queue (fq_walk_fn fn); +#if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) PER_HEAP - void make_free_lists_for_profiler_for_bgc(); + void walk_survivors_for_bgc (size_t profiling_context, record_surv_fn fn); #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) + // used in blocking GCs after plan phase so this walks the plugs. 
+ PER_HEAP + void walk_survivors_relocation (size_t profiling_context, record_surv_fn fn); + PER_HEAP + void walk_survivors_for_loh (size_t profiling_context, record_surv_fn fn); + PER_HEAP int generation_to_condemn (int n, BOOL* blocking_collection_p, @@ -1659,7 +1671,7 @@ protected: PER_HEAP void reset_write_watch (BOOL concurrent_p); PER_HEAP - void adjust_ephemeral_limits (bool is_runtime_suspended); + void adjust_ephemeral_limits (); PER_HEAP void make_generation (generation& gen, heap_segment* seg, uint8_t* start, uint8_t* pointer); @@ -2148,10 +2160,8 @@ protected: PER_HEAP void relocate_in_loh_compact(); -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) PER_HEAP - void walk_relocation_loh (size_t profiling_context); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + void walk_relocation_for_loh (size_t profiling_context, record_surv_fn fn); PER_HEAP BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len); @@ -2549,12 +2559,6 @@ protected: PER_HEAP_ISOLATED void descr_generations_to_profiler (gen_walk_fn fn, void *context); -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - PER_HEAP - void record_survived_for_profiler(int condemned_gen_number, uint8_t * first_condemned_address); - PER_HEAP - void notify_profiler_of_surviving_large_objects (); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) /*------------ Multiple non isolated heaps ----------------*/ #ifdef MULTIPLE_HEAPS @@ -2798,13 +2802,11 @@ public: PER_HEAP void exit_gc_done_event_lock(); -#ifdef MULTIPLE_HEAPS PER_HEAP uint8_t* ephemeral_low; //lowest ephemeral address PER_HEAP uint8_t* ephemeral_high; //highest ephemeral address -#endif //MULTIPLE_HEAPS PER_HEAP uint32_t* card_table; @@ -3763,9 +3765,7 @@ public: Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE); BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp); void RelocateFinalizationData (int gen, gc_heap* hp); -#ifdef GC_PROFILING - void 
WalkFReachableObjects (gc_heap* hp); -#endif //GC_PROFILING + void WalkFReachableObjects (fq_walk_fn fn); void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC); void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p); size_t GetPromotedCount(); @@ -4317,9 +4317,6 @@ dynamic_data* gc_heap::dynamic_data_of (int gen_number) return &dynamic_data_table [ gen_number ]; } -extern "C" uint8_t* g_ephemeral_low; -extern "C" uint8_t* g_ephemeral_high; - #define card_word_width ((size_t)32) // diff --git a/src/Native/gc/gcrecord.h b/src/Native/gc/gcrecord.h index 8c95ad04d..fff1fc5c8 100644 --- a/src/Native/gc/gcrecord.h +++ b/src/Native/gc/gcrecord.h @@ -13,7 +13,7 @@ Module Name: #ifndef __gc_record_h__ #define __gc_record_h__ -#define max_generation 2 +//#define max_generation 2 // We pack the dynamic tuning for deciding which gen to condemn in a uint32_t. // We assume that 2 bits are enough to represent the generation. diff --git a/src/Native/gc/gcscan.cpp b/src/Native/gc/gcscan.cpp index f021554fd..b4e6352dd 100644 --- a/src/Native/gc/gcscan.cpp +++ b/src/Native/gc/gcscan.cpp @@ -192,33 +192,32 @@ void GCScan::GcScanHandles (promote_func* fn, int condemned, int max_gen, } } - -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - /* * Scan all handle roots in this 'namespace' for profiling */ -void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc) +void GCScan::GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn) { LIMITED_METHOD_CONTRACT; +#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, Handles\n")); - Ref_ScanPointersForProfilerAndETW(max_gen, (uintptr_t)sc); + Ref_ScanHandlesForProfilerAndETW(max_gen, (uintptr_t)sc, fn); +#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) } /* * Scan dependent handles in this 'namespace' for profiling */ -void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, 
ProfilingScanContext* sc) +void GCScan::GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn) { LIMITED_METHOD_CONTRACT; +#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) LOG((LF_GC|LF_GCROOTS, LL_INFO10, "Profiler Root Scan Phase, DependentHandles\n")); - Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc); -} - + Ref_ScanDependentHandlesForProfilerAndETW(max_gen, sc, fn); #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) +} void GCScan::GcRuntimeStructuresValid (BOOL bValid) { diff --git a/src/Native/gc/gcscan.h b/src/Native/gc/gcscan.h index 3515b8e1b..362370fa4 100644 --- a/src/Native/gc/gcscan.h +++ b/src/Native/gc/gcscan.h @@ -52,10 +52,8 @@ class GCScan static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif // DACCESS_COMPILE -#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc); - static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ProfilingScanContext* sc); -#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) + static void GcScanHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn); + static void GcScanDependentHandlesForProfilerAndETW (int max_gen, ScanContext* sc, handle_scan_fn fn); // scan for dead weak pointers static void GcWeakPtrScan (promote_func* fn, int condemned, int max_gen, ScanContext*sc ); diff --git a/src/Native/gc/gcsvr.cpp b/src/Native/gc/gcsvr.cpp index cf5fc9335..70801dd4e 100644 --- a/src/Native/gc/gcsvr.cpp +++ b/src/Native/gc/gcsvr.cpp @@ -13,6 +13,7 @@ #include "gc.h" #include "gcscan.h" #include "gcdesc.h" +#include "softwarewritewatch.h" #define SERVER_GC 1 diff --git a/src/Native/gc/gcwks.cpp b/src/Native/gc/gcwks.cpp index 574df8215..5c489df0e 100644 --- a/src/Native/gc/gcwks.cpp +++ b/src/Native/gc/gcwks.cpp @@ -11,6 +11,7 @@ #include "gc.h" #include "gcscan.h" #include "gcdesc.h" +#include "softwarewritewatch.h" #ifdef 
SERVER_GC #undef SERVER_GC diff --git a/src/Native/gc/handletablecache.cpp b/src/Native/gc/handletablecache.cpp index b2af40c82..aaf3370bd 100644 --- a/src/Native/gc/handletablecache.cpp +++ b/src/Native/gc/handletablecache.cpp @@ -15,6 +15,12 @@ #include "gcenv.h" +#ifdef Sleep // TODO(segilles) +#undef Sleep +#endif // Sleep + +#include "env/gcenv.os.h" + #include "handletablepriv.h" /**************************************************************************** diff --git a/src/Native/gc/handletablecore.cpp b/src/Native/gc/handletablecore.cpp index be65b142b..5776c26ac 100644 --- a/src/Native/gc/handletablecore.cpp +++ b/src/Native/gc/handletablecore.cpp @@ -611,7 +611,7 @@ TableSegment *SegmentAlloc(HandleTable *pTable) _ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE); _ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000); - pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(NULL, HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None); + pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None); _ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0); // bail out if we couldn't get any memory diff --git a/src/Native/gc/objecthandle.cpp b/src/Native/gc/objecthandle.cpp index d8834b72f..e8eed9300 100644 --- a/src/Native/gc/objecthandle.cpp +++ b/src/Native/gc/objecthandle.cpp @@ -110,6 +110,21 @@ void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtra } #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK + +// Only used by profiling/ETW. +//---------------------------------------------------------------------------- + +/* + * struct DIAG_DEPSCANINFO + * + * used when tracing dependent handles for profiling/ETW. 
+ */ +struct DIAG_DEPSCANINFO +{ + HANDLESCANPROC pfnTrace; // tracing function to use + uintptr_t pfnProfilingOrETW; +}; + void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) { WRAPPER_NO_CONTRACT; @@ -122,14 +137,15 @@ void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pEx // object should also be non-NULL. _ASSERTE(*pExtraInfo == NULL || *pObjRef != NULL); - // lp2 is a HANDLESCANPROC - HANDLESCANPROC pfnTrace = (HANDLESCANPROC) lp2; + struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2; + + HANDLESCANPROC pfnTrace = pInfo->pfnTrace; // is the handle's secondary object non-NULL? if ((*pObjRef != NULL) && (*pExtraInfo != 0)) { // yes - call the tracing function for this handle - pfnTrace(pObjRef, NULL, lp1, *pExtraInfo); + pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW)); } } @@ -414,7 +430,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt CONTRACTL_END; #endif // FEATURE_REDHAWK UNREFERENCED_PARAMETER(pExtraInfo); - UNREFERENCED_PARAMETER(lp2); + handle_scan_fn fn = (handle_scan_fn)lp2; LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef))); @@ -422,7 +438,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt Object **pRef = (Object **)pObjRef; // Get a hold of the heap ID that's tacked onto the end of the scancontext struct. - ProfilingScanContext *pSC = (ProfilingScanContext *)lp1; + ScanContext *pSC = (ScanContext *)lp1; uint32_t rootFlags = 0; BOOL isDependent = FALSE; @@ -487,60 +503,15 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt _UNCHECKED_OBJECTREF pSec = NULL; -#ifdef GC_PROFILING - // Give the profiler the objectref. 
- if (pSC->fProfilerPinned) + if (isDependent) { - if (!isDependent) - { - BEGIN_PIN_PROFILER(CORProfilerTrackGC()); - g_profControlBlock.pProfInterface->RootReference2( - (uint8_t *)*pRef, - kEtwGCRootKindHandle, - (EtwGCRootFlags)rootFlags, - pRef, - &pSC->pHeapId); - END_PIN_PROFILER(); - } - else - { - BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements()); - pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle); - g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference( - (uint8_t*)*pRef, - (uint8_t*)pSec, - pRef, - &pSC->pHeapId); - END_PIN_PROFILER(); - } + pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle); } -#endif // GC_PROFILING - -#if defined(FEATURE_EVENT_TRACE) - // Notify ETW of the handle - if (ETW::GCLog::ShouldWalkHeapRootsForEtw()) - { - if (isDependent && (pSec == NULL)) - { - pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle); - } - - ETW::GCLog::RootReference( - handle, - *pRef, // object being rooted - pSec, // pSecondaryNodeForDependentHandle - isDependent, - pSC, - 0, // dwGCFlags, - rootFlags); // ETW handle flags - } -#endif // defined(FEATURE_EVENT_TRACE) + fn(pRef, pSec, rootFlags, pSC, isDependent); } #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) - - /* * Scan callback for updating pointers. * @@ -1417,13 +1388,15 @@ void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen, /* loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions should be kept in sync with the code above + Only used by profiling/ETW. 
*/ -void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uint32_t condemned, uint32_t maxgen, uint32_t flags) +void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags) { WRAPPER_NO_CONTRACT; // set up to scan variable handles with the specified mask and trace function uint32_t type = HNDTYPE_DEPENDENT; + struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 }; HandleTableMap *walk = &g_HandleTableMap; while (walk) { @@ -1436,14 +1409,13 @@ void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; if (hTable) HndScanHandlesForGC(hTable, TraceDependentHandle, - lp1, (uintptr_t)pfnTrace, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags); + lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags); } } walk = walk->pNext; } } - // We scan handle tables by their buckets (ie, AD index). We could get into the situation where // the AD indices are not very compacted (for example if we have just unloaded ADs and their // indices haven't been reused yet) and we could be scanning them in an unbalanced fashion. @@ -1623,7 +1595,7 @@ void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Re #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) // Please update this if you change the Ref_UpdatePointers function above. 
-void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1) +void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn) { WRAPPER_NO_CONTRACT; @@ -1662,16 +1634,16 @@ void Ref_ScanPointersForProfilerAndETW(uint32_t maxgen, uintptr_t lp1) { HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; if (hTable) - HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, 0, types, _countof(types), maxgen, maxgen, flags); + HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags); } walk = walk->pNext; } // update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG - TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, 0, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags); + TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags); } -void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanContext * SC) +void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn) { WRAPPER_NO_CONTRACT; @@ -1680,12 +1652,7 @@ void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ProfilingScanCon uint32_t flags = HNDGCF_NORMAL; uintptr_t lp1 = (uintptr_t)SC; - // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2 - // (-1)), so reset it to NULL - _ASSERTE((*((size_t *)(&SC->pHeapId)) == (size_t)(-1)) || - (*((size_t *)(&SC->pHeapId)) == (size_t)(0))); - SC->pHeapId = NULL; - TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, maxgen, maxgen, flags); + TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags); } #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) diff --git a/src/Native/gc/objecthandle.h 
b/src/Native/gc/objecthandle.h index 89365267d..34c2a0e32 100644 --- a/src/Native/gc/objecthandle.h +++ b/src/Native/gc/objecthandle.h @@ -652,7 +652,6 @@ BOOL Ref_ContainHandle(HandleTableBucket *pBucket, OBJECTHANDLE handle); */ struct ScanContext; struct DhContext; -struct ProfilingScanContext; void Ref_BeginSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration); void Ref_EndSynchronousGC (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration); @@ -672,10 +671,12 @@ void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* s void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn); #endif +typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent); + void Ref_CheckReachable (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); void Ref_CheckAlive (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); -void Ref_ScanPointersForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1); -void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ProfilingScanContext * SC); +void Ref_ScanHandlesForProfilerAndETW(uint32_t uMaxGeneration, uintptr_t lp1, handle_scan_fn fn); +void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t uMaxGeneration, ScanContext * SC, handle_scan_fn fn); void Ref_AgeHandles (uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); void Ref_RejuvenateHandles(uint32_t uCondemnedGeneration, uint32_t uMaxGeneration, uintptr_t lp1); diff --git a/src/Native/gc/sample/CMakeLists.txt b/src/Native/gc/sample/CMakeLists.txt index 572fba371..9552cc51e 100644 --- a/src/Native/gc/sample/CMakeLists.txt +++ b/src/Native/gc/sample/CMakeLists.txt @@ -22,11 +22,11 @@ set(SOURCES if(WIN32) list(APPEND SOURCES - gcenv.windows.cpp) + ../gcenv.windows.cpp) add_definitions(-DUNICODE=1) else() list(APPEND SOURCES - gcenv.unix.cpp) + ../gcenv.unix.cpp) endif() 
_add_executable(gcsample diff --git a/src/Native/gc/sample/GCSample.cpp b/src/Native/gc/sample/GCSample.cpp index 7f0dd8cab..112d29142 100644 --- a/src/Native/gc/sample/GCSample.cpp +++ b/src/Native/gc/sample/GCSample.cpp @@ -91,17 +91,14 @@ inline void ErectWriteBarrier(Object ** dst, Object * ref) { // if the dst is outside of the heap (unboxed value classes) then we // simply exit - if (((uint8_t*)dst < g_lowest_address) || ((uint8_t*)dst >= g_highest_address)) + if (((uint8_t*)dst < g_gc_lowest_address) || ((uint8_t*)dst >= g_gc_highest_address)) return; - if((uint8_t*)ref >= g_ephemeral_low && (uint8_t*)ref < g_ephemeral_high) - { - // volatile is used here to prevent fetch of g_card_table from being reordered - // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables. - uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_card_table) + card_byte((uint8_t *)dst); - if(*pCardByte != 0xFF) - *pCardByte = 0xFF; - } + // volatile is used here to prevent fetch of g_card_table from being reordered + // with g_lowest/highest_address check above. See comment in code:gc_heap::grow_brick_card_tables. 
+ uint8_t* pCardByte = (uint8_t *)*(volatile uint8_t **)(&g_gc_card_table) + card_byte((uint8_t *)dst); + if(*pCardByte != 0xFF) + *pCardByte = 0xFF; } void WriteBarrier(Object ** dst, Object * ref) diff --git a/src/Native/gc/sample/GCSample.vcxproj b/src/Native/gc/sample/GCSample.vcxproj index b196e1f34..1716f462e 100644 --- a/src/Native/gc/sample/GCSample.vcxproj +++ b/src/Native/gc/sample/GCSample.vcxproj @@ -84,10 +84,12 @@ </ItemGroup> <ItemGroup> <ClCompile Include="gcenv.ee.cpp" /> - <ClCompile Include="gcenv.windows.cpp" /> <ClCompile Include="GCSample.cpp" /> <ClCompile Include="..\gccommon.cpp" /> <ClCompile Include="..\gceewks.cpp" /> + <ClCompile Include="..\gcenv.windows.cpp"> + <PrecompiledHeader>NotUsing</PrecompiledHeader> + </ClCompile> <ClCompile Include="..\gcscan.cpp" /> <ClCompile Include="..\gcwks.cpp" /> <ClCompile Include="..\handletable.cpp" /> @@ -96,8 +98,7 @@ <ClCompile Include="..\handletablescan.cpp" /> <ClCompile Include="..\objecthandle.cpp" /> <ClCompile Include="..\env\common.cpp"> - <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader> - <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader> + <PrecompiledHeader>Create</PrecompiledHeader> </ClCompile> </ItemGroup> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> diff --git a/src/Native/gc/sample/GCSample.vcxproj.filters b/src/Native/gc/sample/GCSample.vcxproj.filters index e46c05456..f6aacfd0c 100644 --- a/src/Native/gc/sample/GCSample.vcxproj.filters +++ b/src/Native/gc/sample/GCSample.vcxproj.filters @@ -59,7 +59,7 @@ <ClCompile Include="gcenv.ee.cpp"> <Filter>Source Files</Filter> </ClCompile> - <ClCompile Include="gcenv.windows.cpp"> + <ClCompile Include="..\gcenv.windows.cpp"> <Filter>Source Files</Filter> </ClCompile> </ItemGroup> diff --git a/src/Native/gc/sample/gcenv.ee.cpp b/src/Native/gc/sample/gcenv.ee.cpp index 25d829e79..ac227b482 100644 --- 
a/src/Native/gc/sample/gcenv.ee.cpp +++ b/src/Native/gc/sample/gcenv.ee.cpp @@ -9,6 +9,12 @@ #include "gcenv.h" #include "gc.h" +MethodTable * g_pFreeObjectMethodTable; + +int32_t g_TrapReturningThreads; + +bool g_fFinalizerRunOnShutDown; + EEConfig * g_pConfig; bool CLREventStatic::CreateManualEventNoThrow(bool bInitialState) @@ -221,6 +227,38 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa return NULL; } +void GCToEEInterface::DiagGCStart(int gen, bool isInduced) +{ +} + +void GCToEEInterface::DiagUpdateGenerationBounds() +{ +} + +void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) +{ +} + +void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) +{ +} + +void GCToEEInterface::DiagWalkSurvivors(void* gcContext) +{ +} + +void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext) +{ +} + +void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext) +{ +} + +void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) +{ +} + void FinalizerThread::EnableFinalization() { // Signal to finalizer thread that there are objects to finalize @@ -238,14 +276,6 @@ bool IsGCSpecialThread() return false; } -void StompWriteBarrierEphemeral(bool /* isRuntimeSuspended */) -{ -} - -void StompWriteBarrierResize(bool /* isRuntimeSuspended */, bool /*bReqUpperBoundsCheck*/) -{ -} - bool IsGCThread() { return false; diff --git a/src/Native/gc/sample/gcenv.unix.cpp b/src/Native/gc/sample/gcenv.unix.cpp deleted file mode 100644 index a5e9e83ee..000000000 --- a/src/Native/gc/sample/gcenv.unix.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. -// See the LICENSE file in the project root for more information. 
- -// -// Implementation of the GC environment -// - -#include "common.h" - -#include "gcenv.h" -#include "gc.h" - -// TODO: Implement diff --git a/src/Native/gc/softwarewritewatch.cpp b/src/Native/gc/softwarewritewatch.cpp index 519744900..b85293857 100644 --- a/src/Native/gc/softwarewritewatch.cpp +++ b/src/Native/gc/softwarewritewatch.cpp @@ -3,9 +3,9 @@ // See the LICENSE file in the project root for more information. #include "common.h" -#include "softwarewritewatch.h" - #include "gcenv.h" +#include "env/gcenv.os.h" +#include "softwarewritewatch.h" #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifndef DACCESS_COMPILE @@ -14,8 +14,8 @@ static_assert((static_cast<size_t>(1) << SOFTWARE_WRITE_WATCH_AddressToTableByte extern "C" { - uint8_t *g_sw_ww_table = nullptr; - bool g_sw_ww_enabled_for_gc_heap = false; + uint8_t *g_gc_sw_ww_table = nullptr; + bool g_gc_sw_ww_enabled_for_gc_heap = false; } void SoftwareWriteWatch::StaticClose() @@ -25,8 +25,8 @@ void SoftwareWriteWatch::StaticClose() return; } - g_sw_ww_enabled_for_gc_heap = false; - g_sw_ww_table = nullptr; + g_gc_sw_ww_enabled_for_gc_heap = false; + g_gc_sw_ww_table = nullptr; } bool SoftwareWriteWatch::GetDirtyFromBlock( diff --git a/src/Native/gc/softwarewritewatch.h b/src/Native/gc/softwarewritewatch.h index 3c8491cec..0e6e6c819 100644 --- a/src/Native/gc/softwarewritewatch.h +++ b/src/Native/gc/softwarewritewatch.h @@ -5,25 +5,20 @@ #ifndef __SOFTWARE_WRITE_WATCH_H__ #define __SOFTWARE_WRITE_WATCH_H__ +#include "gcinterface.h" +#include "gc.h" + #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP #ifndef DACCESS_COMPILE -extern void SwitchToWriteWatchBarrier(bool isRuntimeSuspended); -extern void SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended); - -#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc - extern "C" { // Table containing the dirty state. This table is translated to exclude the lowest address it represents, see // TranslateTableToExcludeHeapStartAddress. 
- extern uint8_t *g_sw_ww_table; + extern uint8_t *g_gc_sw_ww_table; // Write watch may be disabled when it is not needed (between GCs for instance). This indicates whether it is enabled. - extern bool g_sw_ww_enabled_for_gc_heap; - - extern uint8_t *g_lowest_address; // start address of the GC heap - extern uint8_t *g_highest_address; // end address of the GC heap + extern bool g_gc_sw_ww_enabled_for_gc_heap; } class SoftwareWriteWatch @@ -116,7 +111,7 @@ inline void SoftwareWriteWatch::VerifyMemoryRegion( inline uint8_t *SoftwareWriteWatch::GetTable() { - return g_sw_ww_table; + return g_gc_sw_ww_table; } inline uint8_t *SoftwareWriteWatch::GetUntranslatedTable() @@ -163,7 +158,7 @@ inline void SoftwareWriteWatch::SetUntranslatedTable(uint8_t *untranslatedTable, assert(ALIGN_DOWN(untranslatedTable, sizeof(size_t)) == untranslatedTable); assert(heapStartAddress != nullptr); - g_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress); + g_gc_sw_ww_table = TranslateTableToExcludeHeapStartAddress(untranslatedTable, heapStartAddress); } inline void SoftwareWriteWatch::SetResizedUntranslatedTable( @@ -194,7 +189,7 @@ inline void SoftwareWriteWatch::SetResizedUntranslatedTable( inline bool SoftwareWriteWatch::IsEnabledForGCHeap() { - return g_sw_ww_enabled_for_gc_heap; + return g_gc_sw_ww_enabled_for_gc_heap; } inline void SoftwareWriteWatch::EnableForGCHeap() @@ -204,9 +199,13 @@ inline void SoftwareWriteWatch::EnableForGCHeap() VerifyCreated(); assert(!IsEnabledForGCHeap()); + g_gc_sw_ww_enabled_for_gc_heap = true; - g_sw_ww_enabled_for_gc_heap = true; - SwitchToWriteWatchBarrier(true); + WriteBarrierParameters args = {}; + args.operation = WriteBarrierOp::SwitchToWriteWatch; + args.write_watch_table = g_gc_sw_ww_table; + args.is_runtime_suspended = true; + GCToEEInterface::StompWriteBarrier(&args); } inline void SoftwareWriteWatch::DisableForGCHeap() @@ -216,19 +215,22 @@ inline void SoftwareWriteWatch::DisableForGCHeap() 
VerifyCreated(); assert(IsEnabledForGCHeap()); + g_gc_sw_ww_enabled_for_gc_heap = false; - g_sw_ww_enabled_for_gc_heap = false; - SwitchToNonWriteWatchBarrier(true); + WriteBarrierParameters args = {}; + args.operation = WriteBarrierOp::SwitchToNonWriteWatch; + args.is_runtime_suspended = true; + GCToEEInterface::StompWriteBarrier(&args); } inline void *SoftwareWriteWatch::GetHeapStartAddress() { - return g_lowest_address; + return g_gc_lowest_address; } inline void *SoftwareWriteWatch::GetHeapEndAddress() { - return g_highest_address; + return g_gc_highest_address; } inline size_t SoftwareWriteWatch::GetTableByteIndex(void *address) diff --git a/src/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs b/src/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs new file mode 100644 index 000000000..e2a2b318b --- /dev/null +++ b/src/System.Private.CoreLib/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs @@ -0,0 +1,26 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; +using System.Runtime; +using System.Runtime.CompilerServices; + +using Debug = System.Diagnostics.Debug; + +namespace Internal.Runtime.CompilerHelpers +{ + /// <summary> + /// Container class to run specific class constructors in a defined order. Since we can't + /// directly invoke class constructors in C#, they're renamed Initialize. 
+ /// </summary> + internal class LibraryInitializer + { + public static void InitializeLibrary() + { + PreallocatedOutOfMemoryException.Initialize(); + ClassConstructorRunner.Initialize(); + TypeLoaderExports.Initialize(); + } + } +} diff --git a/src/System.Private.CoreLib/src/Resources/Strings.resx b/src/System.Private.CoreLib/src/Resources/Strings.resx index 2e1ecbe88..24edf287c 100644 --- a/src/System.Private.CoreLib/src/Resources/Strings.resx +++ b/src/System.Private.CoreLib/src/Resources/Strings.resx @@ -687,6 +687,9 @@ <data name="Argument_OutOfOrderDateTimes" xml:space="preserve"> <value>The DateStart property must come before the DateEnd property.</value> </data> + <data name="ArgumentOutOfRange_HugeArrayNotSupported" xml:space="preserve"> + <value>Arrays larger than 2GB are not supported.</value> + </data> <data name="ArgumentOutOfRange_Index" xml:space="preserve"> <value>Index was out of range. Must be non-negative and less than the size of the collection.</value> </data> diff --git a/src/System.Private.CoreLib/src/System.Private.CoreLib.csproj b/src/System.Private.CoreLib/src/System.Private.CoreLib.csproj index d5b115c32..baf7c70b5 100644 --- a/src/System.Private.CoreLib/src/System.Private.CoreLib.csproj +++ b/src/System.Private.CoreLib/src/System.Private.CoreLib.csproj @@ -63,6 +63,7 @@ <ItemGroup> <Compile Include="Internal\Diagnostics\ExceptionExtensions.cs" /> <Compile Include="Internal\Diagnostics\StackTraceHelper.cs" /> + <Compile Include="Internal\Runtime\CompilerHelpers\LibraryInitializer.cs" /> <Compile Include="Internal\Runtime\CompilerHelpers\ReflectionHelpers.cs" /> <Compile Include="Internal\Runtime\CompilerHelpers\StartupCode\ThreadingHelpers.cs" /> </ItemGroup> @@ -221,6 +222,7 @@ <Compile Include="System\BadImageFormatException.cs" /> <Compile Include="System\Boolean.cs" /> <Compile Include="System\Buffer.cs" /> + <Compile Include="System\ByReference.cs" /> <Compile Include="System\Byte.cs" /> <Compile Include="System\Char.cs" /> 
<Compile Include="System\CharEnumerator.cs" /> @@ -1183,4 +1185,4 @@ </EmbeddedResource> </ItemGroup> <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" /> -</Project> +</Project>
\ No newline at end of file diff --git a/src/System.Private.CoreLib/src/System/Array.cs b/src/System.Private.CoreLib/src/System/Array.cs index 5564ab734..2b96eced9 100644 --- a/src/System.Private.CoreLib/src/System/Array.cs +++ b/src/System.Private.CoreLib/src/System/Array.cs @@ -985,18 +985,6 @@ namespace System bool IList.IsReadOnly { get { return false; } } - bool IList.IsFixedSize - { - get { return true; } - } - - // Is this Array synchronized (i.e., thread-safe)? If you want a synchronized - // collection, you can use SyncRoot as an object to synchronize your - // collection with. You could also call GetSynchronized() - // to get a synchronized wrapper around the Array. - bool ICollection.IsSynchronized - { get { return false; } } - Object IList.this[int index] { get @@ -1061,13 +1049,6 @@ namespace System Array.Copy(this, 0, array, index, Length); } - // Returns an object appropriate for synchronizing access to this - // Array. - Object ICollection.SyncRoot - { - get { return this; } - } - // Make a new array which is a deep copy of the original array. 
// public Object Clone() @@ -1180,6 +1161,160 @@ namespace System return BinarySearch(array, 0, array.Length, value, null); } + public static TOutput[] ConvertAll<TInput, TOutput>(TInput[] array, Converter<TInput, TOutput> converter) + { + if (array == null) + throw new ArgumentNullException(nameof(array)); + + if (converter == null) + throw new ArgumentNullException(nameof(converter)); + + Contract.Ensures(Contract.Result<TOutput[]>() != null); + Contract.Ensures(Contract.Result<TOutput[]>().Length == array.Length); + Contract.EndContractBlock(); + + TOutput[] newArray = new TOutput[array.Length]; + for (int i = 0; i < array.Length; i++) + { + newArray[i] = converter(array[i]); + } + return newArray; + } + + public static void Copy(Array sourceArray, Array destinationArray, long length) + { + if (length > Int32.MaxValue || length < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(length), SR.ArgumentOutOfRange_HugeArrayNotSupported); + + Array.Copy(sourceArray, destinationArray, (int)length); + } + + public static void Copy(Array sourceArray, long sourceIndex, Array destinationArray, long destinationIndex, long length) + { + if (sourceIndex > Int32.MaxValue || sourceIndex < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(sourceIndex), SR.ArgumentOutOfRange_HugeArrayNotSupported); + if (destinationIndex > Int32.MaxValue || destinationIndex < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(destinationIndex), SR.ArgumentOutOfRange_HugeArrayNotSupported); + if (length > Int32.MaxValue || length < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(length), SR.ArgumentOutOfRange_HugeArrayNotSupported); + + Array.Copy(sourceArray, (int)sourceIndex, destinationArray, (int)destinationIndex, (int)length); + } + + [Pure] + public void CopyTo(Array array, long index) + { + if (index > Int32.MaxValue || index < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index), 
SR.ArgumentOutOfRange_HugeArrayNotSupported); + Contract.EndContractBlock(); + + this.CopyTo(array, (int)index); + } + + public static void ForEach<T>(T[] array, Action<T> action) + { + if (array == null) + throw new ArgumentNullException(nameof(array)); + + if (action == null) + throw new ArgumentNullException(nameof(action)); + + Contract.EndContractBlock(); + + for (int i = 0; i < array.Length; i++) + { + action(array[i]); + } + } + + public long LongLength + { + get + { + long ret = GetLength(0); + + for (int i = 1; i < Rank; ++i) + { + ret = ret * GetLength(i); + } + + return ret; + } + } + + public long GetLongLength(int dimension) + { + // This method does throw an IndexOutOfRangeException for compat if dimension < 0 or >= Rank + // by calling GetUpperBound + return GetLength(dimension); + } + + public Object GetValue(long index) + { + if (index > Int32.MaxValue || index < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index), SR.ArgumentOutOfRange_HugeArrayNotSupported); + Contract.EndContractBlock(); + + return this.GetValue((int)index); + } + + public Object GetValue(long index1, long index2) + { + if (index1 > Int32.MaxValue || index1 < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index1), SR.ArgumentOutOfRange_HugeArrayNotSupported); + if (index2 > Int32.MaxValue || index2 < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index2), SR.ArgumentOutOfRange_HugeArrayNotSupported); + Contract.EndContractBlock(); + + return this.GetValue((int)index1, (int)index2); + } + + public Object GetValue(long index1, long index2, long index3) + { + if (index1 > Int32.MaxValue || index1 < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index1), SR.ArgumentOutOfRange_HugeArrayNotSupported); + if (index2 > Int32.MaxValue || index2 < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index2), SR.ArgumentOutOfRange_HugeArrayNotSupported); + if (index3 > Int32.MaxValue || index3 < Int32.MinValue) + 
throw new ArgumentOutOfRangeException(nameof(index3), SR.ArgumentOutOfRange_HugeArrayNotSupported); + Contract.EndContractBlock(); + + return this.GetValue((int)index1, (int)index2, (int)index3); + } + + public Object GetValue(params long[] indices) + { + if (indices == null) + throw new ArgumentNullException(nameof(indices)); + if (Rank != indices.Length) + throw new ArgumentException(SR.Arg_RankIndices); + Contract.EndContractBlock(); + + int[] intIndices = new int[indices.Length]; + + for (int i = 0; i < indices.Length; ++i) + { + long index = indices[i]; + if (index > Int32.MaxValue || index < Int32.MinValue) + throw new ArgumentOutOfRangeException(nameof(index), SR.ArgumentOutOfRange_HugeArrayNotSupported); + intIndices[i] = (int)index; + } + + return this.GetValue(intIndices); + } + + public bool IsFixedSize { get { return true; } } + + // Is this Array synchronized (i.e., thread-safe)? If you want a synchronized + // collection, you can use SyncRoot as an object to synchronize your + // collection with. You could also call GetSynchronized() + // to get a synchronized wrapper around the Array. + public bool IsSynchronized { get { return false; } } + + // Returns an object appropriate for synchronizing access to this + // Array. + public Object SyncRoot { get { return this; } } + // Searches a section of an array for a given element using a binary search // algorithm. Elements of the array are compared to the search value using // the IComparable interface, which must be implemented by all diff --git a/src/System.Private.CoreLib/src/System/ByReference.cs b/src/System.Private.CoreLib/src/System/ByReference.cs new file mode 100644 index 000000000..d0129c2ee --- /dev/null +++ b/src/System.Private.CoreLib/src/System/ByReference.cs @@ -0,0 +1,41 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using System.Runtime.CompilerServices; + +namespace System +{ + // ByReference<T> is meant to be used to represent "ref T" fields. It is working + // around lack of first class support for byref fields in C# and IL. The JIT and + // type loader have special handling for it that turns it into a thin wrapper around ref T. + [StackOnly] + internal struct ByReference<T> + { + // CS0169: The private field '{blah}' is never used +#pragma warning disable 169 + private IntPtr _value; +#pragma warning restore + + [Intrinsic] + public ByReference(ref T value) + { + // Implemented as a JIT intrinsic - This default implementation is for + // completeness and to provide a concrete error if called via reflection + // or if intrinsic is missed. + throw new System.PlatformNotSupportedException(); + } + + public ref T Value + { + [Intrinsic] + get + { + // Implemented as a JIT intrinsic - This default implementation is for + // completeness and to provide a concrete error if called via reflection + // or if the intrinsic is missed. 
+ throw new System.PlatformNotSupportedException(); + } + } + } +} diff --git a/src/System.Private.CoreLib/src/System/Delegate.cs b/src/System.Private.CoreLib/src/System/Delegate.cs index 399fd783b..8263a8ac6 100644 --- a/src/System.Private.CoreLib/src/System/Delegate.cs +++ b/src/System.Private.CoreLib/src/System/Delegate.cs @@ -630,7 +630,7 @@ namespace System int invocationCount = (int)m_extraFunctionPointerOrData; del = new Delegate[invocationCount]; - for (int i = 0; i < invocationCount; i++) + for (int i = 0; i < del.Length; i++) del[i] = invocationList[i]; } return del; diff --git a/src/System.Private.CoreLib/src/System/Environment.cs b/src/System.Private.CoreLib/src/System/Environment.cs index 274c4e2a4..41d5dc038 100644 --- a/src/System.Private.CoreLib/src/System/Environment.cs +++ b/src/System.Private.CoreLib/src/System/Environment.cs @@ -33,9 +33,6 @@ namespace System Machine = 2, } - // Environment is marked as Eager to allow Lock to read the current - // thread ID, since Lock is used in ClassConstructorRunner.Cctor.GetCctor - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.SystemEnvironment)] public static partial class Environment { /*==================================TickCount=================================== diff --git a/src/System.Private.CoreLib/src/System/Reflection/Assembly.cs b/src/System.Private.CoreLib/src/System/Reflection/Assembly.cs index 2405a050f..66f68e474 100644 --- a/src/System.Private.CoreLib/src/System/Reflection/Assembly.cs +++ b/src/System.Private.CoreLib/src/System/Reflection/Assembly.cs @@ -38,11 +38,10 @@ namespace System.Reflection { Module[] m = GetModules(false); - int numModules = m.Length; int finalLength = 0; - Type[][] moduleTypes = new Type[numModules][]; + Type[][] moduleTypes = new Type[m.Length][]; - for (int i = 0; i < numModules; i++) + for (int i = 0; i < moduleTypes.Length; i++) { moduleTypes[i] = m[i].GetTypes(); finalLength += moduleTypes[i].Length; @@ -50,7 +49,7 @@ namespace System.Reflection int 
current = 0; Type[] ret = new Type[finalLength]; - for (int i = 0; i < numModules; i++) + for (int i = 0; i < moduleTypes.Length; i++) { int length = moduleTypes[i].Length; Array.Copy(moduleTypes[i], 0, ret, current, length); diff --git a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs index b0ec3c9f9..f2f9ff581 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ClassConstructorRunner.cs @@ -13,9 +13,6 @@ using Internal.Runtime.CompilerHelpers; namespace System.Runtime.CompilerServices { - // Marked [EagerStaticClassConstruction] because Cctor.GetCctor - // uses _cctorGlobalLock - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.CompilerServicesClassConstructorRunner)] internal static partial class ClassConstructorRunner { //============================================================================================================== @@ -262,10 +259,8 @@ namespace System.Runtime.CompilerServices //============================================================================================================== // These structs are allocated on demand whenever the runtime tries to run a class constructor. Once the // the class constructor has been successfully initialized, we reclaim this structure. The structure is long- - // lived only if the class constructor threw an exception. This must be marked [EagerStaticClassConstruction] to - // avoid infinite mutual recursion in GetCctor. + // lived only if the class constructor threw an exception. 
//============================================================================================================== - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.CompilerServicesClassConstructorRunnerCctor)] private unsafe struct Cctor { public Lock Lock; @@ -273,14 +268,7 @@ namespace System.Runtime.CompilerServices public int HoldingThread; private int _refCount; private StaticClassConstructionContext* _pContext; - - // Because Cctor's are mutable structs, we have to give our callers raw references to the underlying arrays - // for this collection to be usable. This also means once we place a Cctor in an array, we can't grow or - // reallocate the array. - private static Cctor[][] s_cctorArrays = new Cctor[10][]; - private static int s_cctorArraysCount = 0; - private static int s_count; - + //========================================================================================================== // Gets the Cctor entry associated with a specific class constructor context (creating it if necessary.) //========================================================================================================== @@ -479,8 +467,25 @@ namespace System.Runtime.CompilerServices private static int s_nextBlockingRecordIndex; } - private static Lock s_cctorGlobalLock = new Lock(); + private static Lock s_cctorGlobalLock; + // These three statics are used by ClassConstructorRunner.Cctor but moved out to avoid an unnecessary + // extra class constructor call. + // + // Because Cctor's are mutable structs, we have to give our callers raw references to the underlying arrays + // for this collection to be usable. This also means once we place a Cctor in an array, we can't grow or + // reallocate the array. + private static Cctor[][] s_cctorArrays; + private static int s_cctorArraysCount; + private static int s_count; + + // Eager construction called from LibraryInitialize Cctor.GetCctor uses _cctorGlobalLock. 
+ internal static void Initialize() + { + s_cctorArrays = new Cctor[10][]; + s_cctorGlobalLock = new Lock(); + } + [Conditional("ENABLE_NOISY_CCTOR_LOG")] private static void NoisyLog(string format, IntPtr cctorMethod, int threadId) { diff --git a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ConditionalWeakTable.cs b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ConditionalWeakTable.cs index b5caeb690..5bf209549 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ConditionalWeakTable.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/ConditionalWeakTable.cs @@ -528,7 +528,7 @@ namespace System.Runtime.CompilerServices // Reallocate both buckets and entries and rebuild the bucket and entries from scratch. // This serves both to scrub entries with expired keys and to put the new entries in the proper bucket. int[] newBuckets = new int[newSize]; - for (int bucketIndex = 0; bucketIndex < newSize; bucketIndex++) + for (int bucketIndex = 0; bucketIndex < newBuckets.Length; bucketIndex++) { newBuckets[bucketIndex] = -1; } diff --git a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/EagerOrderedStaticConstructorAttribute.cs b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/EagerOrderedStaticConstructorAttribute.cs index ba4279c43..f3a699b17 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/EagerOrderedStaticConstructorAttribute.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/EagerOrderedStaticConstructorAttribute.cs @@ -22,19 +22,6 @@ namespace System.Runtime.CompilerServices public enum EagerStaticConstructorOrder : int { - // System.Private.CoreLib - SystemString, - SystemPreallocatedOutOfMemoryException, - SystemEnvironment, // ClassConstructorRunner.Cctor.GetCctor use Lock which inturn use current threadID , so System.Environment - // should come before CompilerServicesClassConstructorRunnerCctor - 
CompilerServicesClassConstructorRunnerCctor, - CompilerServicesClassConstructorRunner, - - // System.Private.TypeLoader - RuntimeTypeHandleEqualityComparer, - TypeLoaderEnvironment, - SystemRuntimeTypeLoaderExports, - // Interop InteropHeap, VtableIUnknown, diff --git a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/InternalCompilerAttributes.cs b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/InternalCompilerAttributes.cs index 298b4e466..3ad095e91 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/InternalCompilerAttributes.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/CompilerServices/InternalCompilerAttributes.cs @@ -21,21 +21,4 @@ namespace System.Runtime.CompilerServices [AttributeUsage(AttributeTargets.Struct)] public sealed class StackOnlyAttribute : Attribute { } - -#if false // Unused right now. It is likely going to be useful for Span<T> implementation. - // This is a dummy class to be replaced by the compiler with a ref T - // It has to be a dummy class to avoid complicated type substitution - // and other complications in the compiler. 
- public sealed class ByReference<T> - { - // - // Managed pointer creation - // - [Intrinsic] - public static extern ByReference<T> FromRef(ref T source); - - [Intrinsic] - public static extern ref T ToRef(ByReference<T> source); - } -#endif } diff --git a/src/System.Private.CoreLib/src/System/Runtime/InteropServices/PInvokeMarshal.Unix.cs b/src/System.Private.CoreLib/src/System/Runtime/InteropServices/PInvokeMarshal.Unix.cs index 3358ddeb1..4f893e789 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/InteropServices/PInvokeMarshal.Unix.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/InteropServices/PInvokeMarshal.Unix.cs @@ -14,12 +14,12 @@ namespace System.Runtime.InteropServices { public static void SaveLastWin32Error() { - s_lastWin32Error = Interop.Sys.GetLastErrNo(); + s_lastWin32Error = Interop.Sys.GetErrNo(); } internal static void ClearLastWin32Error() { - Interop.Sys.SetLastErrNo(0); + Interop.Sys.ClearErrNo(); } public static unsafe String PtrToStringAnsi(IntPtr ptr) diff --git a/src/System.Private.CoreLib/src/System/Runtime/TypeLoaderExports.cs b/src/System.Private.CoreLib/src/System/Runtime/TypeLoaderExports.cs index 7f8cf556e..64b50165e 100644 --- a/src/System.Private.CoreLib/src/System/Runtime/TypeLoaderExports.cs +++ b/src/System.Private.CoreLib/src/System/Runtime/TypeLoaderExports.cs @@ -11,8 +11,6 @@ using System.Runtime.InteropServices; namespace System.Runtime { - // Initialize the cache eagerly to avoid null checks - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.SystemRuntimeTypeLoaderExports)] public static class TypeLoaderExports { [RuntimeExport("GetThreadStaticsForDynamicType")] @@ -73,12 +71,19 @@ namespace System.Runtime // Initialize the cache eagerly to avoid null checks. // Use array with just single element to make this pay-for-play. The actual cache will be allocated only // once the lazy lookups are actually needed. 
- private static Entry[] s_cache = new Entry[1]; + private static Entry[] s_cache; private static Lock s_lock; private static GCHandle s_previousCache; - private volatile static IntPtr[] s_resolutionFunctionPointers = new IntPtr[4]; - private static int s_nextResolutionFunctionPointerIndex = (int)SignatureKind.Count; + private volatile static IntPtr[] s_resolutionFunctionPointers; + private static int s_nextResolutionFunctionPointerIndex; + + internal static void Initialize() + { + s_cache = new Entry[1]; + s_resolutionFunctionPointers = new IntPtr[4]; + s_nextResolutionFunctionPointerIndex = (int)SignatureKind.Count; + } [RuntimeExport("GenericLookup")] public static IntPtr GenericLookup(IntPtr context, IntPtr signature) diff --git a/src/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs b/src/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs index 5c3945636..1a47b20a7 100644 --- a/src/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs +++ b/src/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs @@ -13,11 +13,15 @@ using Internal.Runtime.Augments; namespace System { - // Eagerly preallocate instance of out of memory exception to avoid infinite recursion once we run out of memory - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.SystemPreallocatedOutOfMemoryException)] internal class PreallocatedOutOfMemoryException { - public static readonly OutOfMemoryException Instance = new OutOfMemoryException(message: null); // Cannot call the nullary constructor as that triggers non-trivial resource manager logic. + public static OutOfMemoryException Instance { get; private set; } + + // Eagerly preallocate instance of out of memory exception to avoid infinite recursion once we run out of memory + internal static void Initialize() + { + Instance = new OutOfMemoryException(message: null); // Cannot call the nullary constructor as that triggers non-trivial resource manager logic. 
+ } } public class RuntimeExceptionHelpers diff --git a/src/System.Private.CoreLib/src/System/Threading/CancellationTokenSource.cs b/src/System.Private.CoreLib/src/System/Threading/CancellationTokenSource.cs index 165708fb1..1777dfad9 100644 --- a/src/System.Private.CoreLib/src/System/Threading/CancellationTokenSource.cs +++ b/src/System.Private.CoreLib/src/System/Threading/CancellationTokenSource.cs @@ -841,12 +841,19 @@ namespace System.Threading /// <param name="token2">The second <see cref="T:System.Threading.CancellationToken">CancellationToken</see> to observe.</param> /// <returns>A <see cref="T:System.Threading.CancellationTokenSource">CancellationTokenSource</see> that is linked /// to the source tokens.</returns> - public static CancellationTokenSource CreateLinkedTokenSource(CancellationToken token1, CancellationToken token2) - { - return token1.CanBeCanceled || token2.CanBeCanceled ? - new LinkedCancellationTokenSource(token1, token2) : - new CancellationTokenSource(); - } + public static CancellationTokenSource CreateLinkedTokenSource(CancellationToken token1, CancellationToken token2) => + !token1.CanBeCanceled ? CreateLinkedTokenSource(token2) : + token2.CanBeCanceled ? new Linked2CancellationTokenSource(token1, token2) : + (CancellationTokenSource)new Linked1CancellationTokenSource(token1); + + /// <summary> + /// Creates a <see cref="CancellationTokenSource"/> that will be in the canceled state + /// when any of the source tokens are in the canceled state. + /// </summary> + /// <param name="token">The first <see cref="T:System.Threading.CancellationToken">CancellationToken</see> to observe.</param> + /// <returns>A <see cref="CancellationTokenSource"/> that is linked to the source tokens.</returns> + internal static CancellationTokenSource CreateLinkedTokenSource(CancellationToken token) => + token.CanBeCanceled ? 
new Linked1CancellationTokenSource(token) : new CancellationTokenSource(); /// <summary> /// Creates a <see cref="T:System.Threading.CancellationTokenSource">CancellationTokenSource</see> that will be in the canceled state @@ -861,14 +868,19 @@ namespace System.Threading if (tokens == null) throw new ArgumentNullException(nameof(tokens)); - if (tokens.Length == 0) - throw new ArgumentException(SR.CancellationToken_CreateLinkedToken_TokensIsEmpty); - - // a defensive copy is not required as the array has value-items that have only a single IntPtr field, - // hence each item cannot be null itself, and reads of the payloads cannot be torn. - Contract.EndContractBlock(); - - return new LinkedCancellationTokenSource(tokens); + switch (tokens.Length) + { + case 0: + throw new ArgumentException(SR.CancellationToken_CreateLinkedToken_TokensIsEmpty); + case 1: + return CreateLinkedTokenSource(tokens[0]); + case 2: + return CreateLinkedTokenSource(tokens[0], tokens[1]); + default: + // a defensive copy is not required as the array has value-items that have only a single reference field, + // hence each item cannot be null itself, and reads of the payloads cannot be torn. 
+ return new LinkedNCancellationTokenSource(tokens); + } } @@ -884,35 +896,50 @@ namespace System.Threading } } - private sealed class LinkedCancellationTokenSource : CancellationTokenSource + private sealed class Linked1CancellationTokenSource : CancellationTokenSource { - private static readonly Action<object> s_linkedTokenCancelDelegate = - s => ((CancellationTokenSource)s).NotifyCancellation(throwOnFirstException: false); // skip ThrowIfDisposed() check in Cancel() - private CancellationTokenRegistration[] m_linkingRegistrations; + private readonly CancellationTokenRegistration _reg1; - internal LinkedCancellationTokenSource(CancellationToken token1, CancellationToken token2) + internal Linked1CancellationTokenSource(CancellationToken token1) { - bool token2CanBeCanceled = token2.CanBeCanceled; + _reg1 = token1.InternalRegisterWithoutEC(LinkedNCancellationTokenSource.s_linkedTokenCancelDelegate, this); + } - if (token1.CanBeCanceled) - { - m_linkingRegistrations = new CancellationTokenRegistration[token2CanBeCanceled ? 
2 : 1]; // there will be at least 1 and at most 2 linkings - m_linkingRegistrations[0] = token1.InternalRegisterWithoutEC(s_linkedTokenCancelDelegate, this); - } + protected override void Dispose(bool disposing) + { + if (!disposing || m_disposed) return; + _reg1.Dispose(); + base.Dispose(disposing); + } + } - if (token2CanBeCanceled) - { - int index = 1; - if (m_linkingRegistrations == null) - { - m_linkingRegistrations = new CancellationTokenRegistration[1]; // this will be the only linking - index = 0; - } - m_linkingRegistrations[index] = token2.InternalRegisterWithoutEC(s_linkedTokenCancelDelegate, this); - } + private sealed class Linked2CancellationTokenSource : CancellationTokenSource + { + private readonly CancellationTokenRegistration _reg1; + private readonly CancellationTokenRegistration _reg2; + + internal Linked2CancellationTokenSource(CancellationToken token1, CancellationToken token2) + { + _reg1 = token1.InternalRegisterWithoutEC(LinkedNCancellationTokenSource.s_linkedTokenCancelDelegate, this); + _reg2 = token2.InternalRegisterWithoutEC(LinkedNCancellationTokenSource.s_linkedTokenCancelDelegate, this); } - internal LinkedCancellationTokenSource(params CancellationToken[] tokens) + protected override void Dispose(bool disposing) + { + if (!disposing || m_disposed) return; + _reg1.Dispose(); + _reg2.Dispose(); + base.Dispose(disposing); + } + } + + private sealed class LinkedNCancellationTokenSource : CancellationTokenSource + { + internal static readonly Action<object> s_linkedTokenCancelDelegate = + s => ((CancellationTokenSource)s).NotifyCancellation(throwOnFirstException: false); // skip ThrowIfDisposed() check in Cancel() + private CancellationTokenRegistration[] m_linkingRegistrations; + + internal LinkedNCancellationTokenSource(params CancellationToken[] tokens) { m_linkingRegistrations = new CancellationTokenRegistration[tokens.Length]; @@ -945,7 +972,6 @@ namespace System.Threading base.Dispose(disposing); } - } } diff --git 
a/src/System.Private.DeveloperExperience.Console/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs b/src/System.Private.DeveloperExperience.Console/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs new file mode 100644 index 000000000..68af06c7f --- /dev/null +++ b/src/System.Private.DeveloperExperience.Console/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs @@ -0,0 +1,18 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. + +using System; + +using Internal.DeveloperExperience; + +namespace Internal.Runtime.CompilerHelpers +{ + public class LibraryInitializer + { + public static void InitializeLibrary() + { + DeveloperExperienceConnectorConsole.Initialize(); + } + } +} diff --git a/src/System.Private.DeveloperExperience.Console/src/System.Private.DeveloperExperience.Console.csproj b/src/System.Private.DeveloperExperience.Console/src/System.Private.DeveloperExperience.Console.csproj index 5cf519665..5a748e1b4 100644 --- a/src/System.Private.DeveloperExperience.Console/src/System.Private.DeveloperExperience.Console.csproj +++ b/src/System.Private.DeveloperExperience.Console/src/System.Private.DeveloperExperience.Console.csproj @@ -33,6 +33,7 @@ <ItemGroup> <Compile Include="Internal\DeveloperExperience\DeveloperExperienceConnector.cs" /> <Compile Include="Internal\DeveloperExperience\DeveloperExperienceConsole.cs" /> + <Compile Include="Internal\Runtime\CompilerHelpers\LibraryInitializer.cs" /> </ItemGroup> <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" /> diff --git a/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ExecutionEnvironmentImplementation.MappingTables.cs b/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ExecutionEnvironmentImplementation.MappingTables.cs 
index 3d2fe2832..35e6a3e64 100644 --- a/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ExecutionEnvironmentImplementation.MappingTables.cs +++ b/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ExecutionEnvironmentImplementation.MappingTables.cs @@ -1075,7 +1075,7 @@ namespace Internal.Reflection.Execution } else { - uint nameAndSigOffset = externalReferences.GetNativeLayoutOffsetFromIndex(entryMethodHandleOrNameAndSigRaw); + uint nameAndSigOffset = externalReferences.GetExternalNativeLayoutOffset(entryMethodHandleOrNameAndSigRaw); MethodNameAndSignature nameAndSig; if (!TypeLoaderEnvironment.Instance.TryGetMethodNameAndSignatureFromNativeLayoutOffset(mappingTableModule, nameAndSigOffset, out nameAndSig)) { diff --git a/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ReflectionExecution.cs b/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ReflectionExecution.cs index 453dc7637..5d287ca1c 100644 --- a/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ReflectionExecution.cs +++ b/src/System.Private.Reflection.Execution/src/Internal/Reflection/Execution/ReflectionExecution.cs @@ -38,6 +38,12 @@ namespace Internal.Reflection.Execution [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.ReflectionExecution)] public static class ReflectionExecution { + // + // CoreRT calls Initialize directly for all types its needs that typically have EagerOrderedStaticConstructor + // attributes. To retain compatibility, please ensure static initialization is not done inline, and instead + // added to Initialize. + // +#if !CORERT /// <summary> /// This eager constructor initializes runtime reflection support. 
As part of ExecutionEnvironmentImplementation /// initialization it enumerates the modules and registers the ones containing EmbeddedMetadata reflection blobs @@ -45,6 +51,12 @@ namespace Internal.Reflection.Execution /// </summary> static ReflectionExecution() { + Initialize(); + } +#endif + + internal static void Initialize() + { // Initialize Reflection.Core's one and only ExecutionDomain. ExecutionEnvironmentImplementation executionEnvironment = new ExecutionEnvironmentImplementation(); ReflectionDomainSetupImplementation setup = new ReflectionDomainSetupImplementation(executionEnvironment); @@ -85,7 +97,7 @@ namespace Internal.Reflection.Execution return ReflectionCoreExecution.ExecutionDomain.GetType(typeName, assemblyResolver, typeResolver, throwOnError, ignoreCase, defaultAssemblies); } - internal static ExecutionEnvironmentImplementation ExecutionEnvironment { get; } + internal static ExecutionEnvironmentImplementation ExecutionEnvironment { get; private set; } internal static IList<string> DefaultAssemblyNamesForGetType; } diff --git a/src/System.Private.Reflection.Execution/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs b/src/System.Private.Reflection.Execution/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs new file mode 100644 index 000000000..9932db2d1 --- /dev/null +++ b/src/System.Private.Reflection.Execution/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs @@ -0,0 +1,18 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using System; + +using Internal.Reflection.Execution; + +namespace Internal.Runtime.CompilerHelpers +{ + public class LibraryInitializer + { + public static void InitializeLibrary() + { + ReflectionExecution.Initialize(); + } + } +} diff --git a/src/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj b/src/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj index 4939f3853..20a4da520 100644 --- a/src/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj +++ b/src/System.Private.Reflection.Execution/src/System.Private.Reflection.Execution.csproj @@ -90,6 +90,7 @@ <Compile Include="Internal\Reflection\Execution\PayForPlayExperience\MissingMetadataExceptionCreator.cs" /> <Compile Include="Internal\Reflection\Extensions\NonPortable\CustomAttributeInstantiator.cs" /> <Compile Include="Internal\Reflection\Extensions\NonPortable\DelegateMethodInfoRetriever.cs" /> + <Compile Include="Internal\Runtime\CompilerHelpers\LibraryInitializer.cs" /> <Compile Include="System\Reflection\MissingRuntimeArtifactException.cs" /> </ItemGroup> diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs new file mode 100644 index 000000000..5b7964f8a --- /dev/null +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/CompilerHelpers/LibraryInitializer.cs @@ -0,0 +1,18 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. +// See the LICENSE file in the project root for more information. 
+ +using System; + +using Internal.Runtime.TypeLoader; + +namespace Internal.Runtime.CompilerHelpers +{ + public class LibraryInitializer + { + public static void InitializeLibrary() + { + TypeLoaderEnvironment.Initialize(); + } + } +} diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/ExternalReferencesTable.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/ExternalReferencesTable.cs index 644eac90e..99f151906 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/ExternalReferencesTable.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/ExternalReferencesTable.cs @@ -137,16 +137,29 @@ namespace Internal.Runtime.TypeLoader { return RuntimeAugments.CreateRuntimeTypeHandle(GetIntPtrFromIndex(index)); } + } - unsafe public uint GetNativeLayoutOffsetFromIndex(uint index) + public static class ExternalReferencesTableExtentions + { + public static uint GetExternalNativeLayoutOffset(this ExternalReferencesTable extRefs, uint index) { - if (index >= _elementsCount) - throw new BadImageFormatException(); - + // CoreRT is a bit more optimized than ProjectN. In ProjectN, some tables that reference data + // in the native layout are constructed at NUTC compilation time, but the native layout is only + // generated at binder time, so we use the external references table to link the nutc-built + // tables with their native layout dependencies. + // + // In ProjectN, the nutc-built tables will be emitted with indices into the external references + // table, and the entries in the external references table will contain the offsets into the + // native layout blob. + // + // In CoreRT, since all tables and native layout blob are built together at the same time, we can + // optimize by writing the native layout offsets directly into the table, without requiring the extra + // lookup in the external references table. 
+ // #if CORERT - return *(uint*)(((IntPtr*)_elements)[index]); + return index; #else - return ((TableElement*)_elements)[index]; + return extRefs.GetRvaFromIndex(index); #endif } } diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TemplateLocator.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TemplateLocator.cs index ec5656cdc..7726b09d6 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TemplateLocator.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TemplateLocator.cs @@ -92,7 +92,7 @@ namespace Internal.Runtime.TypeLoader if (typeDesc == canonForm) { TypeLoaderLogger.WriteLine("Found metadata template for type " + concreteType.ToString() + ": " + typeDesc.ToString()); - nativeLayoutInfoToken = (uint)externalFixupsTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + nativeLayoutInfoToken = (uint)externalFixupsTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); if (nativeLayoutInfoToken == BadTokenFixupValue) { throw new BadImageFormatException(); @@ -150,7 +150,7 @@ namespace Internal.Runtime.TypeLoader if (methodDesc == canonForm) { TypeLoaderLogger.WriteLine("Found metadata template for method " + concreteMethod.ToString() + ": " + methodDesc.ToString()); - nativeLayoutInfoToken = (uint)externalFixupsTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + nativeLayoutInfoToken = (uint)externalFixupsTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); if (nativeLayoutInfoToken == BadTokenFixupValue) { throw new BadImageFormatException(); @@ -197,7 +197,7 @@ namespace Internal.Runtime.TypeLoader if (canonForm == candidateTemplate.ConvertToCanonForm(kind)) { TypeLoaderLogger.WriteLine("Found template for type " + concreteType.ToString() + ": " + candidateTemplate.ToString()); - nativeLayoutInfoToken = (uint)externalFixupsTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + nativeLayoutInfoToken = 
(uint)externalFixupsTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); if (nativeLayoutInfoToken == BadTokenFixupValue) { // TODO: once multifile gets fixed up, make this throw a BadImageFormatException @@ -269,7 +269,7 @@ namespace Internal.Runtime.TypeLoader NativeParser entryParser; while (!(entryParser = enumerator.GetNext()).IsNull) { - var methodSignatureParser = new NativeParser(nativeLayoutReader, externalFixupsTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned())); + var methodSignatureParser = new NativeParser(nativeLayoutReader, externalFixupsTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned())); // Get the unified generic method holder and convert it to its canonical form var candidateTemplate = (InstantiatedMethod)context.GetMethod(ref methodSignatureParser); @@ -279,7 +279,7 @@ namespace Internal.Runtime.TypeLoader { TypeLoaderLogger.WriteLine("Found template for generic method " + concreteMethod.ToString() + ": " + candidateTemplate.ToString()); nativeLayoutInfoModule = moduleHandle; - nativeLayoutInfoToken = (uint)externalFixupsTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + nativeLayoutInfoToken = (uint)externalFixupsTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); if (nativeLayoutInfoToken == BadTokenFixupValue) { // TODO: once multifile gets fixed up, make this throw a BadImageFormatException diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.GVMResolution.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.GVMResolution.cs index c701c13a6..dd7403ba5 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.GVMResolution.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.GVMResolution.cs @@ -85,7 +85,7 @@ namespace Internal.Runtime.TypeLoader for (uint j = 0; j < numTargetImplementations; j++) { - uint nameAndSigToken = 
extRefs.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigToken = extRefs.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature targetMethodNameAndSignature = GetMethodNameAndSignatureFromNativeReader(nativeLayoutReader, moduleHandle, nameAndSigToken); RuntimeTypeHandle targetTypeHandle = extRefs.GetRuntimeTypeHandleFromIndex(entryParser.GetUnsigned()); @@ -114,7 +114,7 @@ namespace Internal.Runtime.TypeLoader { RuntimeTypeHandle currentIfaceTypeHandle = default(RuntimeTypeHandle); - NativeParser ifaceSigParser = new NativeParser(nativeLayoutReader, extRefs.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned())); + NativeParser ifaceSigParser = new NativeParser(nativeLayoutReader, extRefs.GetExternalNativeLayoutOffset(entryParser.GetUnsigned())); if (TypeLoaderEnvironment.Instance.GetTypeFromSignatureAndContext(ref ifaceSigParser, moduleHandle, targetTypeInstantiation, null, out currentIfaceTypeHandle)) { @@ -226,7 +226,7 @@ namespace Internal.Runtime.TypeLoader if (!openCallingTypeHandle.Equals(interfaceTypeHandle)) continue; - uint nameAndSigToken = extRefs.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigToken = extRefs.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature interfaceMethodNameAndSignature = GetMethodNameAndSignatureFromNativeReader(nativeLayoutReader, moduleHandle, nameAndSigToken); if (!interfaceMethodNameAndSignature.Equals(methodNameAndSignature)) @@ -459,13 +459,13 @@ namespace Internal.Runtime.TypeLoader if (!parsedTargetTypeHandle.Equals(openTargetTypeHandle)) continue; - uint parsedCallingNameAndSigToken = extRefs.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint parsedCallingNameAndSigToken = extRefs.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature parsedCallingNameAndSignature = GetMethodNameAndSignatureFromNativeReader(nativeLayoutReader, moduleHandle, 
parsedCallingNameAndSigToken); if (!parsedCallingNameAndSignature.Equals(callingMethodNameAndSignature)) continue; - uint parsedTargetMethodNameAndSigToken = extRefs.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint parsedTargetMethodNameAndSigToken = extRefs.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature targetMethodNameAndSignature = GetMethodNameAndSignatureFromNativeReader(nativeLayoutReader, moduleHandle, parsedTargetMethodNameAndSigToken); Debug.Assert(targetMethodNameAndSignature != null); diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs index c1acd5517..777352910 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.LdTokenResultLookup.cs @@ -83,7 +83,7 @@ namespace Internal.Runtime.TypeLoader return Encoding.UTF8.GetString(dataStream, checked((int)stringLen)); } - private static LowLevelDictionary<string, IntPtr> s_nativeFormatStrings = new LowLevelDictionary<string, IntPtr>(); + private static LowLevelDictionary<string, IntPtr> s_nativeFormatStrings; /// <summary> /// From a string, get a pointer to an allocated memory location that holds a NativeFormat encoded string. 
diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.Metadata.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.Metadata.cs index c52479aad..22d2c7d56 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.Metadata.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.Metadata.cs @@ -852,7 +852,7 @@ namespace Internal.Runtime.TypeLoader if (!entryType.Equals(definitionType)) continue; - uint nameAndSigPointerToken = externalReferences.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigPointerToken = externalReferences.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature nameAndSig; if (!TypeLoaderEnvironment.Instance.TryGetMethodNameAndSignatureFromNativeLayoutOffset(moduleHandle, nameAndSigPointerToken, out nameAndSig)) @@ -995,7 +995,7 @@ namespace Internal.Runtime.TypeLoader if (!entryType.Equals(definitionType)) continue; - uint nameAndSigPointerToken = externalReferences.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigPointerToken = externalReferences.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); uint parentHierarchyAndFlag = entryParser.GetUnsigned(); bool isGenericVirtualMethod = ((parentHierarchyAndFlag & VirtualInvokeTableEntry.FlagsMask) == VirtualInvokeTableEntry.GenericVirtualMethod); @@ -1576,7 +1576,7 @@ namespace Internal.Runtime.TypeLoader } else { - uint nameAndSigToken = extRefTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigToken = extRefTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); MethodNameAndSignature nameAndSig; if (!TypeLoaderEnvironment.Instance.TryGetMethodNameAndSignatureFromNativeLayoutOffset(_moduleHandle, nameAndSigToken, out nameAndSig)) { @@ -1608,7 +1608,7 @@ namespace Internal.Runtime.TypeLoader { Debug.Assert((_hasEntryPoint || 
((_flags & InvokeTableFlags.HasVirtualInvoke) != 0)) && ((_flags & InvokeTableFlags.RequiresInstArg) != 0)); - uint nameAndSigPointerToken = extRefTable.GetNativeLayoutOffsetFromIndex(entryParser.GetUnsigned()); + uint nameAndSigPointerToken = extRefTable.GetExternalNativeLayoutOffset(entryParser.GetUnsigned()); if (!TypeLoaderEnvironment.Instance.TryGetMethodNameAndSignatureFromNativeLayoutOffset(_moduleHandle, nameAndSigPointerToken, out _nameAndSignature)) { Debug.Assert(false); //Error diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.NamedTypeLookup.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.NamedTypeLookup.cs index 64bdbed2c..9f1b828be 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.NamedTypeLookup.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.NamedTypeLookup.cs @@ -37,7 +37,7 @@ namespace Internal.Runtime.TypeLoader private NamedTypeRuntimeTypeHandleToMetadataHashtable _runtimeTypeHandleToMetadataHashtable = new NamedTypeRuntimeTypeHandleToMetadataHashtable(); - public static readonly IntPtr NoStaticsData = (IntPtr)1; + public static IntPtr NoStaticsData { get; private set; } private class NamedTypeRuntimeTypeHandleToMetadataHashtable : LockFreeReaderHashtable<RuntimeTypeHandle, NamedTypeLookupResult> { diff --git a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs index b8d8deb11..addf2ed12 100644 --- a/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs +++ b/src/System.Private.TypeLoader/src/Internal/Runtime/TypeLoader/TypeLoaderEnvironment.cs @@ -86,13 +86,12 @@ namespace Internal.Runtime.TypeLoader } } - [EagerOrderedStaticConstructor(EagerStaticConstructorOrder.TypeLoaderEnvironment)] public sealed partial 
class TypeLoaderEnvironment { [ThreadStatic] private static bool t_isReentrant; - public static readonly TypeLoaderEnvironment Instance; + public static TypeLoaderEnvironment Instance { get; private set; } /// <summary> /// List of loaded binary modules is typically used to locate / process various metadata blobs @@ -107,10 +106,13 @@ namespace Internal.Runtime.TypeLoader [ThreadStatic] private static LowLevelDictionary<IntPtr, NativeReader> t_moduleNativeReaders; - static TypeLoaderEnvironment() + // Eager initialization called from LibraryInitializer for the assembly. + internal static void Initialize() { Instance = new TypeLoaderEnvironment(); RuntimeAugments.InitializeLookups(new Callbacks()); + s_nativeFormatStrings = new LowLevelDictionary<string, IntPtr>(); + NoStaticsData = (IntPtr)1; } public TypeLoaderEnvironment() diff --git a/src/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj b/src/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj index d1c371050..58f967904 100644 --- a/src/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj +++ b/src/System.Private.TypeLoader/src/System.Private.TypeLoader.csproj @@ -27,7 +27,6 @@ <ReferencePath Include="$(AotPackageReferencePath)\System.Collections.dll" /> <ReferencePath Include="$(AotPackageReferencePath)\System.Collections.Immutable.dll" /> <ReferencePath Include="$(AotPackageReferencePath)\System.Reflection.Metadata.dll" /> - <ProjectReference Include="..\..\System.Private.CoreLib\src\System.Private.CoreLib.csproj" /> <ProjectReference Include="..\..\System.Private.Reflection.Metadata\src\System.Private.Reflection.Metadata.csproj" /> </ItemGroup> @@ -264,6 +263,7 @@ <Compile Include="..\..\Common\src\TypeSystem\Common\WellKnownType.cs"> <Link>Internal\TypeSystem\WellKnownType.cs</Link> </Compile> + <Compile Include="Internal\Runtime\CompilerHelpers\LibraryInitializer.cs" /> <Compile Include="Internal\Runtime\TypeLoader\CallConverterThunk.CallConversionInfo.cs" /> <Compile 
Include="Internal\Runtime\TypeLoader\CallConverterThunk.CallConversionParameters.cs" /> <Compile Include="Internal\Runtime\TypeLoader\CallConverterThunk.cs" /> diff --git a/src/packaging/netcoreapp/project.json b/src/packaging/netcoreapp/project.json index e84eff652..6c036cd40 100644 --- a/src/packaging/netcoreapp/project.json +++ b/src/packaging/netcoreapp/project.json @@ -2,43 +2,43 @@ "frameworks": { "netcoreapp1.2": { "dependencies": { - "System.Buffers": "4.4.0-beta-24906-01", - "System.Console": "4.4.0-beta-24906-01", - "System.Collections": "4.4.0-beta-24906-01", - "System.Collections.Concurrent": "4.4.0-beta-24906-01", - "System.Collections.NonGeneric": "4.4.0-beta-24906-01", - "System.Diagnostics.Debug": "4.4.0-beta-24906-01", - "System.Diagnostics.Tracing": "4.4.0-beta-24906-01", - "System.Diagnostics.Tools": "4.4.0-beta-24906-01", - "System.Globalization": "4.4.0-beta-24906-01", - "System.Globalization.Calendars": "4.4.0-beta-24906-01", - "System.IO": "4.4.0-beta-24906-01", - "System.IO.FileSystem": "4.4.0-beta-24906-01", - "System.IO.FileSystem.Primitives": "4.4.0-beta-24906-01", - "System.Linq": "4.4.0-beta-24906-01", - "System.Reflection": "4.4.0-beta-24906-01", - "System.Reflection.Primitives": "4.4.0-beta-24906-01", - "System.Reflection.Extensions": "4.4.0-beta-24906-01", - "System.Reflection.TypeExtensions": "4.4.0-beta-24906-01", - "System.Resources.ResourceManager": "4.4.0-beta-24906-01", - "System.Runtime": "4.4.0-beta-24906-01", - "System.Runtime.CompilerServices.Unsafe": "4.4.0-beta-24906-01", - "System.Runtime.Extensions": "4.4.0-beta-24906-01", - "System.Runtime.InteropServices": "4.4.0-beta-24906-01", - "System.Runtime.InteropServices.RuntimeInformation": "4.4.0-beta-24906-01", - "System.Runtime.Handles": "4.4.0-beta-24906-01", - "System.Runtime.Numerics": "4.4.0-beta-24906-01", - "System.Security.Principal": "4.4.0-beta-24906-01", - "System.Text.Encoding": "4.4.0-beta-24906-01", - "System.Text.Encoding.Extensions": "4.4.0-beta-24906-01", - 
"System.Threading": "4.4.0-beta-24906-01", - "System.Threading.Overlapped": "4.4.0-beta-24906-01", - "System.Threading.Tasks": "4.4.0-beta-24906-01", - "System.Threading.Thread": "4.4.0-beta-24906-01", - "System.Threading.Timer": "4.4.0-beta-24906-01", - "Microsoft.Win32.Primitives": "4.4.0-beta-24906-01", + "System.Buffers": "4.4.0-beta-24913-02", + "System.Console": "4.4.0-beta-24913-02", + "System.Collections": "4.4.0-beta-24913-02", + "System.Collections.Concurrent": "4.4.0-beta-24913-02", + "System.Collections.NonGeneric": "4.4.0-beta-24913-02", + "System.Diagnostics.Debug": "4.4.0-beta-24913-02", + "System.Diagnostics.Tracing": "4.4.0-beta-24913-02", + "System.Diagnostics.Tools": "4.4.0-beta-24913-02", + "System.Globalization": "4.4.0-beta-24913-02", + "System.Globalization.Calendars": "4.4.0-beta-24913-02", + "System.IO": "4.4.0-beta-24913-02", + "System.IO.FileSystem": "4.4.0-beta-24913-02", + "System.IO.FileSystem.Primitives": "4.4.0-beta-24913-02", + "System.Linq": "4.4.0-beta-24913-02", + "System.Reflection": "4.4.0-beta-24913-02", + "System.Reflection.Primitives": "4.4.0-beta-24913-02", + "System.Reflection.Extensions": "4.4.0-beta-24913-02", + "System.Reflection.TypeExtensions": "4.4.0-beta-24913-02", + "System.Resources.ResourceManager": "4.4.0-beta-24913-02", + "System.Runtime": "4.4.0-beta-24913-02", + "System.Runtime.CompilerServices.Unsafe": "4.4.0-beta-24913-02", + "System.Runtime.Extensions": "4.4.0-beta-24913-02", + "System.Runtime.InteropServices": "4.4.0-beta-24913-02", + "System.Runtime.InteropServices.RuntimeInformation": "4.4.0-beta-24913-02", + "System.Runtime.Handles": "4.4.0-beta-24913-02", + "System.Runtime.Numerics": "4.4.0-beta-24913-02", + "System.Security.Principal": "4.4.0-beta-24913-02", + "System.Text.Encoding": "4.4.0-beta-24913-02", + "System.Text.Encoding.Extensions": "4.4.0-beta-24913-02", + "System.Threading": "4.4.0-beta-24913-02", + "System.Threading.Overlapped": "4.4.0-beta-24913-02", + "System.Threading.Tasks": 
"4.4.0-beta-24913-02", + "System.Threading.Thread": "4.4.0-beta-24913-02", + "System.Threading.Timer": "4.4.0-beta-24913-02", + "Microsoft.Win32.Primitives": "4.4.0-beta-24913-02", - "runtime.native.System": "4.4.0-beta-24906-01" + "runtime.native.System": "4.4.0-beta-24913-02" } } }, diff --git a/src/packaging/packages.targets b/src/packaging/packages.targets index 948263e36..7b848c057 100644 --- a/src/packaging/packages.targets +++ b/src/packaging/packages.targets @@ -22,9 +22,9 @@ <StaticLibExt Condition="'$(OsEnvironment)'=='Windows_NT'">lib</StaticLibExt> <StaticLibExt Condition="'$(OsEnvironment)'!='Windows_NT'">a</StaticLibExt> - <CoreFxPackageVersion>4.4.0-beta-24906-01</CoreFxPackageVersion> + <CoreFxPackageVersion>4.4.0-beta-24913-02</CoreFxPackageVersion> - <JitPackageVersion>1.2.0-beta-24815-03</JitPackageVersion> + <JitPackageVersion>1.2.0-beta-24911-02</JitPackageVersion> <ObjectWriterPackageVersion>1.0.13-prerelease-00001</ObjectWriterPackageVersion> <ObjectWriterNuPkgRid Condition="'$(OSGroup)'=='Linux'">ubuntu.14.04-x64</ObjectWriterNuPkgRid> @@ -112,7 +112,6 @@ <!-- Repackage the CoreCLR framework --> <!-- TODO: Obtain this via nuget once the framework is properly packaged --> - <ILCompilerAnyFrameworkFiles Include="System.Runtime" /> <ILCompilerAnyFrameworkFiles Include="System.Globalization" /> <ILCompilerAnyFrameworkFiles Include="System.Globalization.Calendars" /> <ILCompilerAnyFrameworkFiles Include="System.IO" /> @@ -127,12 +126,21 @@ <Text><![CDATA[ <file src="packages/runtime.any.%(Identity)/$(CoreFxPackageVersion)/lib/netstandard1.7/%(Identity).dll" target="runtimes/$(NuPkgRid)/native/framework/%(Identity).dll" /> ]]></Text> </ILCompilerSdkBinPlace> - <ILCompilerFrameworkFiles Include="runtime.aot.System.Reflection.Primitives/$(CoreFxPackageVersion)/runtimes/aot/lib/uap10.1/System.Reflection.Primitives.dll" /> - <ILCompilerFrameworkFiles 
Include="runtime.aot.System.Runtime.InteropServices/$(CoreFxPackageVersion)/runtimes/aot/lib/uap10.1/System.Runtime.InteropServices.dll" /> - <ILCompilerFrameworkFiles Include="runtime.aot.System.Threading.Tasks/$(CoreFxPackageVersion)/runtimes/aot/lib/uap10.1/System.Threading.Tasks.dll" /> - <ILCompilerFrameworkFiles Include="runtime.aot.System.Collections/$(CoreFxPackageVersion)/runtimes/aot/lib/uap10.1/System.Collections.dll" /> - <ILCompilerFrameworkFiles Include="runtime.win.System.Diagnostics.Debug/$(CoreFxPackageVersion)/runtimes/aot/lib/netcore50/System.Diagnostics.Debug.dll" /> + <!-- Libraries with netcoreapp1.2corert configuration --> + <ILCompilerFrameworkFiles Include="runtime.win7.System.Private.Uri/$(CoreFxPackageVersion)/runtimes/win-corert/lib/netcoreapp1.2/System.Private.Uri.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> + <ILCompilerFrameworkFiles Include="runtime.unix.System.Private.Uri/$(CoreFxPackageVersion)/runtimes/unix-corert/lib/netcoreapp1.2/System.Private.Uri.dll" Condition="'$(OsEnvironment)'!='Windows_NT'" /> + <ILCompilerFrameworkFiles Include="runtime.any.System.Runtime/$(CoreFxPackageVersion)/runtimes/corert/lib/netcoreapp1.2/System.Runtime.dll" /> + <ILCompilerFrameworkFiles Include="runtime.win.System.Runtime.Extensions/$(CoreFxPackageVersion)/runtimes/win-corert/lib/netcoreapp1.2/System.Runtime.Extensions.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> + <ILCompilerFrameworkFiles Include="runtime.unix.System.Runtime.Extensions/$(CoreFxPackageVersion)/runtimes/unix-corert/lib/netcoreapp1.2/System.Runtime.Extensions.dll" Condition="'$(OsEnvironment)'!='Windows_NT'" /> + <ILCompilerFrameworkFiles Include="runtime.any.System.Reflection.Primitives/$(CoreFxPackageVersion)/runtimes/corert/lib/netcoreapp1.2/System.Reflection.Primitives.dll" /> + <ILCompilerFrameworkFiles Include="runtime.any.System.Runtime.InteropServices/$(CoreFxPackageVersion)/runtimes/corert/lib/netcoreapp1.2/System.Runtime.InteropServices.dll" /> + 
<ILCompilerFrameworkFiles Include="runtime.any.System.Threading.Tasks/$(CoreFxPackageVersion)/runtimes/corert/lib/netcoreapp1.2/System.Threading.Tasks.dll" /> + <ILCompilerFrameworkFiles Include="runtime.any.System.Collections/$(CoreFxPackageVersion)/runtimes/corert/lib/netcoreapp1.2/System.Collections.dll" /> + <ILCompilerFrameworkFiles Include="runtime.win.System.Diagnostics.Debug/$(CoreFxPackageVersion)/runtimes/win-corert/lib/netcoreapp1.2/System.Diagnostics.Debug.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> + <ILCompilerFrameworkFiles Include="runtime.unix.System.Diagnostics.Debug/$(CoreFxPackageVersion)/runtimes/unix-corert/lib/netcoreapp1.2/System.Diagnostics.Debug.dll" Condition="'$(OsEnvironment)'!='Windows_NT'"/> + + <!-- End: Libraries with netcoreapp1.2corert configuration --> <ILCompilerFrameworkFiles Include="runtime.aot.System.Diagnostics.Tracing/$(CoreFxPackageVersion)/runtimes/aot/lib/netcore50/System.Diagnostics.Tracing.dll" /> @@ -142,10 +150,6 @@ <ILCompilerFrameworkFiles Include="Microsoft.NETCore.Portable.Compatibility/1.0.2/runtimes/aot/lib/netcore50/mscorlib.dll" /> - <ILCompilerFrameworkFiles Include="runtime.win7.System.Private.Uri/$(CoreFxPackageVersion)/runtimes/win-corert/lib/netcoreapp1.2/System.Private.Uri.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> - <ILCompilerFrameworkFiles Include="runtime.unix.System.Private.Uri/$(CoreFxPackageVersion)/runtimes/unix-corert/lib/netcoreapp1.2/System.Private.Uri.dll" Condition="'$(OsEnvironment)'!='Windows_NT'" /> - <ILCompilerFrameworkFiles Include="runtime.win.System.Runtime.Extensions/$(CoreFxPackageVersion)/runtimes/win-corert/lib/netcoreapp1.2/System.Runtime.Extensions.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> - <ILCompilerFrameworkFiles Include="runtime.unix.System.Runtime.Extensions/$(CoreFxPackageVersion)/runtimes/unix-corert/lib/netcoreapp1.2/System.Runtime.Extensions.dll" Condition="'$(OsEnvironment)'!='Windows_NT'" /> <ILCompilerFrameworkFiles 
Include="runtime.win.System.Console/$(CoreFxPackageVersion)/runtimes/win/lib/netstandard1.7/System.Console.dll" Condition="'$(OsEnvironment)'=='Windows_NT'" /> <ILCompilerFrameworkFiles Include="runtime.unix.System.Console/$(CoreFxPackageVersion)/runtimes/unix/lib/netstandard1.7/System.Console.dll" Condition="'$(OsEnvironment)'!='Windows_NT'" /> diff --git a/src/packaging/project.json b/src/packaging/project.json index df6f724ef..514f90539 100644 --- a/src/packaging/project.json +++ b/src/packaging/project.json @@ -1,6 +1,6 @@ { "dependencies": { - "Microsoft.NETCore.Jit": "1.2.0-beta-24815-03", + "Microsoft.NETCore.Jit": "1.2.0-beta-24911-02", "Microsoft.DotNet.ObjectWriter": "1.0.13-prerelease-00001" }, "frameworks": { diff --git a/src/packaging/uap/project.json b/src/packaging/uap/project.json index 8ed3477bd..b8ea32560 100644 --- a/src/packaging/uap/project.json +++ b/src/packaging/uap/project.json @@ -2,13 +2,8 @@ "frameworks": { "uap10.1": { "dependencies": { - "System.Reflection.Primitives": "4.4.0-beta-24906-01", - "System.Runtime.InteropServices": "4.4.0-beta-24906-01", - "System.Threading.Tasks": "4.4.0-beta-24906-01", - "System.Collections": "4.4.0-beta-24906-01", - "System.Diagnostics.Debug": "4.4.0-beta-24906-01", - "System.Diagnostics.Tracing": "4.4.0-beta-24906-01", - "System.Linq": "4.4.0-beta-24906-01", + "System.Diagnostics.Tracing": "4.4.0-beta-24913-02", + "System.Linq": "4.4.0-beta-24913-02", "Microsoft.NETCore.Portable.Compatibility": "1.0.2" } } diff --git a/tests/CoreCLR.issues.targets b/tests/CoreCLR.issues.targets index e7fc753eb..6caf9e45c 100644 --- a/tests/CoreCLR.issues.targets +++ b/tests/CoreCLR.issues.targets @@ -903,6 +903,7 @@ <ExcludeList Include="$(XunitTestBinBase)\GC\Scenarios\ServerModel\servermodel\servermodel.*" /> <ExcludeList Include="$(XunitTestBinBase)\GC\Scenarios\Dynamo\dynamo\dynamo.*" /> <ExcludeList Include="$(XunitTestBinBase)\JIT\Regression\CLR-x86-JIT\V1-M09\b16294\b16294\b16294.*" /> + <ExcludeList 
Include="$(XunitTestBinBase)\Regressions\common\AboveStackLimit\AboveStackLimit.*" /> <!-- System.Diagnostics.Process --> <ExcludeList Include="$(XunitTestBinBase)\GC\API\GC\Collect_Default_1\Collect_Default_1.*" /> @@ -928,6 +929,11 @@ <ExcludeList Include="$(XunitTestBinBase)\JIT\Methodical\eh\basics\throwinfilter_d\throwinfilter_d.*" /> <ExcludeList Include="$(XunitTestBinBase)\JIT\Methodical\eh\basics\throwinfilter_r\throwinfilter_r.*" /> + <!-- Windows x64 unwinder gets confused by IP in the middle of instruction --> + <!-- https://github.com/dotnet/corert/issues/2535 --> + <ExcludeList Include="$(XunitTestBinBase)\JIT\Regression\CLR-x86-JIT\V1-M09.5-PDC\b30126\b30126\b30126.*" /> + <ExcludeList Include="$(XunitTestBinBase)\JIT\Regression\CLR-x86-JIT\V1-M09.5-PDC\b30128\b30128\b30128.*" /> + <!-- Arrays with non-zero lower bounds --> <!-- https://github.com/dotnet/corert/issues/2245 --> <ExcludeList Include="$(XunitTestBinBase)\JIT\Methodical\Arrays\huge\_il_dbghuge_b\_il_dbghuge_b.*" /> @@ -1114,6 +1120,8 @@ <ExcludeList Include="$(XunitTestBinBase)\GC\Scenarios\LeakGen\leakgen\leakgen.*" /> <ExcludeList Include="$(XunitTestBinBase)\JIT\Directed\lifetime\lifetime1\lifetime1.*" /> <ExcludeList Include="$(XunitTestBinBase)\JIT\Directed\lifetime\lifetime2\lifetime2.*" /> + <ExcludeList Include="$(XunitTestBinBase)\GC\Scenarios\DoublinkList\dlbigleak\dlbigleak.*" /> + <ExcludeList Include="$(XunitTestBinBase)\GC\Scenarios\FinalNStruct\nstructtun\nstructtun.*" /> <!-- InteropExtensions.MightBeBlittable is only an approximation --> <!-- https://github.com/dotnet/corert/issues/2355 --> diff --git a/tests/CoreCLR/build-and-run-test.cmd b/tests/CoreCLR/build-and-run-test.cmd index 511094311..8fd7ed145 100644 --- a/tests/CoreCLR/build-and-run-test.cmd +++ b/tests/CoreCLR/build-and-run-test.cmd @@ -37,8 +37,8 @@ if "%CoreRT_BuildArch%" == "x64" ( call "%VS140COMNTOOLS%\..\..\VC\bin\amd64\vcvars64.bat" ) -echo msbuild /ConsoleLoggerParameters:ForceNoAlign 
"/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" %TestFolder%\Test.csproj -msbuild /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" %TestFolder%\Test.csproj +echo msbuild /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" "/p:RepoLocalBuild=true" %TestFolder%\Test.csproj +msbuild /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" "/p:RepoLocalBuild=true" %TestFolder%\Test.csproj if errorlevel 1 ( set TestExitCode=!ERRORLEVEL! goto :Cleanup diff --git a/tests/runtest.cmd b/tests/runtest.cmd index ed8dc33e2..1a43f5230 100644 --- a/tests/runtest.cmd +++ b/tests/runtest.cmd @@ -181,9 +181,9 @@ goto :eof ) ) - echo msbuild /m /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" !extraArgs! !__SourceFile!.csproj + echo msbuild /m /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" "/p:RepoLocalBuild=true" !extraArgs! !__SourceFile!.csproj echo. - msbuild /m /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" !extraArgs! !__SourceFile!.csproj + msbuild /m /ConsoleLoggerParameters:ForceNoAlign "/p:IlcPath=%CoreRT_ToolchainDir%" "/p:Configuration=%CoreRT_BuildType%" "/p:RepoLocalBuild=true" !extraArgs! 
!__SourceFile!.csproj endlocal set __SavedErrorLevel=%ErrorLevel% @@ -271,7 +271,7 @@ goto :eof echo CORE_ROOT IS NOW %CORE_ROOT% pushd %CoreRT_TestRoot%\CoreCLR\runtest - msbuild src\TestWrappersConfig\XUnitTooling.depproj + msbuild "/p:RepoLocalBuild=true" src\TestWrappersConfig\XUnitTooling.depproj if errorlevel 1 ( exit /b 1 ) diff --git a/tests/runtest.sh b/tests/runtest.sh index 0cc8eb8a3..c786b5c0c 100755 --- a/tests/runtest.sh +++ b/tests/runtest.sh @@ -34,8 +34,8 @@ run_test_dir() rm -rf ${__dir_path}/bin ${__dir_path}/obj local __msbuild_dir=${CoreRT_TestRoot}/../Tools - echo ${__msbuild_dir}/msbuild.sh /m /p:IlcPath=${CoreRT_ToolchainDir} /p:Configuration=${CoreRT_BuildType} ${__extra_args} ${__dir_path}/${__filename}.csproj - ${__msbuild_dir}/msbuild.sh /m /p:IlcPath=${CoreRT_ToolchainDir} /p:Configuration=${CoreRT_BuildType} ${__extra_args} ${__dir_path}/${__filename}.csproj + echo ${__msbuild_dir}/msbuild.sh /m /p:IlcPath=${CoreRT_ToolchainDir} /p:Configuration=${CoreRT_BuildType} /p:RepoLocalBuild=true ${__extra_args} ${__dir_path}/${__filename}.csproj + ${__msbuild_dir}/msbuild.sh /m /p:IlcPath=${CoreRT_ToolchainDir} /p:Configuration=${CoreRT_BuildType} /p:RepoLocalBuild=true ${__extra_args} ${__dir_path}/${__filename}.csproj runtest ${__dir_path} ${__filename} local __exitcode=$? diff --git a/tests/src/Simple/Generics/Generics.cs b/tests/src/Simple/Generics/Generics.cs index d181130ac..e4fc32932 100644 --- a/tests/src/Simple/Generics/Generics.cs +++ b/tests/src/Simple/Generics/Generics.cs @@ -3,6 +3,7 @@ // See the LICENSE file in the project root for more information. 
using System; +using System.Runtime.CompilerServices; class Program { @@ -16,6 +17,7 @@ class Program TestSlotsInHierarchy.Run(); TestDelegateVirtualMethod.Run(); TestDelegateInterfaceMethod.Run(); + TestThreadStaticFieldAccess.Run(); TestNameManglingCollisionRegression.Run(); TestUnusedGVMsDoNotCrashCompiler.Run(); @@ -323,6 +325,64 @@ class Program } } + class TestThreadStaticFieldAccess + { + class TypeWithThreadStaticField<T> + { + [ThreadStatic] + public static int X; + + [MethodImpl(MethodImplOptions.NoInlining)] + public static int Read() + { + return X; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + public static void Write(int x) + { + X = x; + } + } + + class BeforeFieldInitType<T> + { + [ThreadStatic] + public static int X = 1985; + } + + [MethodImpl(MethodImplOptions.NoInlining)] + private static int ReadFromBeforeFieldInitType<T>() + { + return BeforeFieldInitType<T>.X; + } + + public static void Run() + { + // This will set the field to a value from non-shared code + TypeWithThreadStaticField<object>.X = 42; + + // Now read the value from shared code + if (TypeWithThreadStaticField<object>.Read() != 42) + throw new Exception(); + + // Set the value from shared code + TypeWithThreadStaticField<string>.Write(112); + + // Now read the value from non-shared code + if (TypeWithThreadStaticField<string>.X != 112) + throw new Exception(); + + // Check that the storage locations for string and object instantiations differ + if (TypeWithThreadStaticField<object>.Read() != 42) + throw new Exception(); + + // Make sure we run the cctor + if (ReadFromBeforeFieldInitType<object>() != 1985) + throw new Exception(); + } + } + // // Regression test for issue https://github.com/dotnet/corert/issues/1964 // |