github.com/mono/corert.git

author    Tijoy Tom Kalathiparambil <tijoytk@microsoft.com>  2015-12-04 01:05:33 +0300
committer Tijoy Tom Kalathiparambil <tijoytk@microsoft.com>  2015-12-04 01:05:33 +0300
commit    fa8b120337acea5b4fcdf91de1ca52695394e228 (patch)
tree      becec4d522cb48880bf9fa556c4d7ecba85bd9ff /src/Common
parent    5cafb1530c89b350502c9f048d2b8e80920e3730 (diff)
Adding NativeFormat support
NativeFormat is a binary metadata format primarily used for storing layout decisions made by the static code generator that the dynamic code generator needs to be aware of. [tfs-changeset: 1554094]
Diffstat (limited to 'src/Common')
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormat.cs                  172
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormatReader.Metadata.cs    72
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormatReader.String.cs      89
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormatReader.cs            669
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormatWriter.Primitives.cs 197
-rw-r--r--  src/Common/src/Internal/NativeFormat/NativeFormatWriter.cs            618
-rw-r--r--  src/Common/src/Internal/NativeFormat/TypeHashingAlgorithms.cs         110
7 files changed, 1927 insertions, 0 deletions
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormat.cs b/src/Common/src/Internal/NativeFormat/NativeFormat.cs
new file mode 100644
index 000000000..76cbc276f
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormat.cs
@@ -0,0 +1,172 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+
+//
+// Native Format
+//
+// NativeFormat is a binary metadata format. It is primarily designed for storing layout decisions made by the static
+// code generator that the dynamic code generator needs to be aware of. However, it can also be used for storing
+// general managed code metadata in the future. The key properties of the format are:
+//
+// - Extensible: It should be possible to attach new data to existing records without breaking existing consumers that
+// do not understand the new data yet.
+//
+// - Naturally compressed: Integers are stored using variable length encoding. Offsets are stored as relative offsets.
+//
+// - Random access: Random access to selected information should be fast. It is achieved by using tokens as offsets.
+//
+// - Locality: Accessing related information should touch data that is close together.
+//
+// The format is essentially a collection of variable size records that can reference each other.
+//
+
+namespace Internal.NativeFormat
+{
+ //
+ // Bag is the key record type for extensibility. It is a list of <id, data> pairs. Data is an integer that
+ // is interpreted according to the id. It is typically a relative offset of another record.
+ //
+ internal enum BagElementKind : uint
+ {
+ End = 0x00,
+ BaseType = 0x01,
+ ImplementedInterfaces = 0x02,
+
+ DictionaryLayout = 0x40,
+ TypeFlags = 0x41,
+ NonGcStaticData = 0x42,
+ GcStaticData = 0x43,
+ NonGcStaticDataSize = 0x44,
+ GcStaticDataSize = 0x45,
+ GcStaticDesc = 0x46,
+ ThreadStaticDataSize = 0x47,
+ ThreadStaticDesc = 0x48,
+ ThreadStaticIndex = 0x49,
+ ThreadStaticOffset = 0x4a,
+ FieldLayout = 0x4b,
+ VTableMethodSignatures = 0x4c,
+ SealedVTableEntries = 0x4d,
+ ClassConstructorPointer = 0x4e,
+ BaseTypeSize = 0x4f,
+ GenericVarianceInfo = 0x50,
+
+ // Add new custom bag elements that don't correspond to something you'd find in the ECMA metadata here.
+ }
+
+ //
+ // A FixupSignature describes an indirection. It starts with an integer describing the kind of data stored in the
+ // indirection, followed by a kind-specific signature.
+ //
+ enum FixupSignatureKind : uint
+ {
+ Null = 0x00,
+ TypeHandle = 0x01,
+ InterfaceCall = 0x02,
+ // unused = 0x03,
+ MethodDictionary = 0x04,
+ StaticData = 0x05,
+ UnwrapNullableType = 0x06,
+ FieldLdToken = 0x07,
+ MethodLdToken = 0x08,
+ AllocateObject = 0x09,
+ DefaultConstructor = 0x0a,
+ TlsIndex = 0x0b,
+ TlsOffset = 0x0c,
+ Method = 0x0d,
+ IsInst = 0x0e,
+ CastClass = 0x0f,
+ AllocateArray = 0x10,
+ CheckArrayElementType = 0x11,
+ TypeSize = 0x12,
+ FieldOffset = 0x13,
+ CallingConventionConverter = 0x14,
+ VTableOffset = 0x15,
+ NonGenericConstrainedMethod = 0x16,
+ GenericConstrainedMethod = 0x17,
+ NonGenericDirectConstrainedMethod = 0x18,
+
+ NotYetSupported = 0xee,
+ }
+
+ //
+ // A TypeSignature describes a type. The low 4 bits of the integer it starts with describe the kind. The upper
+ // 28 bits are kind-specific data. The argument signatures immediately follow for nested types.
+ //
+ enum TypeSignatureKind : uint
+ {
+ Null = 0x0,
+ Lookback = 0x1, // Go back in the stream for signature continuation (data - number of bytes to go back)
+ Modifier = 0x2, // Type modifier (data - TypeModifierKind)
+ Instantiation = 0x3, // Generic instantiation (data - number of instantiation args)
+ Variable = 0x4, // Generic variable (data - 2 * varnum + method)
+ BuiltIn = 0x5, // Built-in type (data - BuildInTypeKind)
+ External = 0x6, // External type reference (data - external type id)
+
+ MultiDimArray = 0xA, // Multi-dimensional array (data - dimension)
+ FunctionPointer = 0xB, // Function pointer (data - calling convention, arg count, args)
+ };
+
+ enum TypeModifierKind : uint
+ {
+ Array = 0x1,
+ ByRef = 0x2,
+ Pointer = 0x3,
+ };
+
+ enum StaticDataKind : uint
+ {
+ Gc = 0x1,
+ NonGc = 0x2,
+ };
+
+ enum GenericContextKind : uint
+ {
+ FromThis = 0x00,
+ FromHiddenArg = 0x01,
+ FromMethodHiddenArg = 0x02,
+
+ HasDeclaringType = 0x04,
+
+ NeedsUSGContext = 0x08,
+ };
+
+ enum CallingConventionConverterKind : uint
+ {
+ NoInstantiatingParam = 0x00, // The calling convention interpreter can assume that the calling convention of the target method has no instantiating parameter
+ HasInstantiatingParam = 0x01, // The calling convention interpreter can assume that the calling convention of the target method has an instantiating parameter
+ MaybeInstantiatingParam = 0x02, // The calling convention interpreter can assume that the calling convention of the target method may be a fat function pointer
+ }
+
+ [Flags]
+ enum TypeFlags : uint
+ {
+ HasClassConstructor = 0x1,
+ HasInstantiationDeterminedSize = 0x2,
+ };
+
+ [Flags]
+ enum MethodFlags : uint
+ {
+ HasInstantiation = 0x1,
+ IsUnboxingStub = 0x2,
+ HasFunctionPointer = 0x4,
+ FunctionPointerIsUSG = 0x8,
+ };
+
+ [Flags]
+ enum MethodCallingConvention : uint
+ {
+ Generic = 0x1,
+ Static = 0x2,
+ };
+
+ enum FieldStorage : uint
+ {
+ Instance = 0x0,
+ NonGCStatic = 0x1,
+ GCStatic = 0x2,
+ TLSStatic = 0x3,
+ }
+}
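
The TypeSignature comment above packs a 4-bit kind together with 28 bits of kind-specific data into a single unsigned integer. A minimal sketch of that packing, assuming it is compiled into the same assembly so the internal TypeSignatureKind enum is visible (the Pack/Unpack helpers are illustrative, not part of this change):

using System;
using Internal.NativeFormat;

internal static class TypeSignatureKindExample
{
    // Combine the kind (low 4 bits) with the kind-specific data (upper 28 bits); the resulting
    // value is what would then be written with the variable-length unsigned encoding.
    internal static uint Pack(TypeSignatureKind kind, uint data)
    {
        return ((uint)kind & 0xF) | (data << 4);
    }

    // Mirror of NativeParser.GetTypeSignatureKind: split the packed value back apart.
    internal static TypeSignatureKind Unpack(uint value, out uint data)
    {
        data = value >> 4;
        return (TypeSignatureKind)(value & 0xF);
    }

    internal static void Demo()
    {
        // A generic instantiation with two type arguments: kind = Instantiation, data = 2.
        uint packed = Pack(TypeSignatureKind.Instantiation, 2);

        uint argumentCount;
        TypeSignatureKind kind = Unpack(packed, out argumentCount);
        Console.WriteLine("{0} with {1} argument(s)", kind, argumentCount);
    }
}
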
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormatReader.Metadata.cs b/src/Common/src/Internal/NativeFormat/NativeFormatReader.Metadata.cs
new file mode 100644
index 000000000..6443d3ca0
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormatReader.Metadata.cs
@@ -0,0 +1,72 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+// ---------------------------------------------------------------------------
+// Native Format Reader
+//
+// Metadata / NativeLayoutInfo reading methods
+// ---------------------------------------------------------------------------
+
+using System;
+using System.Diagnostics;
+
+namespace Internal.NativeFormat
+{
+ internal partial struct NativeParser
+ {
+ public BagElementKind GetBagElementKind()
+ {
+ return (BagElementKind)GetUnsigned();
+ }
+
+ public FixupSignatureKind GetFixupSignatureKind()
+ {
+ return (FixupSignatureKind)GetUnsigned();
+ }
+
+ public TypeSignatureKind GetTypeSignatureKind(out uint data)
+ {
+ uint val = GetUnsigned();
+ data = (val >> 4);
+ return (TypeSignatureKind)(val & 0xF);
+ }
+
+ public NativeParser GetLookbackParser(uint lookback)
+ {
+ // Adjust the lookback by the size of the TypeSignatureKind element and the minimum lookback size
+ uint adjustedLookback = lookback + NativePrimitiveDecoder.GetUnsignedEncodingSize(lookback << 4) + 2;
+ return new NativeParser(_reader, _offset - adjustedLookback);
+ }
+
+ public uint? GetUnsignedForBagElementKind(BagElementKind kindToFind)
+ {
+ var parser = this;
+
+ BagElementKind kind;
+ while ((kind = parser.GetBagElementKind()) != BagElementKind.End)
+ {
+ if (kind == kindToFind)
+ return parser.GetUnsigned();
+
+ parser.SkipInteger();
+ }
+
+ return null;
+ }
+
+ public NativeParser GetParserForBagElementKind(BagElementKind kindToFind)
+ {
+ var parser = this;
+
+ BagElementKind kind;
+ while ((kind = parser.GetBagElementKind()) != BagElementKind.End)
+ {
+ if (kind == kindToFind)
+ return parser.GetParserFromRelativeOffset();
+
+ parser.SkipInteger();
+ }
+
+ return new NativeParser();
+ }
+ }
+}
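
A hypothetical consumer of the bag-scanning helpers above; typeBagParser and the surrounding code are assumptions for illustration, only the NativeParser API comes from this change. Both helpers copy the parser by value, so the caller's position in the stream is left untouched.

// typeBagParser is assumed to be a NativeParser positioned at the start of a type's bag record.
uint? baseTypeSize = typeBagParser.GetUnsignedForBagElementKind(BagElementKind.BaseTypeSize);

NativeParser dictionaryLayoutParser = typeBagParser.GetParserForBagElementKind(BagElementKind.DictionaryLayout);
if (!dictionaryLayoutParser.IsNull)
{
    // ... decode the dictionary layout record through dictionaryLayoutParser ...
}
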
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormatReader.String.cs b/src/Common/src/Internal/NativeFormat/NativeFormatReader.String.cs
new file mode 100644
index 000000000..562984fb7
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormatReader.String.cs
@@ -0,0 +1,89 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+// ---------------------------------------------------------------------------
+// Native Format Reader
+//
+// UTF8 string reading methods
+// ---------------------------------------------------------------------------
+
+using System;
+using System.Text;
+
+namespace Internal.NativeFormat
+{
+ internal partial struct NativeParser
+ {
+ public string GetString()
+ {
+ string value;
+ _offset = _reader.DecodeString(_offset, out value);
+ return value;
+ }
+ }
+
+ internal partial class NativeReader
+ {
+ public string ReadString(uint offset)
+ {
+ string value;
+ DecodeString(offset, out value);
+ return value;
+ }
+
+ public unsafe uint DecodeString(uint offset, out string value)
+ {
+ uint numBytes;
+ offset = DecodeUnsigned(offset, out numBytes);
+
+ if (numBytes == 0)
+ {
+ value = String.Empty;
+ return offset;
+ }
+
+ uint endOffset = offset + numBytes;
+ if (endOffset < numBytes || endOffset > _size)
+ ThrowBadImageFormatException();
+
+#if NETFX_45
+ byte[] bytes = new byte[numBytes];
+ for (int i = 0; i < bytes.Length; i++)
+ bytes[i] = *(_base + offset + i);
+
+ value = Encoding.UTF8.GetString(bytes, 0, bytes.Length);
+#else
+ value = Encoding.UTF8.GetString(_base + offset, (int)numBytes);
+#endif
+
+ return endOffset;
+ }
+
+ public unsafe bool StringEquals(uint offset, string value)
+ {
+ uint originalOffset = offset;
+
+ uint numBytes;
+ offset = DecodeUnsigned(offset, out numBytes);
+
+ uint endOffset = offset + numBytes;
+ if (endOffset < numBytes || offset > _size)
+ ThrowBadImageFormatException();
+
+ if (numBytes < value.Length)
+ return false;
+
+ for (int i = 0; i < value.Length; i++)
+ {
+ int ch = *(_base + offset + i);
+ if (ch > 0x7F)
+ return ReadString(originalOffset) == value;
+
+ // We are assuming here that valid UTF8 encoded byte > 0x7F cannot map to a character with code point <= 0x7F
+ if (ch != value[i])
+ return false;
+ }
+
+ return numBytes == value.Length; // All char ANSI, all matching
+ }
+ }
+}
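
A safe re-statement of the StringEquals fast path above, operating on a byte[] rather than raw pointers, to show why comparing bytes against chars is valid for ASCII: in well-formed UTF-8 every byte of a multi-byte sequence is greater than 0x7F, so a byte at or below 0x7F always stands for the code point itself. This helper is illustrative only, not part of the commit.

using System.Text;

internal static class Utf8CompareExample
{
    internal static bool Utf8Equals(byte[] utf8, string value)
    {
        if (utf8.Length < value.Length)
            return false;

        for (int i = 0; i < value.Length; i++)
        {
            int b = utf8[i];
            if (b > 0x7F)
                return Encoding.UTF8.GetString(utf8, 0, utf8.Length) == value; // non-ASCII: fall back to a full decode

            if (b != value[i])
                return false;
        }

        return utf8.Length == value.Length; // all ASCII, all matched
    }
}
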
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormatReader.cs b/src/Common/src/Internal/NativeFormat/NativeFormatReader.cs
new file mode 100644
index 000000000..d05d06dee
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormatReader.cs
@@ -0,0 +1,669 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+// ---------------------------------------------------------------------------
+// Native Format Reader
+//
+// Utilities to read native data from images, that are written by the NativeFormatWriter engine
+// ---------------------------------------------------------------------------
+
+using System;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+
+namespace Internal.NativeFormat
+{
+ internal unsafe struct NativePrimitiveDecoder
+ {
+ static public void ThrowBadImageFormatException()
+ {
+ Debug.Assert(false);
+ throw new BadImageFormatException();
+ }
+
+ static public byte ReadUInt8(ref byte* stream)
+ {
+ byte result = *(stream); // Assumes little endian and unaligned access
+ stream++;
+ return result;
+ }
+
+ static public ushort ReadUInt16(ref byte* stream)
+ {
+ ushort result = *(ushort*)(stream); // Assumes little endian and unaligned access
+ stream += 2;
+ return result;
+ }
+
+ static public uint ReadUInt32(ref byte* stream)
+ {
+ uint result = *(uint*)(stream); // Assumes little endian and unaligned access
+ stream += 4;
+ return result;
+ }
+
+ static public ulong ReadUInt64(ref byte* stream)
+ {
+ ulong result = *(ulong*)(stream); // Assumes little endian and unaligned access
+ stream += 8;
+ return result;
+ }
+
+ static public unsafe float ReadFloat(ref byte* stream)
+ {
+ uint value = ReadUInt32(ref stream);
+ return *(float*)(&value);
+ }
+
+ static public double ReadDouble(ref byte* stream)
+ {
+ ulong value = ReadUInt64(ref stream);
+ return *(double*)(&value);
+ }
+
+ static public uint GetUnsignedEncodingSize(uint value)
+ {
+ if (value < 128) return 1;
+ if (value < 128 * 128) return 2;
+ if (value < 128 * 128 * 128) return 3;
+ if (value < 128 * 128 * 128 * 128) return 4;
+ return 5;
+ }
+
+ static public uint DecodeUnsigned(ref byte* stream)
+ {
+ return DecodeUnsigned(ref stream, stream + Byte.MaxValue /* unknown stream end */);
+ }
+ static public uint DecodeUnsigned(ref byte* stream, byte* streamEnd)
+ {
+ if (stream >= streamEnd)
+ ThrowBadImageFormatException();
+
+ uint value = 0;
+
+ uint val = *stream;
+ if ((val & 1) == 0)
+ {
+ value = (val >> 1);
+ stream += 1;
+ }
+ else if ((val & 2) == 0)
+ {
+ if (stream + 1 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 2) |
+ (((uint)*(stream + 1)) << 6);
+ stream += 2;
+ }
+ else if ((val & 4) == 0)
+ {
+ if (stream + 2 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 3) |
+ (((uint)*(stream + 1)) << 5) |
+ (((uint)*(stream + 2)) << 13);
+ stream += 3;
+ }
+ else if ((val & 8) == 0)
+ {
+ if (stream + 3 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 4) |
+ (((uint)*(stream + 1)) << 4) |
+ (((uint)*(stream + 2)) << 12) |
+ (((uint)*(stream + 3)) << 20);
+ stream += 4;
+ }
+ else if ((val & 16) == 0)
+ {
+ stream += 1;
+ value = ReadUInt32(ref stream);
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ return 0;
+ }
+
+ return value;
+ }
+
+ static public int DecodeSigned(ref byte* stream)
+ {
+ return DecodeSigned(ref stream, stream + Byte.MaxValue /* unknown stream end */);
+ }
+ static public int DecodeSigned(ref byte* stream, byte* streamEnd)
+ {
+ if (stream >= streamEnd)
+ ThrowBadImageFormatException();
+
+ int value = 0;
+
+ int val = *(stream);
+ if ((val & 1) == 0)
+ {
+ value = ((sbyte)val) >> 1;
+ stream += 1;
+ }
+ else if ((val & 2) == 0)
+ {
+ if (stream + 1 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 2) |
+ (((int)*(sbyte*)(stream + 1)) << 6);
+ stream += 2;
+ }
+ else if ((val & 4) == 0)
+ {
+ if (stream + 2 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 3) |
+ (((int)*(stream + 1)) << 5) |
+ (((int)*(sbyte*)(stream + 2)) << 13);
+ stream += 3;
+ }
+ else if ((val & 8) == 0)
+ {
+ if (stream + 3 >= streamEnd)
+ ThrowBadImageFormatException();
+ value = (val >> 4) |
+ (((int)*(stream + 1)) << 4) |
+ (((int)*(stream + 2)) << 12) |
+ (((int)*(sbyte*)(stream + 3)) << 20);
+ stream += 4;
+ }
+ else if ((val & 16) == 0)
+ {
+ stream += 1;
+ value = (int)ReadUInt32(ref stream);
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ return 0;
+ }
+
+ return value;
+ }
+
+ static public ulong DecodeUnsignedLong(ref byte* stream)
+ {
+ return DecodeUnsignedLong(ref stream, stream + Byte.MaxValue /* unknown stream end */);
+ }
+ static public ulong DecodeUnsignedLong(ref byte* stream, byte* streamEnd)
+ {
+ if (stream >= streamEnd)
+ ThrowBadImageFormatException();
+
+ ulong value = 0;
+
+ byte val = *stream;
+ if ((val & 31) != 31)
+ {
+ value = DecodeUnsigned(ref stream, streamEnd);
+ }
+ else if ((val & 32) == 0)
+ {
+ stream += 1;
+ value = ReadUInt64(ref stream);
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ return 0;
+ }
+
+ return value;
+ }
+
+ static public long DecodeSignedLong(ref byte* stream)
+ {
+ return DecodeSignedLong(ref stream, stream + Byte.MaxValue /* unknown stream end */);
+ }
+ static public long DecodeSignedLong(ref byte* stream, byte* streamEnd)
+ {
+ if (stream >= streamEnd)
+ ThrowBadImageFormatException();
+
+ long value = 0;
+
+ byte val = *stream;
+ if ((val & 31) != 31)
+ {
+ value = DecodeSigned(ref stream, streamEnd);
+ }
+ else if ((val & 32) == 0)
+ {
+ stream += 1;
+ value = (long)ReadUInt64(ref stream);
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ return 0;
+ }
+
+ return value;
+ }
+
+ static public void SkipInteger(ref byte* stream)
+ {
+ byte val = *stream;
+ if ((val & 1) == 0)
+ {
+ stream += 1;
+ }
+ else if ((val & 2) == 0)
+ {
+ stream += 2;
+ }
+ else if ((val & 4) == 0)
+ {
+ stream += 3;
+ }
+ else if ((val & 8) == 0)
+ {
+ stream += 4;
+ }
+ else if ((val & 16) == 0)
+ {
+ stream += 5;
+ }
+ else if ((val & 32) == 0)
+ {
+ stream += 9;
+ }
+ else
+ {
+ ThrowBadImageFormatException();
+ }
+ }
+ }
+
+ internal unsafe partial class NativeReader
+ {
+ private readonly byte* _base;
+ private readonly uint _size;
+
+ public NativeReader(byte* base_, uint size)
+ {
+ // Limit the maximum blob size to prevent buffer overruns triggered by boundary integer overflows
+ if (size >= UInt32.MaxValue / 4)
+ ThrowBadImageFormatException();
+
+ Debug.Assert(base_ <= base_ + size);
+
+ _base = base_;
+ _size = size;
+ }
+
+ public uint Size
+ {
+ get
+ {
+ return _size;
+ }
+ }
+
+ public uint AddressToOffset(IntPtr address)
+ {
+ Debug.Assert((byte*)address >= _base);
+ Debug.Assert((byte*)address <= _base + _size);
+ return (uint)((byte*)address - _base);
+ }
+
+ public IntPtr OffsetToAddress(uint offset)
+ {
+ Debug.Assert(offset < _size);
+
+ return new IntPtr(_base + offset);
+ }
+
+ public void ThrowBadImageFormatException()
+ {
+ Debug.Assert(false);
+ throw new BadImageFormatException();
+ }
+
+ private uint EnsureOffsetInRange(uint offset, uint lookAhead)
+ {
+ if ((int)offset < 0 || offset + lookAhead >= _size)
+ ThrowBadImageFormatException();
+ return offset;
+ }
+
+ public byte ReadUInt8(uint offset)
+ {
+ EnsureOffsetInRange(offset, 0);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadUInt8(ref data);
+ }
+
+ public ushort ReadUInt16(uint offset)
+ {
+ EnsureOffsetInRange(offset, 1);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadUInt16(ref data);
+ }
+
+ public uint ReadUInt32(uint offset)
+ {
+ EnsureOffsetInRange(offset, 3);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadUInt32(ref data);
+ }
+
+ public ulong ReadUInt64(uint offset)
+ {
+ EnsureOffsetInRange(offset, 7);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadUInt64(ref data);
+ }
+
+ public unsafe float ReadFloat(uint offset)
+ {
+ EnsureOffsetInRange(offset, 3);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadFloat(ref data);
+ }
+
+ public double ReadDouble(uint offset)
+ {
+ EnsureOffsetInRange(offset, 7);
+ byte* data = _base + offset;
+ return NativePrimitiveDecoder.ReadDouble(ref data);
+ }
+
+ public uint DecodeUnsigned(uint offset, out uint value)
+ {
+ EnsureOffsetInRange(offset, 0);
+
+ byte* data = _base + offset;
+ value = NativePrimitiveDecoder.DecodeUnsigned(ref data, _base + _size);
+ return (uint)(data - _base);
+ }
+
+ public uint DecodeSigned(uint offset, out int value)
+ {
+ EnsureOffsetInRange(offset, 0);
+
+ byte* data = _base + offset;
+ value = NativePrimitiveDecoder.DecodeSigned(ref data, _base + _size);
+ return (uint)(data - _base);
+ }
+
+ public uint DecodeUnsignedLong(uint offset, out ulong value)
+ {
+ EnsureOffsetInRange(offset, 0);
+
+ byte* data = _base + offset;
+ value = NativePrimitiveDecoder.DecodeUnsignedLong(ref data, _base + _size);
+ return (uint)(data - _base);
+ }
+
+ public uint DecodeSignedLong(uint offset, out long value)
+ {
+ EnsureOffsetInRange(offset, 0);
+
+ byte* data = _base + offset;
+ value = NativePrimitiveDecoder.DecodeSignedLong(ref data, _base + _size);
+ return (uint)(data - _base);
+ }
+
+ public uint SkipInteger(uint offset)
+ {
+ EnsureOffsetInRange(offset, 0);
+
+ byte* data = _base + offset;
+ NativePrimitiveDecoder.SkipInteger(ref data);
+ return (uint)(data - _base);
+ }
+ }
+
+ internal partial struct NativeParser
+ {
+ private readonly NativeReader _reader;
+ private uint _offset;
+
+ public NativeParser(NativeReader reader, uint offset)
+ {
+ _reader = reader;
+ _offset = offset;
+ }
+
+ public bool IsNull
+ {
+ get
+ {
+ return _reader == null;
+ }
+ }
+
+ public NativeReader Reader
+ {
+ get
+ {
+ return _reader;
+ }
+ }
+
+ public uint Offset
+ {
+ get
+ {
+ return _offset;
+ }
+ set
+ {
+ Debug.Assert(value >= 0 && value < _reader.Size);
+ _offset = value;
+ }
+ }
+
+ public void ThrowBadImageFormatException()
+ {
+ _reader.ThrowBadImageFormatException();
+ }
+
+ public byte GetUInt8()
+ {
+ byte val = _reader.ReadUInt8(_offset);
+ _offset += 1;
+ return val;
+ }
+
+ public uint GetUnsigned()
+ {
+ uint value;
+ _offset = _reader.DecodeUnsigned(_offset, out value);
+ return value;
+ }
+
+ public int GetSigned()
+ {
+ int value;
+ _offset = _reader.DecodeSigned(_offset, out value);
+ return value;
+ }
+
+ public uint GetRelativeOffset()
+ {
+ uint pos = _offset;
+
+ int delta;
+ _offset = _reader.DecodeSigned(_offset, out delta);
+
+ return pos + (uint)delta;
+ }
+
+ public void SkipInteger()
+ {
+ _offset = _reader.SkipInteger(_offset);
+ }
+
+ public NativeParser GetParserFromRelativeOffset()
+ {
+ return new NativeParser(_reader, GetRelativeOffset());
+ }
+
+ public uint GetSequenceCount()
+ {
+ return GetUnsigned();
+ }
+ }
+
+ struct NativeHashtable
+ {
+ private NativeReader _reader;
+ private uint _baseOffset;
+ private uint _bucketMask;
+ private byte _entryIndexSize;
+
+ public NativeHashtable(NativeParser parser)
+ {
+ uint header = parser.GetUInt8();
+
+ _reader = parser.Reader;
+ _baseOffset = parser.Offset;
+
+ int numberOfBucketsShift = (int)(header >> 2);
+ if (numberOfBucketsShift > 31)
+ _reader.ThrowBadImageFormatException();
+ _bucketMask = (uint)((1 << numberOfBucketsShift) - 1);
+
+ byte entryIndexSize = (byte)(header & 3);
+ if (entryIndexSize > 2)
+ _reader.ThrowBadImageFormatException();
+ _entryIndexSize = entryIndexSize;
+ }
+
+ public bool IsNull { get { return _reader == null; } }
+
+ //
+ // The enumerator does not conform to the regular C# enumerator pattern to avoid paying
+ // its performance penalty (allocation, multiple calls per iteration)
+ //
+ public struct Enumerator
+ {
+ NativeParser _parser;
+ uint _endOffset;
+ byte _lowHashcode;
+
+ internal Enumerator(NativeParser parser, uint endOffset, byte lowHashcode)
+ {
+ _parser = parser;
+ _endOffset = endOffset;
+ _lowHashcode = lowHashcode;
+ }
+
+ public NativeParser GetNext()
+ {
+ while (_parser.Offset < _endOffset)
+ {
+ byte lowHashcode = _parser.GetUInt8();
+
+ if (lowHashcode == _lowHashcode)
+ {
+ return _parser.GetParserFromRelativeOffset();
+ }
+
+ // The entries are sorted by hashcode within the bucket. It allows us to terminate the lookup prematurely.
+ if (lowHashcode > _lowHashcode)
+ {
+ _endOffset = _parser.Offset; // Ensure that extra call to GetNext returns null parser again
+ break;
+ }
+
+ _parser.SkipInteger();
+ }
+
+ return new NativeParser();
+ }
+ }
+
+ public struct AllEntriesEnumerator
+ {
+ NativeHashtable _table;
+ NativeParser _parser;
+ uint _currentBucket;
+ uint _endOffset;
+
+ internal AllEntriesEnumerator(NativeHashtable table)
+ {
+ _table = table;
+ _currentBucket = 0;
+ _parser = _table.GetParserForBucket(_currentBucket, out _endOffset);
+ }
+
+ public NativeParser GetNext()
+ {
+ for (;;)
+ {
+ while (_parser.Offset < _endOffset)
+ {
+ byte lowHashcode = _parser.GetUInt8();
+ return _parser.GetParserFromRelativeOffset();
+ }
+
+ if (_currentBucket >= _table._bucketMask)
+ return new NativeParser();
+
+ _currentBucket++;
+ _parser = _table.GetParserForBucket(_currentBucket, out _endOffset);
+ }
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining | MethodImplOptions.NoOptimization)]
+ private NativeParser GetParserForBucket(uint bucket, out uint endOffset)
+ {
+ uint start, end;
+
+ if (_entryIndexSize == 0)
+ {
+ uint bucketOffset = _baseOffset + bucket;
+ start = _reader.ReadUInt8(bucketOffset);
+ end = _reader.ReadUInt8(bucketOffset + 1);
+ }
+ else if (_entryIndexSize == 1)
+ {
+ uint bucketOffset = _baseOffset + 2 * bucket;
+ start = _reader.ReadUInt16(bucketOffset);
+ end = _reader.ReadUInt16(bucketOffset + 2);
+ }
+ else
+ {
+ uint bucketOffset = _baseOffset + 4 * bucket;
+ start = _reader.ReadUInt32(bucketOffset);
+ end = _reader.ReadUInt32(bucketOffset + 4);
+ }
+
+ endOffset = end + _baseOffset;
+ return new NativeParser(_reader, _baseOffset + start);
+ }
+
+ // The recommended code pattern to perform lookup is:
+ //
+ // var lookup = t.Lookup(TypeHashingAlgorithms.ComputeGenericInstanceHashCode(genericTypeDefinitionHandle, genericTypeArgumentHandles));
+ // NativeParser typeParser;
+ // while (!(typeParser = lookup.GetNext()).IsNull)
+ // {
+ // typeParser.GetTypeSignatureKind(out index);
+ // ... create RuntimeTypeHandle from the external reference RVAs at [index]
+ // ... compare if RuntimeTypeHandle is an instance of pair (genericTypeDefinitionHandle, genericTypeArgumentHandles)
+ // }
+ //
+ public Enumerator Lookup(int hashcode)
+ {
+ uint endOffset;
+ uint bucket = ((uint)hashcode >> 8) & _bucketMask;
+ NativeParser parser = GetParserForBucket(bucket, out endOffset);
+
+ return new Enumerator(parser, endOffset, (byte)hashcode);
+ }
+
+ public AllEntriesEnumerator EnumerateAllEntries()
+ {
+ return new AllEntriesEnumerator(this);
+ }
+ }
+}
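
A hypothetical lookup against a serialized NativeHashtable, following the recommended pattern from the comment above. The blob, its offset, and the key comparison are placeholders; the NativeReader/NativeParser/NativeHashtable API is the one added in this file, and the sketch assumes an unsafe-enabled project in the same assembly.

internal static unsafe void LookupExample(byte[] blob, uint hashtableOffset, int hashcode)
{
    fixed (byte* pBlob = blob)
    {
        var reader = new NativeReader(pBlob, (uint)blob.Length);
        var hashtable = new NativeHashtable(new NativeParser(reader, hashtableOffset));

        var lookup = hashtable.Lookup(hashcode);
        NativeParser entryParser;
        while (!(entryParser = lookup.GetNext()).IsNull)
        {
            // Entries within a bucket only share the low byte of the hashcode, so each
            // candidate record still has to be decoded from entryParser and compared
            // against the actual key before it can be accepted.
        }
    }
}
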
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormatWriter.Primitives.cs b/src/Common/src/Internal/NativeFormat/NativeFormatWriter.Primitives.cs
new file mode 100644
index 000000000..59944b77f
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormatWriter.Primitives.cs
@@ -0,0 +1,197 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+using System.IO;
+using System.Diagnostics;
+
+namespace Internal.NativeFormat
+{
+ internal struct NativePrimitiveEncoder
+ {
+ byte[] _buffer;
+ int _size;
+
+ public void Init()
+ {
+ _buffer = new byte[128];
+ _size = 0;
+ }
+
+ public int Size { get { return _size; } }
+ public void Clear() { _size = 0; }
+ public void RollbackTo(int offset) { _size = offset; }
+
+ public void WriteByte(byte b)
+ {
+ if (_buffer.Length == _size)
+ Array.Resize(ref _buffer, 2 * _buffer.Length);
+ _buffer[_size++] = b;
+ }
+
+ public void WriteUInt8(byte value)
+ {
+ WriteByte(value);
+ }
+
+ public void WriteUInt16(ushort value)
+ {
+ WriteByte((byte)value);
+ WriteByte((byte)(value >> 8));
+ }
+
+ public void WriteUInt32(uint value)
+ {
+ WriteByte((byte)value);
+ WriteByte((byte)(value >> 8));
+ WriteByte((byte)(value >> 16));
+ WriteByte((byte)(value >> 24));
+ }
+
+ public void WriteUInt64(ulong value)
+ {
+ WriteUInt32((uint)value);
+ WriteUInt32((uint)(value >> 32));
+ }
+
+ public unsafe void WriteFloat(float value)
+ {
+ WriteUInt32(*((uint*)&value));
+ }
+
+ public unsafe void WriteDouble(double value)
+ {
+ WriteUInt64(*((ulong*)&value));
+ }
+
+ //
+ // Same encoding as what's used by CTL
+ //
+ public void WriteUnsigned(uint d)
+ {
+ if (d < 128)
+ {
+ WriteByte((byte)(d * 2 + 0));
+ }
+ else if (d < 128 * 128)
+ {
+ WriteByte((byte)(d * 4 + 1));
+ WriteByte((byte)(d >> 6));
+ }
+ else if (d < 128 * 128 * 128)
+ {
+ WriteByte((byte)(d * 8 + 3));
+ WriteByte((byte)(d >> 5));
+ WriteByte((byte)(d >> 13));
+ }
+ else if (d < 128 * 128 * 128 * 128)
+ {
+ WriteByte((byte)(d * 16 + 7));
+ WriteByte((byte)(d >> 4));
+ WriteByte((byte)(d >> 12));
+ WriteByte((byte)(d >> 20));
+ }
+ else
+ {
+ WriteByte((byte)15);
+ WriteUInt32(d);
+ }
+ }
+
+ public static int GetUnsignedEncodingSize(uint d)
+ {
+ if (d < 128) return 1;
+ if (d < 128 * 128) return 2;
+ if (d < 128 * 128 * 128) return 3;
+ if (d < 128 * 128 * 128 * 128) return 4;
+ return 5;
+ }
+
+ public void WriteSigned(int i)
+ {
+ uint d = (uint)i;
+ if (d + 64 < 128)
+ {
+ WriteByte((byte)(d * 2 + 0));
+ }
+ else if (d + 64 * 128 < 128 * 128)
+ {
+ WriteByte((byte)(d * 4 + 1));
+ WriteByte((byte)(d >> 6));
+ }
+ else if (d + 64 * 128 * 128 < 128 * 128 * 128)
+ {
+ WriteByte((byte)(d * 8 + 3));
+ WriteByte((byte)(d >> 5));
+ WriteByte((byte)(d >> 13));
+ }
+ else if (d + 64 * 128 * 128 * 128 < 128 * 128 * 128 * 128)
+ {
+ WriteByte((byte)(d * 16 + 7));
+ WriteByte((byte)(d >> 4));
+ WriteByte((byte)(d >> 12));
+ WriteByte((byte)(d >> 20));
+ }
+ else
+ {
+ WriteByte((byte)15);
+ WriteUInt32(d);
+ }
+ }
+
+ public void WriteUnsignedLong(ulong i)
+ {
+ if ((uint)i == i)
+ {
+ WriteUnsigned((uint)i);
+ return;
+ }
+
+ WriteByte((byte)31);
+ WriteUInt64(i);
+ }
+
+ public void WriteSignedLong(long i)
+ {
+ if ((int)i == i)
+ {
+ WriteSigned((int)i);
+ return;
+ }
+
+ WriteByte((byte)31);
+ WriteUInt64((ulong)i);
+ }
+
+ public void PatchByteAt(int offset, byte value)
+ {
+ Debug.Assert(offset < _size);
+ _buffer[offset] = value;
+ }
+
+ public void Save(Stream stream)
+ {
+ stream.Write(_buffer, 0, _size);
+ }
+
+ public unsafe bool Save(byte* stream, int streamLength)
+ {
+ if (streamLength < _size)
+ {
+ Debug.Assert(false);
+ return false;
+ }
+ for (int i = 0; i < _size; i++)
+ stream[i] = _buffer[i];
+ return true;
+ }
+
+ public byte[] GetBytes()
+ {
+ byte[] retBuffer = new byte[_size];
+ for (int i = 0; i < _size; i++)
+ retBuffer[i] = _buffer[i];
+ return retBuffer;
+ }
+ }
+}
\ No newline at end of file
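
A round-trip sketch of the variable-length unsigned encoding shared by NativePrimitiveEncoder.WriteUnsigned above and NativePrimitiveDecoder.DecodeUnsigned in the reader: the number of low-order one bits in the first byte selects a 1, 2, 3, 4 or 5 byte encoding. The sketch assumes same-assembly access to both internal types and a using for System.Diagnostics.

internal static unsafe void RoundTripUnsigned()
{
    var encoder = new NativePrimitiveEncoder();
    encoder.Init();
    encoder.WriteUnsigned(0x12345); // 0x12345 < 128 * 128 * 128, so this takes the 3-byte form

    byte[] bytes = encoder.GetBytes();
    fixed (byte* pBytes = bytes)
    {
        byte* cursor = pBytes;
        uint value = NativePrimitiveDecoder.DecodeUnsigned(ref cursor, pBytes + bytes.Length);

        Debug.Assert(value == 0x12345);     // the original value comes back
        Debug.Assert(cursor - pBytes == 3); // and exactly three bytes were consumed
    }
}
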
diff --git a/src/Common/src/Internal/NativeFormat/NativeFormatWriter.cs b/src/Common/src/Internal/NativeFormat/NativeFormatWriter.cs
new file mode 100644
index 000000000..023face77
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/NativeFormatWriter.cs
@@ -0,0 +1,618 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+using System;
+using System.IO;
+using System.Diagnostics;
+using System.Collections.Generic;
+using System.Text;
+
+// Managed mirror of NativeFormatWriter.h/.cpp
+namespace Internal.NativeFormat
+{
+#if NATIVEFORMAT_PUBLICWRITER
+ public
+#else
+ internal
+#endif
+ abstract class Vertex
+ {
+ internal int _offset = NotPlaced;
+ internal int _iteration = -1; // Iteration that the offset is valid for
+
+ internal const int NotPlaced = -1;
+ internal const int Placed = -2;
+ internal const int Unified = -3;
+
+ public Vertex()
+ {
+ }
+
+ internal abstract void Save(NativeWriter writer);
+
+ public int VertexOffset
+ {
+ get
+ {
+ Debug.Assert(_offset >= 0);
+ return _offset;
+ }
+ }
+ }
+
+#if NATIVEFORMAT_PUBLICWRITER
+ public
+#else
+ internal
+#endif
+ class Section
+ {
+ internal List<Vertex> _items = new List<Vertex>();
+ internal Dictionary<Vertex, Vertex> _placedMap = new Dictionary<Vertex, Vertex>();
+
+ public Section()
+ {
+ }
+
+ public Vertex Place(Vertex vertex)
+ {
+ if (vertex._offset == Vertex.Unified)
+ {
+ Vertex placedVertex;
+ if (_placedMap.TryGetValue(vertex, out placedVertex))
+ return placedVertex;
+
+ placedVertex = new PlacedVertex(vertex);
+ _placedMap.Add(vertex, placedVertex);
+ vertex = placedVertex;
+ }
+
+ Debug.Assert(vertex._offset == Vertex.NotPlaced);
+ vertex._offset = Vertex.Placed;
+ _items.Add(vertex);
+
+ return vertex;
+ }
+ }
+
+#if NATIVEFORMAT_PUBLICWRITER
+ public
+#else
+ internal
+#endif
+ class NativeWriter
+ {
+ List<Section> _sections = new List<Section>();
+
+ enum SavePhase
+ {
+ Initial,
+ Shrinking,
+ Growing
+ }
+
+
+ int _iteration = 0;
+ SavePhase _phase; // Current save phase
+ int _offsetAdjustment; // Cumulative offset adjustment compared to previous iteration
+ int _paddingSize; // How much padding was used
+
+ Dictionary<Vertex, Vertex> _unifier = new Dictionary<Vertex, Vertex>();
+
+ NativePrimitiveEncoder _encoder = new NativePrimitiveEncoder();
+
+#if NATIVEFORMAT_COMPRESSION
+ struct Tentative
+ {
+ internal Vertex Vertex;
+ internal int PreviousOffset;
+ }
+
+ // State used by compression
+ List<Tentative> _tentativelyWritten = new List<Tentative>(); // Tentatively written Vertices.
+ int _compressionDepth = 0;
+#endif
+
+ public NativeWriter()
+ {
+ _encoder.Init();
+ }
+
+ public Section NewSection()
+ {
+ Section section = new Section();
+ _sections.Add(section);
+ return section;
+ }
+
+ public void WriteByte(byte b) { _encoder.WriteByte(b); }
+ public void WriteUInt8(byte value) { _encoder.WriteUInt8(value); }
+ public void WriteUInt16(ushort value) { _encoder.WriteUInt16(value); }
+ public void WriteUInt32(uint value) { _encoder.WriteUInt32(value); }
+ public void WriteUInt64(ulong value) { _encoder.WriteUInt64(value); }
+ public void WriteUnsigned(uint d) { _encoder.WriteUnsigned(d); }
+ public void WriteSigned(int i) { _encoder.WriteSigned(i); }
+ public void WriteUnsignedLong(ulong i) { _encoder.WriteUnsignedLong(i); }
+ public void WriteSignedLong(long i) { _encoder.WriteSignedLong(i); }
+ public void WriteFloat(float value) { _encoder.WriteFloat(value); }
+ public void WriteDouble(double value) { _encoder.WriteDouble(value); }
+
+ public void WritePad(int size)
+ {
+ while (size > 0)
+ {
+ _encoder.WriteByte(0);
+ size--;
+ }
+ }
+
+ public bool IsGrowing()
+ {
+ return _phase == SavePhase.Growing;
+ }
+
+ public void UpdateOffsetAdjustment(int offsetDelta)
+ {
+ switch (_phase)
+ {
+ case SavePhase.Shrinking:
+ _offsetAdjustment = Math.Min(_offsetAdjustment, offsetDelta);
+ break;
+ case SavePhase.Growing:
+ _offsetAdjustment = Math.Max(_offsetAdjustment, offsetDelta);
+ break;
+ default:
+ break;
+ }
+ }
+
+ public void RollbackTo(int offset)
+ {
+ _encoder.RollbackTo(offset);
+ }
+
+ public void RollbackTo(int offset, int offsetAdjustment)
+ {
+ _offsetAdjustment = offsetAdjustment;
+ RollbackTo(offset);
+ }
+
+ public void PatchByteAt(int offset, byte value)
+ {
+ _encoder.PatchByteAt(offset, value);
+ }
+
+ // Swallow exceptions if invalid encoding is detected.
+ // This is the price we have to pay for using UTF8. Things like the High Surrogate Start Char '\ud800'
+ // can be expressed in UTF-16 (which is the format used to store ECMA metadata), but do not have
+ // a representation in UTF-8.
+ private static Encoding _stringEncoding = new UTF8Encoding(false, false);
+
+ public void WriteString(string s)
+ {
+ // The actual bytes are only necessary for the final version during the growing phase
+ if (IsGrowing())
+ {
+ byte[] bytes = _stringEncoding.GetBytes(s);
+
+ _encoder.WriteUnsigned((uint)bytes.Length);
+ for (int i = 0; i < bytes.Length; i++)
+ _encoder.WriteByte(bytes[i]);
+ }
+ else
+ {
+ int byteCount = _stringEncoding.GetByteCount(s);
+ _encoder.WriteUnsigned((uint)byteCount);
+ WritePad(byteCount);
+ }
+ }
+
+ public void WriteRelativeOffset(Vertex val)
+ {
+ if (val._iteration == -1)
+ {
+ // If the offsets are not determined yet, use the maximum possible encoding
+ _encoder.WriteSigned(0x7FFFFFFF);
+ return;
+ }
+
+ int offset = val._offset;
+
+ // If the offset was not updated in this iteration yet, adjust it by the delta we have accumulated in this iteration so far.
+ // This adjustment allows the offsets to converge faster.
+ if (val._iteration < _iteration)
+ offset += _offsetAdjustment;
+
+ _encoder.WriteSigned(offset - GetCurrentOffset());
+ }
+
+ public int GetCurrentOffset()
+ {
+ return _encoder.Size;
+ }
+
+ public int GetNumberOfIterations()
+ {
+ return _iteration;
+ }
+
+ public int GetPaddingSize()
+ {
+ return _paddingSize;
+ }
+
+ public void Save(Stream stream)
+ {
+ _encoder.Clear();
+
+ _phase = SavePhase.Initial;
+ foreach (var section in _sections) foreach (var vertex in section._items)
+ {
+ vertex._offset = GetCurrentOffset();
+ vertex._iteration = _iteration;
+ vertex.Save(this);
+
+#if NATIVEFORMAT_COMPRESSION
+ // Ensure that the compressor state is fully flushed
+ Debug.Assert(_tentativelyWritten.Count == 0);
+ Debug.Assert(_compressionDepth == 0);
+#endif
+ }
+
+ // Aggressive phase that only allows offsets to shrink.
+ _phase = SavePhase.Shrinking;
+ for (; ; )
+ {
+ _iteration++;
+ _encoder.Clear();
+
+ _offsetAdjustment = 0;
+
+ foreach (var section in _sections) foreach (var vertex in section._items)
+ {
+ int currentOffset = GetCurrentOffset();
+
+ // Only allow the offsets to shrink.
+ _offsetAdjustment = Math.Min(_offsetAdjustment, currentOffset - vertex._offset);
+
+ vertex._offset += _offsetAdjustment;
+
+ if (vertex._offset < currentOffset)
+ {
+ // It is possible for the encoding of relative offsets to grow during some iterations.
+ // Ignore this growth because it should disappear during the next iteration.
+ RollbackTo(vertex._offset);
+ }
+ Debug.Assert(vertex._offset == GetCurrentOffset());
+
+ vertex._iteration = _iteration;
+
+ vertex.Save(this);
+
+#if NATIVEFORMAT_COMPRESSION
+ // Ensure that the compressor state is fully flushed
+ Debug.Assert(_tentativelyWritten.Count == 0);
+ Debug.Assert(_compressionDepth == 0);
+#endif
+ }
+
+ // We are not able to shrink anymore. We cannot just return here. It is possible that we have rolled back
+ // above because we shrank too much.
+ if (_offsetAdjustment == 0)
+ break;
+
+ // Limit the number of shrinking iterations. This limit is meant to be hit in corner cases only.
+ if (_iteration > 10)
+ break;
+ }
+
+ // Conservative phase that only allows the offsets to grow. It is guaranteed to converge.
+ _phase = SavePhase.Growing;
+ for (; ; )
+ {
+ _iteration++;
+ _encoder.Clear();
+
+ _offsetAdjustment = 0;
+ _paddingSize = 0;
+
+ foreach (var section in _sections) foreach (var vertex in section._items)
+ {
+ int currentOffset = GetCurrentOffset();
+
+ // Only allow the offsets to grow.
+ _offsetAdjustment = Math.Max(_offsetAdjustment, currentOffset - vertex._offset);
+
+ vertex._offset += _offsetAdjustment;
+
+ if (vertex._offset > currentOffset)
+ {
+ // Padding
+ int padding = vertex._offset - currentOffset;
+ _paddingSize += padding;
+ WritePad(padding);
+ }
+ Debug.Assert(vertex._offset == GetCurrentOffset());
+
+ vertex._iteration = _iteration;
+
+ vertex.Save(this);
+
+#if NATIVEFORMAT_COMPRESSION
+ // Ensure that the compressor state is fully flushed
+ Debug.Assert(_tentativelyWritten.Count == 0);
+ Debug.Assert(_compressionDepth == 0);
+#endif
+ }
+
+ if (_offsetAdjustment == 0)
+ {
+ _encoder.Save(stream);
+ return;
+ }
+ }
+ }
+
+#if NATIVEFORMAT_COMPRESSION
+ // TODO:
+#else
+ struct TypeSignatureCompressor
+ {
+ TypeSignatureCompressor(NativeWriter pWriter) { }
+ void Pack(Vertex vertex) { }
+ }
+#endif
+
+ T Unify<T>(T vertex) where T : Vertex
+ {
+ Vertex existing;
+ if (_unifier.TryGetValue(vertex, out existing))
+ return (T)existing;
+
+ Debug.Assert(vertex._offset == Vertex.NotPlaced);
+ vertex._offset = Vertex.Unified;
+ _unifier.Add(vertex, vertex);
+
+ return vertex;
+ }
+
+ public Vertex GetUnsignedConstant(uint value)
+ {
+ UnsignedConstant vertex = new UnsignedConstant(value);
+ return Unify(vertex);
+ }
+ }
+
+ class PlacedVertex : Vertex
+ {
+ Vertex _unified;
+
+ public PlacedVertex(Vertex unified)
+ {
+ _unified = unified;
+ }
+
+ internal override void Save(NativeWriter writer)
+ {
+ _unified.Save(writer);
+ }
+ }
+
+ class UnsignedConstant : Vertex
+ {
+ uint _value;
+
+ public UnsignedConstant(uint value)
+ {
+ _value = value;
+ }
+
+ internal override void Save(NativeWriter writer)
+ {
+ writer.WriteUnsigned(_value);
+ }
+
+ public override int GetHashCode()
+ {
+ return 6659 + ((int)_value) * 19;
+ }
+ public override bool Equals(object other)
+ {
+ if (!(other is UnsignedConstant))
+ return false;
+
+ UnsignedConstant p = (UnsignedConstant)other;
+ if (_value != p._value) return false;
+ return true;
+ }
+ }
+
+ class VertexHashtable : Vertex
+ {
+ struct Entry
+ {
+ public Entry(uint hashcode, Vertex vertex)
+ {
+ Offset = 0;
+ Hashcode = hashcode;
+ Vertex = vertex;
+ }
+
+ public int Offset;
+
+ public uint Hashcode;
+ public Vertex Vertex;
+
+ public static int Comparison(Entry a, Entry b)
+ {
+ return (int)(a.Hashcode /*& mask*/) - (int)(b.Hashcode /*& mask*/);
+ }
+ }
+
+ List<Entry> _Entries;
+
+ // How many entries to target per bucket. Higher fill factor means smaller size, but worse runtime perf.
+ int _nFillFactor;
+
+ // Number of buckets chosen for the table. Must be a power of two. 0 means that the table is still open for mutation.
+ uint _nBuckets;
+
+ // Current size of index entry
+ int _entryIndexSize; // 0 - uint8, 1 - uint16, 2 - uint32
+
+ public const int DefaultFillFactor = 13;
+
+ public VertexHashtable(int fillFactor = DefaultFillFactor)
+ {
+ _Entries = new List<Entry>();
+ _nFillFactor = fillFactor;
+ _nBuckets = 0;
+ _entryIndexSize = 0;
+ }
+
+ public void Append(uint hashcode, Vertex element)
+ {
+ // The table needs to be open for mutation
+ Debug.Assert(_nBuckets == 0);
+
+ _Entries.Add(new Entry(hashcode, element));
+ }
+
+ // Returns the number of bits needed to represent x (i.e. floor(log2(x)) + 1); returns 0 iff x == 0
+ static int HighestBit(uint x)
+ {
+ int ret = 0;
+ while (x != 0)
+ {
+ x >>= 1;
+ ret++;
+ }
+ return ret;
+ }
+
+ // Helper method to back patch entry index in the bucket table
+ static void PatchEntryIndex(NativeWriter writer, int patchOffset, int entryIndexSize, int entryIndex)
+ {
+ if (entryIndexSize == 0)
+ {
+ writer.PatchByteAt(patchOffset, (byte)entryIndex);
+ }
+ else if (entryIndexSize == 1)
+ {
+ writer.PatchByteAt(patchOffset, (byte)entryIndex);
+ writer.PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
+ }
+ else
+ {
+ writer.PatchByteAt(patchOffset, (byte)entryIndex);
+ writer.PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
+ writer.PatchByteAt(patchOffset + 2, (byte)(entryIndex >> 16));
+ writer.PatchByteAt(patchOffset + 3, (byte)(entryIndex >> 24));
+ }
+ }
+
+ void ComputeLayout()
+ {
+ uint bucketsEstimate = (uint)(_Entries.Count / _nFillFactor);
+
+ // Round number of buckets up to the power of two
+ _nBuckets = (uint)(1 << HighestBit(bucketsEstimate));
+
+ // Lowest byte of the hashcode is used for lookup within the bucket. Keep it sorted too so that
+ // we can use the ordering to terminate the lookup prematurely.
+ uint mask = ((_nBuckets - 1) << 8) | 0xFF;
+
+ // sort it by hashcode
+ _Entries.Sort(
+ (a, b) =>
+ {
+ return (int)(a.Hashcode & mask) - (int)(b.Hashcode & mask);
+ }
+ );
+
+ // Start with maximum size entries
+ _entryIndexSize = 2;
+ }
+
+ internal override void Save(NativeWriter writer)
+ {
+ // Compute the layout of the table if we have not done it yet
+ if (_nBuckets == 0)
+ ComputeLayout();
+
+ int nEntries = _Entries.Count;
+ int startOffset = writer.GetCurrentOffset();
+ uint bucketMask = (_nBuckets - 1);
+
+ // Lowest two bits are entry index size, the rest is log2 number of buckets
+ int numberOfBucketsShift = HighestBit(_nBuckets) - 1;
+ writer.WriteByte((byte)((numberOfBucketsShift << 2) | _entryIndexSize));
+
+ int bucketsOffset = writer.GetCurrentOffset();
+
+ writer.WritePad((int)((_nBuckets + 1) << _entryIndexSize));
+
+ // For faster lookup at runtime, we store the first entry index even though it is redundant (the
+ // value can be inferred from number of buckets)
+ PatchEntryIndex(writer, bucketsOffset, _entryIndexSize, writer.GetCurrentOffset() - bucketsOffset);
+
+ int iEntry = 0;
+
+ for (int iBucket = 0; iBucket < _nBuckets; iBucket++)
+ {
+ while (iEntry < nEntries)
+ {
+ if (((_Entries[iEntry].Hashcode >> 8) & bucketMask) != iBucket)
+ break;
+
+ Entry curEntry = _Entries[iEntry];
+
+ int currentOffset = writer.GetCurrentOffset();
+ writer.UpdateOffsetAdjustment(currentOffset - curEntry.Offset);
+ curEntry.Offset = currentOffset;
+ _Entries[iEntry] = curEntry;
+
+ writer.WriteByte((byte)curEntry.Hashcode);
+ writer.WriteRelativeOffset(curEntry.Vertex);
+
+ iEntry++;
+ }
+
+ int patchOffset = bucketsOffset + ((iBucket + 1) << _entryIndexSize);
+
+ PatchEntryIndex(writer, patchOffset, _entryIndexSize, writer.GetCurrentOffset() - bucketsOffset);
+ }
+ Debug.Assert(iEntry == nEntries);
+
+ int maxIndexEntry = (writer.GetCurrentOffset() - bucketsOffset);
+ int newEntryIndexSize = 0;
+ if (maxIndexEntry > 0xFF)
+ {
+ newEntryIndexSize++;
+ if (maxIndexEntry > 0xFFFF)
+ newEntryIndexSize++;
+ }
+
+ if (writer.IsGrowing())
+ {
+ if (newEntryIndexSize > _entryIndexSize)
+ {
+ // Ensure that the table will be redone with new entry index size
+ writer.UpdateOffsetAdjustment(1);
+
+ _entryIndexSize = newEntryIndexSize;
+ }
+ }
+ else
+ {
+ if (newEntryIndexSize < _entryIndexSize)
+ {
+ // Ensure that the table will be redone with new entry index size
+ writer.UpdateOffsetAdjustment(-1);
+
+ _entryIndexSize = newEntryIndexSize;
+ }
+ }
+ }
+ }
+}
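
An end-to-end writer sketch, not part of this change: build a small hashtable of unsigned constants keyed by a name hashcode and serialize it. All types used are the internal ones added in this commit, so the sketch assumes it is compiled into the same assembly.

using System.IO;
using Internal.NativeFormat;

internal static class WriterExample
{
    internal static byte[] BuildSampleBlob()
    {
        var writer = new NativeWriter();
        Section section = writer.NewSection();

        var hashtable = new VertexHashtable();
        section.Place(hashtable);

        // Each entry pairs a hashcode with a vertex; the writer emits the vertex and stores a
        // relative offset to it in the hashtable entry.
        hashtable.Append((uint)TypeHashingAlgorithms.ComputeNameHashCode("System.Object"),
                         section.Place(writer.GetUnsignedConstant(42)));

        using (var stream = new MemoryStream())
        {
            writer.Save(stream); // iterates until the relative offsets stop changing
            return stream.ToArray();
        }
    }
}
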
diff --git a/src/Common/src/Internal/NativeFormat/TypeHashingAlgorithms.cs b/src/Common/src/Internal/NativeFormat/TypeHashingAlgorithms.cs
new file mode 100644
index 000000000..4967d61d3
--- /dev/null
+++ b/src/Common/src/Internal/NativeFormat/TypeHashingAlgorithms.cs
@@ -0,0 +1,110 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+// ---------------------------------------------------------------------------
+// TypeHashingAlgorithms.cs
+//
+// Generic functions to compute the hashcode value of types
+// ---------------------------------------------------------------------------
+
+using System;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+
+namespace Internal.NativeFormat
+{
+ static class TypeHashingAlgorithms
+ {
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private static int _rotl(int value, int shift)
+ {
+ return (int)(((uint)value << shift) | ((uint)value >> (32 - shift)));
+ }
+
+ //
+ // Returns the hashcode value of the 'src' string
+ //
+ public static int ComputeNameHashCode(string src)
+ {
+ int hash1 = 0x6DA3B944;
+ int hash2 = 0;
+
+ for (int i = 0; i < src.Length; i += 2)
+ {
+ hash1 = (hash1 + _rotl(hash1, 5)) ^ src[i];
+ if ((i + 1) < src.Length)
+ hash2 = (hash2 + _rotl(hash2, 5)) ^ src[i + 1];
+ }
+
+ hash1 += _rotl(hash1, 8);
+ hash2 += _rotl(hash2, 8);
+
+ return hash1 ^ hash2;
+ }
+
+
+ public static int ComputeArrayTypeHashCode(int elementTypeHashcode, int rank)
+ {
+ // Arrays are treated as generic types in some parts of our system. The array hashcodes are
+ // carefully crafted to be the same as the hashcodes of their implementation generic types.
+
+ int hashCode;
+ if (rank == 1)
+ {
+ hashCode = unchecked((int)0xd5313557u);
+ Debug.Assert(hashCode == ComputeNameHashCode("System.Array`1"));
+ }
+ else
+ {
+ hashCode = ComputeNameHashCode("System.MDArrayRank" + rank.ToString() + "`1");
+ }
+
+ hashCode = (hashCode + _rotl(hashCode, 13)) ^ elementTypeHashcode;
+ return (hashCode + _rotl(hashCode, 15));
+ }
+
+ public static int ComputeArrayTypeHashCode<T>(T elementType, int rank)
+ {
+ return ComputeArrayTypeHashCode(elementType.GetHashCode(), rank);
+ }
+
+
+ public static int ComputePointerTypeHashCode(int pointeeTypeHashcode)
+ {
+ return (pointeeTypeHashcode + _rotl(pointeeTypeHashcode, 5)) ^ 0x12D0;
+ }
+
+ public static int ComputePointerTypeHashCode<T>(T pointeeType)
+ {
+ return ComputePointerTypeHashCode(pointeeType.GetHashCode());
+ }
+
+
+ public static int ComputeByrefTypeHashCode(int parameterTypeHashcode)
+ {
+ return (parameterTypeHashcode + _rotl(parameterTypeHashcode, 7)) ^ 0x4C85;
+ }
+
+ public static int ComputeByrefTypeHashCode<T>(T parameterType)
+ {
+ return ComputeByrefTypeHashCode(parameterType.GetHashCode());
+ }
+
+
+ public static int ComputeNestedTypeHashCode(int enclosingTypeHashcode, int nestedTypeNameHash)
+ {
+ return (enclosingTypeHashcode + _rotl(enclosingTypeHashcode, 11)) ^ nestedTypeNameHash;
+ }
+
+
+ public static int ComputeGenericInstanceHashCode<ARG>(int genericDefinitionHashCode, ARG[] genericTypeArguments)
+ {
+ int hashcode = genericDefinitionHashCode;
+ for (int i = 0; i < genericTypeArguments.Length; i++)
+ {
+ int argumentHashCode = genericTypeArguments[i].GetHashCode();
+ hashcode = (hashcode + _rotl(hashcode, 13)) ^ argumentHashCode;
+ }
+ return (hashcode + _rotl(hashcode, 15));
+ }
+ }
+}
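
A small check, assuming same-assembly access to the internal TypeHashingAlgorithms class and a using for System.Diagnostics, of the claim above that a rank-1 array hashes the same as the System.Array`1 instantiation over the same element; int works as the generic argument because int.GetHashCode() returns the value itself.

int elementHash = TypeHashingAlgorithms.ComputeNameHashCode("System.Int32");

int arrayHash = TypeHashingAlgorithms.ComputeArrayTypeHashCode(elementHash, 1);
int genericHash = TypeHashingAlgorithms.ComputeGenericInstanceHashCode(
    TypeHashingAlgorithms.ComputeNameHashCode("System.Array`1"),
    new int[] { elementHash });

Debug.Assert(arrayHash == genericHash); // rank-1 arrays hash like their System.Array`1 instantiation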