This is an automated email from the ASF dual-hosted git repository.
pandalee pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/fory.git
The following commit(s) were added to refs/heads/main by this push:
new 266af2a13 feat(go): add type meta encoding for meta share (#2554)
266af2a13 is described below
commit 266af2a133c12dd2d235f4f47ebd66afd54bf0b8
Author: Zhong Junjie <[email protected]>
AuthorDate: Tue Sep 2 15:29:56 2025 +0800
feat(go): add type meta encoding for meta share (#2554)
<!--
**Thanks for contributing to Fory.**
**If this is your first time opening a PR on fory, you can refer to
[CONTRIBUTING.md](https://github.com/apache/fory/blob/main/CONTRIBUTING.md).**
Contribution Checklist
- The **Apache Fory** community has requirements on the naming of pr
titles. You can also find instructions in
[CONTRIBUTING.md](https://github.com/apache/fory/blob/main/CONTRIBUTING.md).
- Fory has a strong focus on performance. If the PR you submit will have
an impact on performance, please benchmark it first and provide the
benchmark result here.
-->
## Why?
<!-- Describe the purpose of this PR. -->
Type forward/backward compatible serialization is critical for online
services in which different services update their data schemas and deploy
at different times.
The encoding format can address this: [type def encoding
spec](https://fory.apache.org/docs/specification/fory_xlang_serialization_spec#type-def)
## What does this PR do?
This PR implements type definition encoding and decoding that conforms
to the specification. This functionality can be integrated into the main
serialization flow to enable metadata sharing mode.
TODOs:
- Add encoding support for collection types (map, slice, set, etc.)
- Integrate into the main flow to enable metadata sharing
- Add support for compressed type definitions
## Related issues
<!--
Is there any related issue? If this PR closes them, you can say
fix/closes:
- #xxxx0
- #xxxx1
- Fixes #xxxx2
-->
#2192
## Does this PR introduce any user-facing change?
<!--
If any user-facing interface changes, please [open an
issue](https://github.com/apache/fory/issues/new/choose) describing the
need to do so and update the document if necessary.
Delete section if not applicable.
-->
- [x] Does this PR introduce any public API change? **no**
- [x] Does this PR introduce any binary protocol compatibility change?
**no**
## Benchmark
<!--
When the PR has an impact on performance (if you don't know whether the
PR will have an impact on performance, you can submit the PR first, and
if it will have impact on performance, the code reviewer will explain
it), be sure to attach a benchmark data here.
Delete section if not applicable.
-->
---
go/fory/meta/meta_string_encoder.go | 21 +++-
go/fory/type_def.go | 226 ++++++++++++++++++++++++++++++++++++
go/fory/type_def_decoder.go | 150 ++++++++++++++++++++++++
go/fory/type_def_encoder.go | 196 +++++++++++++++++++++++++++++++
go/fory/type_def_encoder_test.go | 83 +++++++++++++
5 files changed, 671 insertions(+), 5 deletions(-)
diff --git a/go/fory/meta/meta_string_encoder.go
b/go/fory/meta/meta_string_encoder.go
index 171eff42f..0ad1502e0 100644
--- a/go/fory/meta/meta_string_encoder.go
+++ b/go/fory/meta/meta_string_encoder.go
@@ -157,28 +157,39 @@ func (e *Encoder) EncodeGeneric(chars []byte, bitsPerChar
int) (result []byte, e
}
func (e *Encoder) ComputeEncoding(input string) Encoding {
+ allEncodings := []Encoding{LOWER_SPECIAL, LOWER_UPPER_DIGIT_SPECIAL,
FIRST_TO_LOWER_SPECIAL, ALL_TO_LOWER_SPECIAL, UTF_8}
+ return e.ComputeEncodingWith(input, allEncodings)
+}
+
+func (e *Encoder) ComputeEncodingWith(input string, encodings []Encoding)
Encoding {
+ encodingFlags := make(map[Encoding]bool)
+ for _, enc := range encodings {
+ encodingFlags[enc] = true
+ }
// Special case for empty string: default to UTF_8 encoding
if len(input) == 0 {
return UTF_8
}
statistics := e.computeStringStatistics(input)
- if statistics.canLowerSpecialEncoded {
+ if statistics.canLowerSpecialEncoded && encodingFlags[LOWER_SPECIAL] {
return LOWER_SPECIAL
}
if statistics.canLowerUpperDigitSpecialEncoded {
// Here, the string contains only letters, numbers, and two
special symbols
- if statistics.digitCount != 0 {
+ if statistics.digitCount != 0 &&
encodingFlags[LOWER_UPPER_DIGIT_SPECIAL] {
return LOWER_UPPER_DIGIT_SPECIAL
}
upperCount := statistics.upperCount
chars := []byte(input)
- if upperCount == 1 && chars[0] >= 'A' && chars[0] <= 'Z' {
+ if upperCount == 1 && chars[0] >= 'A' && chars[0] <= 'Z' &&
encodingFlags[FIRST_TO_LOWER_SPECIAL] {
return FIRST_TO_LOWER_SPECIAL
}
- if (len(chars)+upperCount)*5 < len(chars)*6 {
+ if (len(chars)+upperCount)*5 < len(chars)*6 &&
encodingFlags[ALL_TO_LOWER_SPECIAL] {
return ALL_TO_LOWER_SPECIAL
}
- return LOWER_UPPER_DIGIT_SPECIAL
+ if encodingFlags[LOWER_UPPER_DIGIT_SPECIAL] {
+ return LOWER_UPPER_DIGIT_SPECIAL
+ }
}
return UTF_8
}
diff --git a/go/fory/type_def.go b/go/fory/type_def.go
new file mode 100644
index 000000000..eef803ca4
--- /dev/null
+++ b/go/fory/type_def.go
@@ -0,0 +1,226 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package fory
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/apache/fory/go/fory/meta"
+)
+
const (
	// META_SIZE_MASK masks the low 12 bits of the global header holding the meta size.
	META_SIZE_MASK = 0xFFF
	// COMPRESS_META_FLAG marks the type meta as compressed (bit 13 of the global header).
	COMPRESS_META_FLAG = 0b1 << 13
	// HAS_FIELDS_META_FLAG marks that field definitions follow (bit 12 of the global header).
	HAS_FIELDS_META_FLAG = 0b1 << 12
	// NUM_HASH_BITS is the number of meta-hash bits stored in the top of the global header.
	NUM_HASH_BITS = 50
)
+
/*
TypeDef represents a transportable value object containing type information and field definitions.
typeDef are layout as following:
  - first 8 bytes: global header (50 bits hash + 1 bit compress flag + write fields meta + 12 bits meta size)
  - next 1 byte: meta header (2 bits reserved + 1 bit register by name flag + 5 bits num fields)
  - next variable bytes: type id (varint) or ns name + type name
  - next variable bytes: field definitions (see below)
*/
type TypeDef struct {
	typeId         TypeId           // resolved type id of the described type
	nsName         *MetaStringBytes // namespace (package path); meaningful only when registerByName is true
	typeName       *MetaStringBytes // type name; meaningful only when registerByName is true
	compressed     bool             // whether the encoded meta bytes are compressed
	registerByName bool             // true when the type is registered by ns+name rather than by id
	fieldInfos     []FieldInfo      // per-field definitions, in declaration order
	encoded        []byte           // cached wire-format bytes produced by encodingTypeDef
}
+
+func NewTypeDef(typeId TypeId, nsName, typeName *MetaStringBytes,
registerByName, compressed bool, fieldInfos []FieldInfo) *TypeDef {
+ return &TypeDef{
+ typeId: typeId,
+ nsName: nsName,
+ typeName: typeName,
+ compressed: compressed,
+ registerByName: registerByName,
+ fieldInfos: fieldInfos,
+ encoded: nil,
+ }
+}
+
// writeTypeDef emits the pre-encoded TypeDef bytes into buffer; td.encoded
// must have been populated (see buildTypeDef / decodeTypeDef) beforehand.
func (td *TypeDef) writeTypeDef(buffer *ByteBuffer) {
	buffer.WriteBinary(td.encoded)
}
+
// readTypeDef reads and decodes a TypeDef from buffer; thin wrapper over decodeTypeDef.
func readTypeDef(fory *Fory, buffer *ByteBuffer) (*TypeDef, error) {
	return decodeTypeDef(fory, buffer)
}
+
// skipTypeDef advances the reader past an encoded TypeDef whose 8-byte global
// header has already been consumed into header.
func skipTypeDef(buffer *ByteBuffer, header int64) {
	sz := int(header & META_SIZE_MASK)
	// An all-ones 12-bit size means the real size overflowed; the remainder
	// follows as a var-uint32.
	if sz == META_SIZE_MASK {
		sz += int(buffer.ReadVarUint32())
	}
	buffer.IncreaseReaderIndex(sz)
}
+
+// buildTypeDef constructs a TypeDef from a value
+func buildTypeDef(fory *Fory, value reflect.Value) (*TypeDef, error) {
+ fieldInfos, err := buildFieldInfos(fory, value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to extract field infos: %w", err)
+ }
+
+ info, err := fory.typeResolver.getTypeInfo(value, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get type info for value %v:
%w", value, err)
+ }
+ typeId := TypeId(info.TypeID)
+ registerByName := IsNamespacedType(typeId)
+ typeDef := NewTypeDef(typeId, info.PkgPathBytes, info.NameBytes,
registerByName, false, fieldInfos)
+
+ // encoding the typeDef, and save the encoded bytes
+ encoded, err := encodingTypeDef(fory.typeResolver, typeDef)
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode class definition: %w",
err)
+ }
+
+ typeDef.encoded = encoded
+ return typeDef, nil
+}
+
/*
FieldInfo contains information about a single field in a struct
field info layout as following:
  - first 1 byte: header (2 bits field name encoding + 4 bits size + nullability flag + ref tracking flag)
  - next variable bytes: FieldType info
  - next variable bytes: field name or tag id
*/
type FieldInfo struct {
	name         string        // Go struct field name
	nameEncoding meta.Encoding // meta-string encoding chosen for the field name
	nullable     bool          // whether the field may be null on the wire
	trackingRef  bool          // whether reference tracking is enabled for this field
	fieldType    FieldType     // wire type information for the field value
}
+
+// buildFieldInfos extracts field information from a struct value
+func buildFieldInfos(fory *Fory, value reflect.Value) ([]FieldInfo, error) {
+ var fieldInfos []FieldInfo
+
+ typ := value.Type()
+ for i := 0; i < typ.NumField(); i++ {
+ field := typ.Field(i)
+ fieldValue := value.Field(i)
+
+ var fieldInfo FieldInfo
+ fieldName := field.Name
+
+ nameEncoding :=
fory.typeResolver.typeNameEncoder.ComputeEncodingWith(fieldName,
fieldNameEncodings)
+
+ ft, err := buildFieldType(fory, fieldValue)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build field type for
field %s: %w", fieldName, err)
+ }
+ fieldInfo = FieldInfo{
+ name: fieldName,
+ nameEncoding: nameEncoding,
+ nullable: nullable(field.Type),
+ trackingRef: fory.referenceTracking,
+ fieldType: ft,
+ }
+ fieldInfos = append(fieldInfos, fieldInfo)
+ }
+ return fieldInfos, nil
+}
+
// FieldType interface represents different field types, including object, collection, and map types
type FieldType interface {
	// TypeId returns the wire type id of the field.
	TypeId() TypeId
	// write serializes the type information into the buffer.
	write(*ByteBuffer)
}

// BaseFieldType provides common functionality for field types
type BaseFieldType struct {
	typeId TypeId // wire type id shared by all concrete field types
}

// TypeId returns the wire type id of the field.
func (b *BaseFieldType) TypeId() TypeId { return b.typeId }

// write emits the type id as a small var-uint32; concrete types carrying
// extra payload (collections, maps) are expected to extend this.
func (b *BaseFieldType) write(buffer *ByteBuffer) {
	buffer.WriteVarUint32Small7(uint32(b.typeId))
}
+
+// readFieldInfo reads field type info from the buffer according to the TypeId
+func readFieldType(buffer *ByteBuffer) (FieldType, error) {
+ typeId := buffer.ReadVarUint32Small7()
+ if typeId == LIST || typeId == SET {
+ panic("not implement yet")
+ } else if typeId == MAP {
+ panic("not implement yet")
+ }
+
+ return NewObjectFieldType(TypeId(typeId)), nil
+}
+
// CollectionFieldType represents collection types like List, Set
type CollectionFieldType struct {
	BaseFieldType
	elementType FieldType // type of the collection's elements
}

// MapFieldType represents map types
type MapFieldType struct {
	BaseFieldType
	keyType   FieldType // type of the map's keys
	valueType FieldType // type of the map's values
}

// ObjectFieldType represents object field types that aren't registered or collection/map types
type ObjectFieldType struct {
	BaseFieldType
}

// NewObjectFieldType builds an ObjectFieldType carrying the given type id.
func NewObjectFieldType(typeId TypeId) *ObjectFieldType {
	return &ObjectFieldType{
		BaseFieldType: BaseFieldType{
			typeId: typeId,
		},
	}
}
+
+// todo: implement buildFieldType for collection and map types
+// buildFieldType builds field type from reflect.Type, handling collection,
map and object types
+func buildFieldType(fory *Fory, fieldValue reflect.Value) (FieldType, error) {
+ fieldType := fieldValue.Type()
+
+ var typeId TypeId
+ typeInfo, err := fory.typeResolver.getTypeInfo(fieldValue, true)
+ if err != nil {
+ return nil, err
+ }
+ typeId = TypeId(typeInfo.TypeID)
+
+ if fieldType.Kind() == reflect.Slice || fieldType.Kind() ==
reflect.Array || fieldType.Kind() == SET {
+ panic("not implement yet")
+ }
+
+ if fieldType.Kind() == reflect.Map {
+ panic("not implement yet")
+ }
+
+ // For all other types, treat as ObjectFieldType
+ return NewObjectFieldType(typeId), nil
+}
diff --git a/go/fory/type_def_decoder.go b/go/fory/type_def_decoder.go
new file mode 100644
index 000000000..cefd8fe02
--- /dev/null
+++ b/go/fory/type_def_decoder.go
@@ -0,0 +1,150 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package fory
+
+import (
+ "fmt"
+)
+
/*
decodeTypeDef decodes a TypeDef from the buffer
typeDef are layout as following:
  - first 8 bytes: global header (50 bits hash + 1 bit compress flag + write fields meta + 12 bits meta size)
  - next 1 byte: meta header (2 bits reserved + 1 bit register by name flag + 5 bits num fields)
  - next variable bytes: type id (varint) or ns name + type name
  - next variable bytes: field definitions (see below)
*/
func decodeTypeDef(fory *Fory, buffer *ByteBuffer) (*TypeDef, error) {
	// Read 8-byte global header
	globalHeader := uint64(buffer.ReadInt64())
	hasFieldsMeta := (globalHeader & HAS_FIELDS_META_FLAG) != 0
	isCompressed := (globalHeader & COMPRESS_META_FLAG) != 0
	metaSize := int(globalHeader & META_SIZE_MASK)
	// An all-ones 12-bit size means the real size overflowed; the remainder
	// follows as a var-uint32 (mirrors prependGlobalHeader on the write side).
	if metaSize == META_SIZE_MASK {
		metaSize += int(buffer.ReadVarUint32())
	}

	// Store the encoded bytes for the TypeDef (including meta header and metadata)
	// todo: handle compression if is_compressed is true
	if isCompressed {
	}
	encoded := buffer.ReadBinary(metaSize)
	metaBuffer := NewByteBuffer(encoded)

	// Read 1-byte meta header
	metaHeaderByte, err := metaBuffer.ReadByte()
	if err != nil {
		return nil, err
	}
	// Extract field count from lower 5 bits; a saturated count means the
	// remainder follows as a var-uint32.
	fieldCount := int(metaHeaderByte & SmallNumFieldsThreshold)
	if fieldCount == SmallNumFieldsThreshold {
		fieldCount += int(metaBuffer.ReadVarUint32())
	}
	registeredByName := (metaHeaderByte & REGISTER_BY_NAME_FLAG) != 0

	// Read name or type ID according to the registerByName flag
	var typeId TypeId
	var nsBytes, nameBytes *MetaStringBytes
	if registeredByName {
		// Read namespace and type name for namespaced types
		readingNsBytes, err := fory.typeResolver.metaStringResolver.ReadMetaStringBytes(metaBuffer)
		if err != nil {
			return nil, fmt.Errorf("failed to read package path: %w", err)
		}
		nsBytes = readingNsBytes
		readingNameBytes, err := fory.typeResolver.metaStringResolver.ReadMetaStringBytes(metaBuffer)
		if err != nil {
			return nil, fmt.Errorf("failed to read type name: %w", err)
		}
		nameBytes = readingNameBytes
		// Resolve the local type id from the registry via the (ns, name) hash pair.
		info, exists := fory.typeResolver.nsTypeToTypeInfo[nsTypeKey{nsBytes.Hashcode, nameBytes.Hashcode}]
		if !exists {
			return nil, fmt.Errorf("type not registered")
		}
		typeId = TypeId(info.TypeID)
	} else {
		typeId = TypeId(metaBuffer.ReadVarInt32())
	}

	// Read fields information
	fieldInfos := make([]FieldInfo, fieldCount)
	if hasFieldsMeta {
		for i := 0; i < fieldCount; i++ {
			fieldInfo, err := readFieldInfo(fory.typeResolver, metaBuffer)
			if err != nil {
				return nil, fmt.Errorf("failed to read field info %d: %w", i, err)
			}
			fieldInfos[i] = fieldInfo
		}
	}

	// Create TypeDef, caching the raw meta bytes so it can be re-emitted as-is.
	typeDef := NewTypeDef(typeId, nsBytes, nameBytes, registeredByName, isCompressed, fieldInfos)
	typeDef.encoded = encoded

	return typeDef, nil
}
+
/*
readFieldInfo reads a single field's information from the buffer
field info layout as following:
  - first 1 byte: header (2 bits field name encoding + 4 bits size + nullability flag + ref tracking flag)
  - next variable bytes: FieldType info
  - next variable bytes: field name or tag id
*/
func readFieldInfo(typeResolver *typeResolver, buffer *ByteBuffer) (FieldInfo, error) {
	// Read field header
	headerByte, err := buffer.ReadByte()
	if err != nil {
		return FieldInfo{}, fmt.Errorf("failed to read field header: %w", err)
	}

	// Resolve the header
	nameEncodingFlag := (headerByte >> 6) & 0b11 // top 2 bits index fieldNameEncodings
	nameEncoding := fieldNameEncodings[nameEncodingFlag]
	nameLen := int((headerByte >> 2) & 0x0F) // 4-bit, 1-based encoded name length
	refTracking := (headerByte & 0b1) != 0
	isNullable := (headerByte & 0b10) != 0
	// A saturated 4-bit length means the remainder follows as a var-uint32
	// (mirrors writeFieldInfo on the write side).
	if nameLen == 0x0F {
		nameLen = FieldNameSizeThreshold + int(buffer.ReadVarUint32())
	} else {
		nameLen++ // Adjust for 1-based encoding
	}

	// reading field type
	ft, err := readFieldType(buffer)
	if err != nil {
		return FieldInfo{}, err
	}

	// Reading field name based on encoding
	nameBytes := buffer.ReadBinary(nameLen)
	fieldName, err := typeResolver.typeNameDecoder.Decode(nameBytes, nameEncoding)
	if err != nil {
		return FieldInfo{}, fmt.Errorf("failed to decode field name: %w", err)
	}

	return FieldInfo{
		name:         fieldName,
		nameEncoding: nameEncoding,
		fieldType:    ft,
		nullable:     isNullable,
		trackingRef:  refTracking,
	}, nil
}
diff --git a/go/fory/type_def_encoder.go b/go/fory/type_def_encoder.go
new file mode 100644
index 000000000..03d08731c
--- /dev/null
+++ b/go/fory/type_def_encoder.go
@@ -0,0 +1,196 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package fory
+
+import (
+ "fmt"
+
+ "github.com/apache/fory/go/fory/meta"
+ "github.com/spaolacci/murmur3"
+)
+
const (
	// SmallNumFieldsThreshold is the largest field count representable in the
	// meta header's 5-bit count; larger counts spill into a var-uint32.
	SmallNumFieldsThreshold = 31
	// REGISTER_BY_NAME_FLAG marks a type registered by ns+name (bit 5 of the meta header).
	REGISTER_BY_NAME_FLAG = 0b1 << 5
	// FieldNameSizeThreshold is the largest encoded field-name length
	// representable in the field header's 4-bit size; longer names spill into
	// a var-uint32.
	FieldNameSizeThreshold = 15
)

// Encoding `UTF8/ALL_TO_LOWER_SPECIAL/LOWER_UPPER_DIGIT_SPECIAL/TAG_ID` for fieldName
// The slice index doubles as the 2-bit encoding flag stored in each field header.
var fieldNameEncodings = []meta.Encoding{
	meta.UTF_8,
	meta.ALL_TO_LOWER_SPECIAL,
	meta.LOWER_UPPER_DIGIT_SPECIAL,
	// todo: add support for TAG_ID encoding
}
+
+func getFieldNameEncodingIndex(encoding meta.Encoding) int {
+ for i, enc := range fieldNameEncodings {
+ if enc == encoding {
+ return i
+ }
+ }
+ return 0 // Default to UTF_8 if not found
+}
+
+/*
+encodingTypeDef encodes a TypeDef into binary format according to the
specification
+typeDef are layout as following:
+- first 8 bytes: global header (50 bits hash + 1 bit compress flag + write
fields meta + 12 bits meta size)
+- next 1 byte: meta header (2 bits reserved + 1 bit register by name flag + 5
bits num fields)
+- next variable bytes: type id (varint) or ns name + type name
+- next variable bytes: field infos (see below)
+*/
+func encodingTypeDef(typeResolver *typeResolver, typeDef *TypeDef) ([]byte,
error) {
+ buffer := NewByteBuffer(nil)
+
+ if err := writeMetaHeader(buffer, typeDef); err != nil {
+ return nil, fmt.Errorf("failed to write meta header: %w", err)
+ }
+
+ if typeDef.registerByName {
+ if err :=
typeResolver.metaStringResolver.WriteMetaStringBytes(buffer, typeDef.nsName);
err != nil {
+ return nil, err
+ }
+ if err :=
typeResolver.metaStringResolver.WriteMetaStringBytes(buffer, typeDef.typeName);
err != nil {
+ return nil, err
+ }
+ } else {
+ buffer.WriteVarInt32(int32(typeDef.typeId))
+ }
+
+ if err := writeFieldsInfo(typeResolver, buffer, typeDef.fieldInfos);
err != nil {
+ return nil, fmt.Errorf("failed to write fields info: %w", err)
+ }
+
+ result, err := prependGlobalHeader(buffer, false,
len(typeDef.fieldInfos) > 0)
+ if err != nil {
+ return nil, fmt.Errorf("failed to write global binary header:
%w", err)
+ }
+
+ return result.GetByteSlice(0, result.WriterIndex()), nil
+}
+
// prependGlobalHeader writes the 8-byte global header
// (50 bits meta hash | compress flag | has-fields-meta flag | 12 bits meta size)
// in front of the already-encoded meta bytes and returns the combined buffer.
func prependGlobalHeader(buffer *ByteBuffer, isCompressed bool, hasFieldsMeta bool) (*ByteBuffer, error) {
	var header uint64
	metaSize := buffer.WriterIndex()

	// Hash the meta bytes; the shift stores 50 bits of the hash in the
	// header's high bits, leaving the low 14 bits for flags + size.
	hashValue := murmur3.Sum64WithSeed(buffer.GetByteSlice(0, metaSize), 47)
	header |= hashValue << (64 - NUM_HASH_BITS)

	if hasFieldsMeta {
		header |= HAS_FIELDS_META_FLAG
	}

	if isCompressed {
		header |= COMPRESS_META_FLAG
	}

	if metaSize < META_SIZE_MASK {
		header |= uint64(metaSize) & 0xFFF
	} else {
		header |= 0xFFF // Set to max value, actual size will follow
	}

	// NOTE(review): the pre-sized length covers header + meta only; the
	// var-uint32 overflow written below presumably relies on ByteBuffer
	// growing on write — confirm against ByteBuffer semantics.
	result := NewByteBuffer(make([]byte, metaSize+8))
	result.WriteInt64(int64(header))

	if metaSize >= META_SIZE_MASK {
		result.WriteVarUint32(uint32(metaSize - META_SIZE_MASK))
	}
	result.WriteBinary(buffer.GetByteSlice(0, metaSize))

	return result, nil
}
+
+// writeMetaHeader writes the 1-byte meta header
+func writeMetaHeader(buffer *ByteBuffer, typeDef *TypeDef) error {
+ // 2 bits reserved + 1 bit register by name flag + 5 bits num fields
+ offset := buffer.writerIndex
+ if err := buffer.WriteByte(0xFF); err != nil {
+ return err
+ }
+ fieldInfos := typeDef.fieldInfos
+ header := len(fieldInfos)
+ if header > SmallNumFieldsThreshold {
+ header = SmallNumFieldsThreshold
+ buffer.WriteVarUint32(uint32(len(fieldInfos) -
SmallNumFieldsThreshold))
+ }
+ if typeDef.registerByName {
+ header |= REGISTER_BY_NAME_FLAG
+ }
+
+ buffer.PutUint8(offset, uint8(header))
+ return nil
+}
+
+// writeFieldsInfo writes field information according to the specification
+// field info layout as following:
+// - first 1 byte: header (2 bits field name encoding + 4 bits size +
nullability flag + ref tracking flag)
+// - next variable bytes: FieldType info
+// - next variable bytes: field name or tag id
+func writeFieldsInfo(typeResolver *typeResolver, buffer *ByteBuffer,
fieldInfos []FieldInfo) error {
+ for _, field := range fieldInfos {
+ if err := writeFieldInfo(typeResolver, buffer, field); err !=
nil {
+ return fmt.Errorf("failed to write field info for field
%s: %w", field.name, err)
+ }
+ }
+ return nil
+}
+
// writeFieldInfo writes a single field's information:
// one header byte (2 bits name encoding | 4 bits size | nullability | ref
// tracking), an optional var-uint32 size overflow, the field type, then the
// encoded field name.
func writeFieldInfo(typeResolver *typeResolver, buffer *ByteBuffer, field FieldInfo) error {
	// Write field header
	// 2 bits field name encoding + 4 bits size + nullability flag + ref tracking flag
	// Reserve the header byte; it is patched below once the name length is known.
	offset := buffer.writerIndex
	if err := buffer.WriteByte(0xFF); err != nil {
		return err
	}
	var header uint8
	if field.trackingRef {
		header |= 0b1
	}
	if field.nullable {
		header |= 0b10
	}
	// store index of encoding in the 2 highest bits
	encodingFlag := byte(getFieldNameEncodingIndex(field.nameEncoding))
	header |= encodingFlag << 6
	metaString, err := typeResolver.typeNameEncoder.EncodeWithEncoding(field.name, field.nameEncoding)
	if err != nil {
		return err
	}
	nameLen := len(metaString.GetEncodedBytes())
	if nameLen < FieldNameSizeThreshold {
		header |= uint8((nameLen-1)&0x0F) << 2 // 1-based encoding
	} else {
		header |= 0x0F << 2 // Max value, actual length will follow
		buffer.WriteVarUint32(uint32(nameLen - FieldNameSizeThreshold))
	}
	buffer.PutUint8(offset, header)

	// Write field type
	field.fieldType.write(buffer)

	// todo: support tag id
	// write field name
	if _, err := buffer.Write(metaString.GetEncodedBytes()); err != nil {
		return err
	}
	return nil
}
diff --git a/go/fory/type_def_encoder_test.go b/go/fory/type_def_encoder_test.go
new file mode 100644
index 000000000..472249183
--- /dev/null
+++ b/go/fory/type_def_encoder_test.go
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package fory
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
// Test structs for encoding/decoding
// SimpleStruct is a minimal two-field struct used to exercise TypeDef round-trips.
type SimpleStruct struct {
	ID   int32
	Name string
}
+
// TestTypeDefEncodingDecoding tests the encoding and decoding of TypeDef:
// it builds a TypeDef from a registered struct, writes the encoded bytes to a
// buffer, decodes them back, and checks the round-trip preserves the metadata.
func TestTypeDefEncodingDecoding(t *testing.T) {
	// Create a Fory instance for testing
	fory := NewFory(false)

	// Create a test struct instance
	testStruct := SimpleStruct{
		ID:   42,
		Name: "test",
	}

	// Register by name so the ns+name path of the codec is exercised.
	if err := fory.RegisterTagType("example.SimpleStruct", testStruct); err != nil {
		t.Fatalf("Failed to register tag type: %v", err)
	}

	// Build TypeDef from the struct
	structValue := reflect.ValueOf(testStruct)
	originalTypeDef, err := buildTypeDef(fory, structValue)
	if err != nil {
		t.Fatalf("Failed to build TypeDef: %v", err)
	}

	// Create a buffer with the encoded data
	buffer := NewByteBuffer(make([]byte, 0, 256))
	originalTypeDef.writeTypeDef(buffer)

	// Decode the TypeDef
	decodedTypeDef, err := readTypeDef(fory, buffer)
	if err != nil {
		t.Fatalf("Failed to decode TypeDef: %v", err)
	}

	// Verify typeId(ignore sign)
	assert.True(t, decodedTypeDef.typeId == originalTypeDef.typeId || decodedTypeDef.typeId == -originalTypeDef.typeId, "TypeId mismatch")
	assert.Equal(t, originalTypeDef.registerByName, decodedTypeDef.registerByName, "RegisterByName mismatch")
	assert.Equal(t, originalTypeDef.compressed, decodedTypeDef.compressed, "Compressed flag mismatch")

	// Verify field count matches
	assert.Equal(t, len(originalTypeDef.fieldInfos), len(decodedTypeDef.fieldInfos), "Field count mismatch")

	// Verify field names match
	for i, originalField := range originalTypeDef.fieldInfos {
		decodedField := decodedTypeDef.fieldInfos[i]

		assert.Equal(t, originalField.name, decodedField.name, "Field name mismatch at index %d", i)
		assert.Equal(t, originalField.nameEncoding, decodedField.nameEncoding, "Field name encoding mismatch at index %d", i)
		assert.Equal(t, originalField.nullable, decodedField.nullable, "Field nullable mismatch at index %d", i)
		assert.Equal(t, originalField.trackingRef, decodedField.trackingRef, "Field trackingRef mismatch at index %d", i)
		assert.Equal(t, originalField.fieldType.TypeId(), decodedField.fieldType.TypeId(), "Field type ID mismatch at index %d", i)
	}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]