chore: migrate to gitea
Some checks failed
golangci-lint / lint (push) Failing after 1m30s
Test / test (push) Failing after 2m17s

This commit is contained in:
2026-01-27 00:40:46 +01:00
parent 1e05874160
commit f8df24c29d
3169 changed files with 1216434 additions and 1587 deletions

201
vendor/github.com/bytedance/sonic/loader/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

245
vendor/github.com/bytedance/sonic/loader/funcdata.go generated vendored Normal file
View File

@@ -0,0 +1,245 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`encoding`
`encoding/binary`
`fmt`
`reflect`
`strings`
`sync`
`unsafe`
)
// Pclntab encoding parameters; these mirror values hard-coded in the
// Go runtime for the targets this loader supports.
const (
	_MinLC uint8 = 1 // minimum instruction size used as the pc quantum
	_PtrSize uint8 = 8 // pointer size in bytes (64-bit targets only)
)
const (
	_N_FUNCDATA = 8 // number of FUNCDATA slots emitted per function
	_INVALID_FUNCDATA_OFFSET = ^uint32(0) // sentinel meaning "no funcdata entry"
	_FUNC_SIZE = unsafe.Sizeof(_func{}) // size of the fixed prefix of a _func record
	// findfunc bucket geometry; consumed by writeFindfunctab.
	_MINFUNC = 16 // minimum size for a function
	_BUCKETSIZE = 256 * _MINFUNC
	_SUBBUCKETS = 16
	_SUB_BUCKETSIZE = _BUCKETSIZE / _SUBBUCKETS
)
// Function flags stored in Func.Flag / _func.
// Note: This list must match the list in runtime/symtab.go.
const (
	FuncFlag_TOPFRAME = 1 << iota
	FuncFlag_SPWRITE
	FuncFlag_ASM
)
// PCDATA and FUNCDATA table indexes.
//
// See funcdata.h and $GROOT/src/cmd/internal/objabi/funcdata.go.
const (
	_FUNCDATA_ArgsPointerMaps = 0
	_FUNCDATA_LocalsPointerMaps = 1
	_FUNCDATA_StackObjects = 2
	_FUNCDATA_InlTree = 3
	_FUNCDATA_OpenCodedDeferInfo = 4
	_FUNCDATA_ArgInfo = 5
	_FUNCDATA_ArgLiveInfo = 6
	_FUNCDATA_WrapInfo = 7
	// ArgsSizeUnknown is set in Func.argsize to mark all functions
	// whose argument size is unknown (C vararg functions, and
	// assembly code without an explicit specification).
	// This value is generated by the compiler, assembler, or linker.
	ArgsSizeUnknown = -0x80000000
)
// moduleCache caches, per loaded module, the []byte holding that
// module's serialized funcdata and findfuncbucket table. Keeping the
// slice reachable here matters: the moduledata stores raw uintptrs
// into it (see makeModuledata), which the GC does not trace.
// Guarded by the embedded Mutex.
var moduleCache = struct {
	m map[*moduledata][]byte
	sync.Mutex
}{
	m: make(map[*moduledata][]byte),
}
// Func contains information about a function to be loaded: where its
// machine code lives relative to the module text, its identity
// metadata, and the PC-data / func-data tables the runtime consults
// during stack walking and GC.
type Func struct {
	ID uint8 // see runtime/symtab.go
	Flag uint8 // see runtime/symtab.go
	ArgsSize int32 // args byte size
	EntryOff uint32 // start pc, offset to moduledata.text
	TextSize uint32 // size of func text
	DeferReturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
	FileIndex uint32 // index into filetab
	Name string // name of function

	// PC data tables (pc-delta -> value-delta encodings).
	Pcsp *Pcdata // PC -> SP delta
	Pcfile *Pcdata // PC -> file index
	Pcline *Pcdata // PC -> line number
	PcUnsafePoint *Pcdata // PC -> unsafe point, must be PCDATA_UnsafePointSafe or PCDATA_UnsafePointUnsafe
	PcStackMapIndex *Pcdata // PC -> stack map index, relative to ArgsPointerMaps and LocalsPointerMaps
	PcInlTreeIndex *Pcdata // PC -> inlining tree index, relative to InlTree
	PcArgLiveIndex *Pcdata // PC -> arg live index, relative to ArgLiveInfo

	// Func data, must implement encoding.BinaryMarshaler.
	// nil entries are encoded as _INVALID_FUNCDATA_OFFSET (see writeFuncdata).
	ArgsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap
	LocalsPointerMaps encoding.BinaryMarshaler // concrete type: *StackMap
	StackObjects encoding.BinaryMarshaler
	InlTree encoding.BinaryMarshaler
	OpenCodedDeferInfo encoding.BinaryMarshaler
	ArgInfo encoding.BinaryMarshaler
	ArgLiveInfo encoding.BinaryMarshaler
	WrapInfo encoding.BinaryMarshaler
}
// getOffsetOf reports the byte offset of the named field inside the
// struct type of data. It panics if the type has no such field.
func getOffsetOf(data interface{}, field string) uintptr {
	typ := reflect.TypeOf(data)
	sf, found := typ.FieldByName(field)
	if !found {
		panic(fmt.Sprintf("field %s not found in struct %s", field, typ.Name()))
	}
	return sf.Offset
}
// rnd rounds v up to the next multiple of r. A non-positive r returns
// v unchanged. Negative v is handled by normalizing the remainder into
// [0, r) before subtracting, so the result still rounds toward +inf.
func rnd(v int64, r int64) int64 {
	if r <= 0 {
		return v
	}
	n := v + r - 1
	if m := n % r; m < 0 {
		n -= m + r
	} else {
		n -= m
	}
	return n
}
// byteOrder is the byte order used for all pclntab serialization in
// this package. NOTE(review): hard-coded little-endian — big-endian
// targets are not supported by this encoder.
var (
	byteOrder binary.ByteOrder = binary.LittleEndian
)
// funcNameParts splits a (possibly generic) function name around its
// type-parameter list: "f[T].m" yields ("f", "[...]", ".m"). Names
// without brackets — or with a '[' that has no matching ']' after it —
// come back whole, with empty middle and tail.
func funcNameParts(name string) (string, string, string) {
	open := strings.IndexByte(name, '[')
	if open < 0 {
		return name, "", ""
	}
	// The last ']' anywhere in the string: if it sits at or before the
	// '[' (or does not exist), the bracket is unmatched and the name is
	// returned intact — exactly what the previous backward scan did.
	end := strings.LastIndexByte(name, ']')
	if end <= open {
		return name, "", ""
	}
	return name[:open], "[...]", name[end+1:]
}
// makeFuncnameTab packs every function name into one NUL-terminated
// string table and returns, per function, the byte offset where its
// name begins:
//
//	nameOff[0] -> namePartA namePartB namePartC \x00
//	nameOff[1] -> namePartA namePartB namePartC \x00
//	...
//
// Offset 0 is reserved (a lone NUL) so a zero offset never addresses
// a real name.
func makeFuncnameTab(funcs []Func) (tab []byte, offs []int32) {
	tab = append(tab, 0)
	offs = make([]int32, len(funcs))
	pos := 1
	for i := range funcs {
		offs[i] = int32(pos)
		head, mid, tail := funcNameParts(funcs[i].Name)
		for _, part := range []string{head, mid, tail} {
			tab = append(tab, part...)
			pos += len(part)
		}
		tab = append(tab, 0)
		pos++
	}
	return
}
// makeFilenametab flattens the file-name lists of all compilation
// units into one NUL-terminated string table (filetab). cutab records
// every file's byte offset in filetab, in CU order; cuOffsets records,
// per CU, the index of that CU's first entry in cutab:
//
//	cuOffsets[i]              -> first cutab slot of CUs[i]
//	cutab[k]                  -> filetab offset of the k-th file name
//	filetab[cutab[k]:...]     -> the name bytes, terminated by \x00
func makeFilenametab(cus []compilationUnit) (cutab []uint32, filetab []byte, cuOffsets []uint32) {
	cuOffsets = make([]uint32, len(cus))
	var nextSlot, nextByte int
	for i := range cus {
		cuOffsets[i] = uint32(nextSlot)
		for _, fname := range cus[i].fileNames {
			cutab = append(cutab, uint32(nextByte))
			filetab = append(filetab, fname...)
			filetab = append(filetab, 0)
			nextByte += len(fname) + 1
		}
		nextSlot += len(cus[i].fileNames)
	}
	return
}
// writeFuncdata serializes every Func's eight funcdata blobs into *out
// and returns fstart, the position in *out where they begin, plus, per
// function, each blob's offset relative to fstart. Slots whose
// marshaler is nil are recorded as _INVALID_FUNCDATA_OFFSET ("no
// entry"). A leading zero byte is written so no real blob lands at
// offset 0. Panics if any MarshalBinary fails.
func writeFuncdata(out *[]byte, funcs []Func) (fstart int, funcdataOffs [][]uint32) {
	fstart = len(*out)
	*out = append(*out, byte(0))
	offs := uint32(1)
	funcdataOffs = make([][]uint32, len(funcs))
	for i, f := range funcs {
		// writer appends one blob and records its offset; it must be
		// called once per _FUNCDATA_* slot, in index order, since the
		// slot number is implied by append position.
		var writer = func(fd encoding.BinaryMarshaler) {
			var ab []byte
			var err error
			if fd != nil {
				ab, err = fd.MarshalBinary()
				if err != nil {
					panic(err)
				}
				funcdataOffs[i] = append(funcdataOffs[i], offs)
			} else {
				// A placeholder byte is still emitted for nil slots, so
				// offs keeps advancing past the sentinel entry.
				ab = []byte{0}
				funcdataOffs[i] = append(funcdataOffs[i], _INVALID_FUNCDATA_OFFSET)
			}
			*out = append(*out, ab...)
			offs += uint32(len(ab))
		}
		writer(f.ArgsPointerMaps)
		writer(f.LocalsPointerMaps)
		writer(f.StackObjects)
		writer(f.InlTree)
		writer(f.OpenCodedDeferInfo)
		writer(f.ArgInfo)
		writer(f.ArgLiveInfo)
		writer(f.WrapInfo)
	}
	return
}

View File

@@ -0,0 +1,460 @@
//go:build !go1.17 || go1.27
// +build !go1.17 go1.27
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`os`
`unsafe`
`sort`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
	// _Magic is the pclntab header magic for the Go release selected by
	// this file's build constraint; pcHeader.magic is set to this value.
	_Magic uint32 = 0xfffffffa
)
// pcHeader mirrors the runtime's pcHeader; the runtime reads this
// structure verbatim, so field order and sizes must not change.
type pcHeader struct {
	magic uint32 // must equal _Magic (0xFFFFFFFA here)
	pad1, pad2 uint8 // 0,0
	minLC uint8 // min instruction size
	ptrSize uint8 // size of a ptr in bytes
	nfunc int // number of functions in the module
	nfiles uint // number of entries in the file tab
	funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
	cuOffset uintptr // offset to the cutab variable from pcHeader
	filetabOffset uintptr // offset to the filetab variable from pcHeader
	pctabOffset uintptr // offset to the pctab variable from pcHeader
	pclnOffset uintptr // offset to the pclntab variable from pcHeader
}
// moduledata mirrors runtime.moduledata for the Go release selected by
// this file's build constraint. The runtime walks this structure
// directly, so the layout must match runtime/symtab.go exactly —
// NOTE(review): do not reorder, add, or remove fields.
type moduledata struct {
	pcHeader *pcHeader
	funcnametab []byte
	cutab []uint32
	filetab []byte
	pctab []byte
	pclntable []byte
	ftab []funcTab
	findfunctab uintptr
	minpc, maxpc uintptr // first func address, last func address + last func size
	text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
	noptrdata, enoptrdata uintptr
	data, edata uintptr
	bss, ebss uintptr
	noptrbss, enoptrbss uintptr
	end, gcdata, gcbss uintptr
	types, etypes uintptr
	textsectmap []textSection // see runtime/symtab.go: textAddr()
	typelinks []int32 // offsets from types
	itablinks []*rt.GoItab
	ptab []ptabEntry
	pluginpath string
	pkghashes []modulehash
	modulename string
	modulehashes []modulehash
	hasmain uint8 // 1 if module contains the main function, 0 otherwise
	gcdatamask, gcbssmask bitVector
	typemap map[int32]*rt.GoType // offset to *_rtype in previous module
	bad bool // module failed to load and should be ignored
	next *moduledata
}
// _func mirrors the runtime's per-function metadata record as stored
// inside pclntab. makePclntable serializes this layout by hand, so any
// change here must be matched there.
type _func struct {
	entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart
	nameOff int32 // function name, as index into moduledata.funcnametab.
	args int32 // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
	pcsp uint32
	pcfile uint32
	pcln uint32
	npcdata uint32
	cuOffset uint32 // runtime.cutab offset of this function's CU
	funcID uint8 // set for certain special runtime functions
	_ [2]byte // pad
	nfuncdata uint8 //
	// The end of the struct is followed immediately by two variable-length
	// arrays that reference the pcdata and funcdata locations for this
	// function.
	// pcdata contains the offset into moduledata.pctab for the start of
	// that index's table. e.g.,
	// &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
	// the unsafe point table.
	//
	// An offset of 0 indicates that there is no table.
	//
	// pcdata [npcdata]uint32
	// funcdata contains the offset past moduledata.gofunc which contains a
	// pointer to that index's funcdata. e.g.,
	// *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
	// the argument pointer map.
	//
	// An offset of ^uint32(0) indicates that there is no entry.
	//
	// funcdata [nfuncdata]uint32
}
// funcTab is one entry of the pc->funcoff lookup table: a function's
// entry pc and the offset of its _func record within pclntab.
type funcTab struct {
	entry uintptr
	funcoff uintptr
}
// bitVector mirrors the runtime's GC bitmap descriptor.
type bitVector struct {
	n int32 // # of bits
	bytedata *uint8
}
// ptabEntry mirrors the runtime's plugin-export table entry.
type ptabEntry struct {
	name int32
	typ int32
}
// textSection mirrors the runtime's relocated text-section descriptor.
type textSection struct {
	vaddr uintptr // prelinked section vaddr
	end uintptr // vaddr + section length
	baseaddr uintptr // relocated section address
}
// modulehash mirrors the runtime's module-hash record used for
// plugin/module consistency checks.
type modulehash struct {
	modulename string
	linktimehash string
	runtimehash *string
}
// findfuncbucket is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx uint32
	_SUBBUCKETS [16]byte
}
// compilationUnit groups the source file names belonging to one CU;
// consumed by makeFilenametab.
type compilationUnit struct {
	fileNames []string
}
// makeFtab lays out the pc->func table. It returns the funcTab entries
// (entry pc -> _func offset, plus a final end-pc sentinel), the total
// byte size the pclntab will need, and each function's _func record
// offset within that table (startLocations). The size accounts for the
// fixed _FUNC_SIZE prefix plus the variable pcdata/funcdata arrays,
// rounded to pointer alignment per record.
func makeFtab(funcs []_func, maxpc uintptr) (ftab []funcTab, pclntabSize int64, startLocations []uint32) {
	// Allocate space for the pc->func table. This structure consists of a pc offset
	// and an offset to the func structure. After that, we have a single pc
	// value that marks the end of the last function in the binary.
	pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize))
	startLocations = make([]uint32, len(funcs))
	for i, f := range funcs {
		pclntabSize = rnd(pclntabSize, int64(_PtrSize))
		//writePCToFunc
		startLocations[i] = uint32(pclntabSize)
		pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*_PtrSize + uint8(f.npcdata)*4)
	}
	ftab = make([]funcTab, 0, len(funcs)+1)
	// write a map of pc->func info offsets
	for i, f := range funcs {
		ftab = append(ftab, funcTab{uintptr(f.entry), uintptr(startLocations[i])})
	}
	// Final entry of table is just end pc offset.
	ftab = append(ftab, funcTab{maxpc, 0})
	return
}
// makePclntable serializes the pclntab byte blob:
//
//	Pcln table format: [...]funcTab + [...]_Func
//
// First the funcTab index (entry pc, _func offset pairs, then the
// end-pc sentinel), then each _func record at its startLocations[i]
// offset, followed by its pcdata offsets and absolute funcdata
// addresses. NOTE(review): the 8-byte writes and 16-byte stride assume
// _PtrSize == 8, consistent with the package-wide 64-bit assumption.
func makePclntable(size int64, startLocations []uint32, funcs []_func, maxpc uintptr, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) {
	pclntab = make([]byte, size, size)
	// write a map of pc->func info offsets
	offs := 0
	for i, f := range funcs {
		byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry))
		byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i]))
		offs += 16
	}
	// Final entry of table is just end pc offset.
	byteOrder.PutUint64(pclntab[offs:offs+8], uint64(maxpc))
	offs += 8
	// write func info table
	for i, f := range funcs {
		off := startLocations[i]
		// write _func structure to pclntab, field by field in declaration
		// order (must stay in sync with the _func type).
		byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry))
		off += 8
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata))
		off += 4
		byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset))
		off += 4
		pclntab[off] = f.funcID
		// NOTICE: _[2]byte alignment
		off += 3
		pclntab[off] = f.nfuncdata
		off += 1
		// NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3
		for j := 3; j < len(pcdataOffs[i]); j++ {
			byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j]))
			off += 4
		}
		// Align before the pointer-sized funcdata entries.
		off = uint32(rnd(int64(off), int64(_PtrSize)))
		// funcdata refs as offsets from gofunc
		for _, funcdata := range funcdataOffs[i] {
			if funcdata == _INVALID_FUNCDATA_OFFSET {
				byteOrder.PutUint64(pclntab[off:off+8], 0)
			} else {
				byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata))
			}
			off += 8
		}
	}
	return
}
// writeFindfunctab appends the findfunc table to *out and returns the
// offset where it starts; the table maps a pc to the index of its
// function in ftab.
//
// All text section are divided into buckets sized _BUCKETSIZE(4K):
// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64),
// and it has a base idx to plus the offset stored in jth subbucket.
// see findfunc() in runtime/symtab.go
func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) {
	start = len(*out)
	// ftab is sorted by entry; the last entry is the end-pc sentinel.
	max := ftab[len(ftab)-1].entry
	min := ftab[0].entry
	nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE
	n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE
	tab := make([]findfuncbucket, 0, nbuckets)
	// s walks sub-bucket boundaries, e walks bucket boundaries; both are
	// monotone indices into ftab.
	var s, e = 0, 0
	for i := 0; i < int(nbuckets); i++ {
		// store the start func of the bucket
		var fb = findfuncbucket{idx: uint32(s)}
		// find the last e-th func of the bucket
		var pc = min + uintptr((i+1)*_BUCKETSIZE)
		for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {
		}
		for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ {
			// store the start func of the subbucket as a delta from the
			// bucket's base index (fits in a byte by construction).
			fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx)
			// find the s-th end func of the subbucket
			pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE)
			for ; s < len(ftab)-1 && ftab[s+1].entry <= pc; s++ {
			}
		}
		s = e
		tab = append(tab, fb)
	}
	// write findfuncbucket: reinterpret the slice's backing memory as raw
	// bytes and append it wholesale.
	if len(tab) > 0 {
		size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab)
		*out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...)
	}
	return
}
// makeModuledata builds a complete moduledata for a blob of machine
// code: it sorts funcs by entry offset (in place, through funcsp),
// builds the name/file/pc/func tables, mmaps an executable copy of
// text, and fills in a pcHeader whose offsets are computed from the
// moduledata layout itself. The funcdata/findfuncbucket bytes are
// parked in moduleCache so the raw uintptrs stored in mod stay alive.
func makeModuledata(name string, filenames []string, funcsp *[]Func, text []byte) (mod *moduledata) {
	mod = new(moduledata)
	mod.modulename = name
	// sort funcs by entry
	funcs := *funcsp
	sort.Slice(funcs, func(i, j int) bool {
		return funcs[i].EntryOff < funcs[j].EntryOff
	})
	*funcsp = funcs
	// make filename table (all files live in a single compilation unit)
	cu := make([]string, 0, len(filenames))
	cu = append(cu, filenames...)
	cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}})
	mod.cutab = cutab
	mod.filetab = filetab
	// make funcname table
	funcnametab, nameOffs := makeFuncnameTab(funcs)
	mod.funcnametab = funcnametab
	// mmap() text and funcdata segments, rounded up to a whole page
	p := os.Getpagesize()
	size := int(rnd(int64(len(text)), int64(p)))
	addr := mmap(size)
	// copy the machine code
	s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
	copy(s, text)
	// make it executable
	mprotect(addr, size)
	// assign addresses
	mod.text = addr
	mod.etext = addr + uintptr(size)
	mod.minpc = addr
	mod.maxpc = addr + uintptr(len(text))
	// make pcdata table
	// NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
	cuOff := cuOffs[0]
	pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOff, nameOffs)
	mod.pctab = pctab
	// write func data
	// NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
	// TODO: estimate accurate capacity
	cache := make([]byte, 0, len(funcs)*int(_PtrSize))
	fstart, funcdataOffs := writeFuncdata(&cache, funcs)
	// make pc->func (binary search) func table
	ftab, pclntSize, startLocations := makeFtab(_funcs, mod.maxpc)
	mod.ftab = ftab
	// write pc->func (modmap) findfunc table
	ffstart := writeFindfunctab(&cache, ftab)
	// cache funcdata and findfuncbucket so the GC keeps them alive; mod
	// only holds raw uintptrs into this memory.
	moduleCache.Lock()
	moduleCache.m[mod] = cache
	moduleCache.Unlock()
	mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart))
	funcdataAddr := uintptr(rt.IndexByte(cache, fstart))
	// make pclnt table
	pclntab := makePclntable(pclntSize, startLocations, _funcs, mod.maxpc, pcdataOffs, funcdataAddr, funcdataOffs)
	mod.pclntable = pclntab
	// make pc header; offsets are field offsets within moduledata, as the
	// runtime resolves them relative to the pcHeader pointer's owner.
	mod.pcHeader = &pcHeader {
		magic : _Magic,
		minLC : _MinLC,
		ptrSize : _PtrSize,
		nfunc : len(funcs),
		nfiles: uint(len(cu)),
		funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
		cuOffset: getOffsetOf(moduledata{}, "cutab"),
		filetabOffset: getOffsetOf(moduledata{}, "filetab"),
		pctabOffset: getOffsetOf(moduledata{}, "pctab"),
		pclnOffset: getOffsetOf(moduledata{}, "pclntable"),
	}
	// special case: gcdata and gcbss must be non-empty
	mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
	mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
	return
}
// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind pcdata in the table.
// It also fills in the _func records (entry, name/args/CU metadata and
// the pcsp/pcfile/pcln offsets) from the high-level Func descriptions.
// Panics if any Pcdata fails to marshal.
func makePctab(funcs []Func, addr uintptr, cuOffset uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
	_funcs = make([]_func, len(funcs))
	// Pctab offsets of 0 are considered invalid in the runtime. We respect
	// that by just padding a single byte at the beginning of runtime.pctab,
	// that way no real offsets can be zero.
	pctab = make([]byte, 1, 12*len(funcs)+1)
	pcdataOffs = make([][]uint32, len(funcs))
	for i, f := range funcs {
		_f := &_funcs[i]
		// writer appends one pcdata table and records its offset; nil
		// tables get a placeholder byte and an invalid-offset marker.
		var writer = func(pc *Pcdata) {
			var ab []byte
			var err error
			if pc != nil {
				ab, err = pc.MarshalBinary()
				if err != nil {
					panic(err)
				}
				pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab)))
			} else {
				ab = []byte{0}
				pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET)
			}
			pctab = append(pctab, ab...)
		}
		// pcsp/pcfile/pcln offsets are captured before each write, since
		// writer appends to pctab as a side effect.
		if f.Pcsp != nil {
			_f.pcsp = uint32(len(pctab))
		}
		writer(f.Pcsp)
		if f.Pcfile != nil {
			_f.pcfile = uint32(len(pctab))
		}
		writer(f.Pcfile)
		if f.Pcline != nil {
			_f.pcln = uint32(len(pctab))
		}
		writer(f.Pcline)
		writer(f.PcUnsafePoint)
		writer(f.PcStackMapIndex)
		writer(f.PcInlTreeIndex)
		writer(f.PcArgLiveIndex)
		_f.entry = addr + uintptr(f.EntryOff)
		_f.nameOff = nameOffset[i]
		_f.args = f.ArgsSize
		_f.deferreturn = f.DeferReturn
		// NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)]
		_f.npcdata = uint32(_N_PCDATA)
		_f.cuOffset = cuOffset
		_f.funcID = f.ID
		_f.nfuncdata = uint8(_N_FUNCDATA)
	}
	return
}
// registerFunction is a no-op in this build configuration; all
// parameters are ignored. NOTE(review): presumably kept so callers can
// compile against every build-tagged variant of this file uniformly.
func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {}

View File

@@ -0,0 +1,461 @@
//go:build go1.17 && !go1.18
// +build go1.17,!go1.18
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`os`
`unsafe`
`sort`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
	// _Magic is the pclntab header magic for go1.17 (see this file's
	// build constraint); pcHeader.magic must carry this value.
	_Magic uint32 = 0xfffffffa
)
// pcHeader mirrors the go1.17 runtime's pcHeader; the runtime reads it
// verbatim, so field order and sizes must not change.
type pcHeader struct {
	magic uint32 // must equal _Magic (0xFFFFFFFA here)
	pad1, pad2 uint8 // 0,0
	minLC uint8 // min instruction size
	ptrSize uint8 // size of a ptr in bytes
	nfunc int // number of functions in the module
	nfiles uint // number of entries in the file tab
	funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
	cuOffset uintptr // offset to the cutab variable from pcHeader
	filetabOffset uintptr // offset to the filetab variable from pcHeader
	pctabOffset uintptr // offset to the pctab variable from pcHeader
	pclnOffset uintptr // offset to the pclntab variable from pcHeader
}
// moduledata mirrors runtime.moduledata for go1.17 (per this file's
// build constraint). The runtime traverses this layout directly, so it
// must stay byte-for-byte in sync with runtime/symtab.go.
type moduledata struct {
	pcHeader *pcHeader
	funcnametab []byte
	cutab []uint32
	filetab []byte
	pctab []byte
	pclntable []byte
	ftab []funcTab
	findfunctab uintptr
	minpc, maxpc uintptr // first func address, last func address + last func size
	text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
	noptrdata, enoptrdata uintptr
	data, edata uintptr
	bss, ebss uintptr
	noptrbss, enoptrbss uintptr
	end, gcdata, gcbss uintptr
	types, etypes uintptr
	textsectmap []textSection // see runtime/symtab.go: textAddr()
	typelinks []int32 // offsets from types
	itablinks []*rt.GoItab
	ptab []ptabEntry
	pluginpath string
	pkghashes []modulehash
	modulename string
	modulehashes []modulehash
	hasmain uint8 // 1 if module contains the main function, 0 otherwise
	gcdatamask, gcbssmask bitVector
	typemap map[int32]*rt.GoType // offset to *_rtype in previous module
	bad bool // module failed to load and should be ignored
	next *moduledata
}
// _func mirrors runtime._func for the Go version targeted by this file.
// Note that here entry is a full uintptr (absolute PC), unlike go1.18+
// which stores a uint32 offset from moduledata.text. Layout must stay
// byte-compatible with the runtime's definition.
type _func struct {
    entry uintptr // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    funcID uint8 // set for certain special runtime functions
    _ [2]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}
// funcTab is one entry of the pc->funcoff lookup table
// (mirrors runtime.functab).
type funcTab struct {
    entry uintptr
    funcoff uintptr
}

// bitVector mirrors runtime.bitvector (GC pointer bitmaps).
type bitVector struct {
    n int32 // # of bits
    bytedata *uint8
}

// ptabEntry mirrors runtime.ptabEntry (plugin export table entry).
type ptabEntry struct {
    name int32
    typ int32
}

// textSection mirrors runtime.textsect; used by textAddr() to relocate PCs.
type textSection struct {
    vaddr uintptr // prelinked section vaddr
    end uintptr // vaddr + section length
    baseaddr uintptr // relocated section address
}

// modulehash mirrors runtime.modulehash (plugin ABI compatibility check).
type modulehash struct {
    modulename string
    linktimehash string
    runtimehash *string
}

// findfuncbucket is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
    idx uint32
    _SUBBUCKETS [16]byte
}

// compilationUnit groups the source file names belonging to one CU,
// used to build the cutab/filetab pair.
type compilationUnit struct {
    fileNames []string
}
// makeFtab builds the pc->func lookup table (one funcTab per function plus a
// trailing sentinel holding maxpc, mirroring runtime.moduledata.ftab), and
// computes the total pclntab size along with the byte offset of each _func
// record inside it.
//
// funcs must already be sorted by entry address; startLocations[i] is the
// pclntab offset at which funcs[i]'s _func record will be written.
func makeFtab(funcs []_func, maxpc uintptr) (ftab []funcTab, pclntabSize int64, startLocations []uint32) {
    // Allocate space for the pc->func table. This structure consists of a pc offset
    // and an offset to the func structure. After that, we have a single pc
    // value that marks the end of the last function in the binary.
    pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize))
    startLocations = make([]uint32, len(funcs))
    for i, f := range funcs {
        pclntabSize = rnd(pclntabSize, int64(_PtrSize))
        // writePCToFunc
        startLocations[i] = uint32(pclntabSize)
        // Compute the per-func record size in int64: the previous uint8
        // arithmetic (uint8(_FUNC_SIZE) + nfuncdata*_PtrSize + uint8(npcdata)*4)
        // silently wraps once the record exceeds 255 bytes.
        pclntabSize += int64(_FUNC_SIZE) + int64(f.nfuncdata)*int64(_PtrSize) + int64(f.npcdata)*4
    }
    ftab = make([]funcTab, 0, len(funcs)+1)
    // write a map of pc->func info offsets
    for i, f := range funcs {
        ftab = append(ftab, funcTab{f.entry, uintptr(startLocations[i])})
    }
    // Final entry of table is just end pc offset.
    ftab = append(ftab, funcTab{maxpc, 0})
    return
}
// Pcln table format: [...]funcTab + [...]_Func
//
// makePclntable serializes the funcTab entries followed by each _func record
// (with its trailing pcdata-offset and funcdata-pointer arrays) into a byte
// slice laid out exactly as the runtime expects. startLocations must come
// from makeFtab, so each _func lands at its pre-computed offset.
func makePclntable(size int64, startLocations []uint32, funcs []_func, maxpc uintptr, pcdataOffs [][]uint32, funcdataAddr uintptr, funcdataOffs [][]uint32) (pclntab []byte) {
    pclntab = make([]byte, size, size)
    // write a map of pc->func info offsets
    offs := 0
    for i, f := range funcs {
        byteOrder.PutUint64(pclntab[offs:offs+8], uint64(f.entry))
        byteOrder.PutUint64(pclntab[offs+8:offs+16], uint64(startLocations[i]))
        offs += 16
    }
    // Final entry of table is just end pc offset.
    byteOrder.PutUint64(pclntab[offs:offs+8], uint64(maxpc))
    offs += 8
    // write func info table
    for i, f := range funcs {
        off := startLocations[i]
        // write _func structure to pclntab
        byteOrder.PutUint64(pclntab[off:off+8], uint64(f.entry))
        off += 8
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.nameOff))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.args))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.deferreturn))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcsp))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcfile))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.pcln))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.npcdata))
        off += 4
        byteOrder.PutUint32(pclntab[off:off+4], uint32(f.cuOffset))
        off += 4
        pclntab[off] = f.funcID
        // NOTICE: _[2]byte alignment
        off += 3
        pclntab[off] = f.nfuncdata
        off += 1
        // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3
        for j := 3; j < len(pcdataOffs[i]); j++ {
            byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j]))
            off += 4
        }
        // funcdata pointers are pointer-aligned after the pcdata array
        off = uint32(rnd(int64(off), int64(_PtrSize)))
        // funcdata refs as offsets from gofunc
        for _, funcdata := range funcdataOffs[i] {
            if funcdata == _INVALID_FUNCDATA_OFFSET {
                byteOrder.PutUint64(pclntab[off:off+8], 0)
            } else {
                byteOrder.PutUint64(pclntab[off:off+8], uint64(funcdataAddr)+uint64(funcdata))
            }
            off += 8
        }
    }
    return
}
// findfunc table used to map pc to belonging func,
// returns the index in the func table.
//
// All text section are divided into buckets sized _BUCKETSIZE(4K):
// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64),
// and it has a base idx to plus the offset stored in jth subbucket.
// see findfunc() in runtime/symtab.go
//
// writeFindfunctab appends the findfuncbucket array to *out and returns the
// byte offset within *out at which the table starts. ftab must end with the
// sentinel (maxpc) entry produced by makeFtab.
func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) {
    start = len(*out)
    max := ftab[len(ftab)-1].entry
    min := ftab[0].entry
    nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE
    n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE
    tab := make([]findfuncbucket, 0, nbuckets)
    // s walks subbucket func indices, e walks bucket func indices
    var s, e = 0, 0
    for i := 0; i<int(nbuckets); i++ {
        // store the start func of the bucket
        var fb = findfuncbucket{idx: uint32(s)}
        // find the last e-th func of the bucket
        var pc = min + uintptr((i+1)*_BUCKETSIZE)
        for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {}
        for j := 0; j<_SUBBUCKETS && (i*_SUBBUCKETS+j)<int(n); j++ {
            // store the start func of the subbucket
            fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx)
            // find the s-th end func of the subbucket
            pc = min + uintptr(i*_BUCKETSIZE) + uintptr((j+1)*_SUB_BUCKETSIZE)
            for ; s < len(ftab)-1 && ftab[s+1].entry <= pc; s++ {}
        }
        s = e
        tab = append(tab, fb)
    }
    // write findfuncbucket
    if len(tab) > 0 {
        // raw memory copy of the bucket array; findfuncbucket is pointer-free
        size := int(unsafe.Sizeof(findfuncbucket{}))*len(tab)
        *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...)
    }
    return
}
// makeModuledata assembles a complete moduledata for a JIT-compiled text
// segment: it mmaps and copies the machine code, builds the filename,
// funcname, pcdata, funcdata, ftab, findfunctab and pclntab tables, and
// fills in the pcHeader so the runtime can symbolize PCs in this module.
//
// funcsp is sorted in place by entry offset as a side effect.
func makeModuledata(name string, filenames []string, funcsp *[]Func, text []byte) (mod *moduledata) {
    mod = new(moduledata)
    mod.modulename = name
    // sort funcs by entry
    funcs := *funcsp
    sort.Slice(funcs, func(i, j int) bool {
        return funcs[i].EntryOff < funcs[j].EntryOff
    })
    *funcsp = funcs
    // make filename table
    cu := make([]string, 0, len(filenames))
    cu = append(cu, filenames...)
    cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}})
    mod.cutab = cutab
    mod.filetab = filetab
    // make funcname table
    funcnametab, nameOffs := makeFuncnameTab(funcs)
    mod.funcnametab = funcnametab
    // mmap() text and funcdata segments
    p := os.Getpagesize()
    size := int(rnd(int64(len(text)), int64(p)))
    addr := mmap(size)
    // copy the machine code
    s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
    copy(s, text)
    // make it executable
    mprotect(addr, size)
    // assign addresses
    mod.text = addr
    mod.etext = addr + uintptr(size)
    mod.minpc = addr
    mod.maxpc = addr + uintptr(len(text))
    // make pcdata table
    // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
    cuOff := cuOffs[0]
    pctab, pcdataOffs, _funcs := makePctab(funcs, addr, cuOff, nameOffs)
    mod.pctab = pctab
    // write func data
    // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
    // TODO: estimate accurate capacity
    cache := make([]byte, 0, len(funcs)*int(_PtrSize))
    fstart, funcdataOffs := writeFuncdata(&cache, funcs)
    // make pc->func (binary search) func table
    ftab, pclntSize, startLocations := makeFtab(_funcs, mod.maxpc)
    mod.ftab = ftab
    // write pc->func (modmap) findfunc table
    ffstart := writeFindfunctab(&cache, ftab)
    // cache funcdata and findfuncbucket so the GC keeps them alive for the
    // lifetime of the module
    moduleCache.Lock()
    moduleCache.m[mod] = cache
    moduleCache.Unlock()
    // NOTE(review): rt.IndexByte presumably yields the address of cache[off];
    // confirm against internal/rt.
    mod.findfunctab = uintptr(rt.IndexByte(cache, ffstart))
    funcdataAddr := uintptr(rt.IndexByte(cache, fstart))
    // make pclnt table
    pclntab := makePclntable(pclntSize, startLocations, _funcs, mod.maxpc, pcdataOffs, funcdataAddr, funcdataOffs)
    mod.pclntable = pclntab
    // make pc header
    mod.pcHeader = &pcHeader {
        magic : _Magic,
        minLC : _MinLC,
        ptrSize : _PtrSize,
        nfunc : len(funcs),
        nfiles: uint(len(cu)),
        funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
        cuOffset: getOffsetOf(moduledata{}, "cutab"),
        filetabOffset: getOffsetOf(moduledata{}, "filetab"),
        pctabOffset: getOffsetOf(moduledata{}, "pctab"),
        pclnOffset: getOffsetOf(moduledata{}, "pclntable"),
    }
    // special case: gcdata and gcbss must by non-empty
    mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
    mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
    return
}
// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind pcdata in the table.
//
// It also builds the corresponding _func records: each _func's pcsp/pcfile/
// pcln fields are set to the pctab offset of that table (or left 0, meaning
// "no table", when the Func provides none).
func makePctab(funcs []Func, addr uintptr, cuOffset uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
    _funcs = make([]_func, len(funcs))
    // Pctab offsets of 0 are considered invalid in the runtime. We respect
    // that by just padding a single byte at the beginning of runtime.pctab,
    // that way no real offsets can be zero.
    pctab = make([]byte, 1, 12*len(funcs)+1)
    pcdataOffs = make([][]uint32, len(funcs))
    for i, f := range funcs {
        _f := &_funcs[i]
        // writer appends one pcdata table (or a 1-byte placeholder when pc
        // is nil) and records its offset in pcdataOffs[i]
        var writer = func(pc *Pcdata) {
            var ab []byte
            var err error
            if pc != nil {
                ab, err = pc.MarshalBinary()
                if err != nil {
                    panic(err)
                }
                pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab)))
            } else {
                ab = []byte{0}
                pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET)
            }
            pctab = append(pctab, ab...)
        }
        // the _f offsets must be captured BEFORE writer() grows pctab
        if f.Pcsp != nil {
            _f.pcsp = uint32(len(pctab))
        }
        writer(f.Pcsp)
        if f.Pcfile != nil {
            _f.pcfile = uint32(len(pctab))
        }
        writer(f.Pcfile)
        if f.Pcline != nil {
            _f.pcln = uint32(len(pctab))
        }
        writer(f.Pcline)
        writer(f.PcUnsafePoint)
        writer(f.PcStackMapIndex)
        writer(f.PcInlTreeIndex)
        writer(f.PcArgLiveIndex)
        _f.entry = addr + uintptr(f.EntryOff)
        _f.nameOff = nameOffset[i]
        _f.args = f.ArgsSize
        _f.deferreturn = f.DeferReturn
        // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)]
        _f.npcdata = uint32(_N_PCDATA)
        _f.cuOffset = cuOffset
        _f.funcID = f.ID
        _f.nfuncdata = uint8(_N_FUNCDATA)
    }
    return
}
// registerFunction is a no-op on this Go version: installing the moduledata
// is sufficient, no extra per-function registration is required.
func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {}

View File

@@ -0,0 +1,113 @@
// go:build go1.18 && !go1.20
//go:build go1.18 && !go1.20
// +build go1.18,!go1.20
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
    // _Magic is the pclntab header magic for go1.18/go1.19
    // (see runtime/symtab.go: pcHeader.magic).
    _Magic uint32 = 0xfffffff0
)

// moduledata mirrors runtime.moduledata for go1.18/go1.19 and MUST stay
// byte-compatible with the runtime's definition; the runtime walks it
// directly when symbolizing JIT-loaded code.
type moduledata struct {
    pcHeader *pcHeader
    funcnametab []byte
    cutab []uint32
    filetab []byte
    pctab []byte
    pclntable []byte
    ftab []funcTab
    findfunctab uintptr
    minpc, maxpc uintptr // first func address, last func address + last func size
    text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
    noptrdata, enoptrdata uintptr
    data, edata uintptr
    bss, ebss uintptr
    noptrbss, enoptrbss uintptr
    end, gcdata, gcbss uintptr
    types, etypes uintptr
    rodata uintptr
    gofunc uintptr // go.func.* is actual funcinfo object in image
    textsectmap []textSection // see runtime/symtab.go: textAddr()
    typelinks []int32 // offsets from types
    itablinks []*rt.GoItab
    ptab []ptabEntry
    pluginpath string
    pkghashes []modulehash
    modulename string
    modulehashes []modulehash
    hasmain uint8 // 1 if module contains the main function, 0 otherwise
    gcdatamask, gcbssmask bitVector
    typemap map[int32]*rt.GoType // offset to *_rtype in previous module
    bad bool // module failed to load and should be ignored
    next *moduledata
}

// _func mirrors runtime._func for go1.18/go1.19: entry became a uint32
// offset from moduledata.text (entryOff) and a flag byte was added.
// Layout must stay byte-compatible with the runtime's definition.
type _func struct {
    entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    funcID uint8 // set for certain special runtime functions
    flag uint8
    _ [1]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}

View File

@@ -0,0 +1,114 @@
//go:build go1.20 && !go1.21
// +build go1.20,!go1.21
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
    // _Magic is the pclntab header magic for go1.20
    // (see runtime/symtab.go: pcHeader.magic).
    _Magic uint32 = 0xFFFFFFF1
)

// moduledata mirrors runtime.moduledata for go1.20 (adds covctrs/ecovctrs
// over the go1.18 layout) and MUST stay byte-compatible with the runtime.
type moduledata struct {
    pcHeader *pcHeader
    funcnametab []byte
    cutab []uint32
    filetab []byte
    pctab []byte
    pclntable []byte
    ftab []funcTab
    findfunctab uintptr
    minpc, maxpc uintptr // first func address, last func address + last func size
    text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
    noptrdata, enoptrdata uintptr
    data, edata uintptr
    bss, ebss uintptr
    noptrbss, enoptrbss uintptr
    covctrs, ecovctrs uintptr
    end, gcdata, gcbss uintptr
    types, etypes uintptr
    rodata uintptr
    gofunc uintptr // go.func.* is actual funcinfo object in image
    textsectmap []textSection // see runtime/symtab.go: textAddr()
    typelinks []int32 // offsets from types
    itablinks []*rt.GoItab
    ptab []ptabEntry
    pluginpath string
    pkghashes []modulehash
    modulename string
    modulehashes []modulehash
    hasmain uint8 // 1 if module contains the main function, 0 otherwise
    gcdatamask, gcbssmask bitVector
    typemap map[int32]*rt.GoType // offset to *_rtype in previous module
    bad bool // module failed to load and should be ignored
    next *moduledata
}

// _func mirrors runtime._func for go1.20 (adds startLine over the go1.18
// layout). Layout must stay byte-compatible with the runtime's definition.
type _func struct {
    entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    startLine int32 // line number of start of function (func keyword/TEXT directive)
    funcID uint8 // set for certain special runtime functions
    flag uint8
    _ [1]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}

View File

@@ -0,0 +1,119 @@
//go:build go1.21 && !go1.23
// +build go1.21,!go1.23
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`unsafe`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
    // _Magic is the pclntab header magic for go1.21/go1.22
    // (see runtime/symtab.go: pcHeader.magic).
    _Magic uint32 = 0xFFFFFFF1
)

// moduledata mirrors runtime.moduledata for go1.21/go1.22 (adds inittasks
// over the go1.20 layout) and MUST stay byte-compatible with the runtime.
type moduledata struct {
    pcHeader *pcHeader
    funcnametab []byte
    cutab []uint32
    filetab []byte
    pctab []byte
    pclntable []byte
    ftab []funcTab
    findfunctab uintptr
    minpc, maxpc uintptr // first func address, last func address + last func size
    text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
    noptrdata, enoptrdata uintptr
    data, edata uintptr
    bss, ebss uintptr
    noptrbss, enoptrbss uintptr
    covctrs, ecovctrs uintptr
    end, gcdata, gcbss uintptr
    types, etypes uintptr
    rodata uintptr
    gofunc uintptr // go.func.* is actual funcinfo object in image
    textsectmap []textSection // see runtime/symtab.go: textAddr()
    typelinks []int32 // offsets from types
    itablinks []*rt.GoItab
    ptab []ptabEntry
    pluginpath string
    pkghashes []modulehash
    // This slice records the initializing tasks that need to be
    // done to start up the program. It is built by the linker.
    inittasks []unsafe.Pointer
    modulename string
    modulehashes []modulehash
    hasmain uint8 // 1 if module contains the main function, 0 otherwise
    gcdatamask, gcbssmask bitVector
    typemap map[int32]*rt.GoType // offset to *_rtype in previous module
    bad bool // module failed to load and should be ignored
    next *moduledata
}

// _func mirrors runtime._func for go1.21/go1.22; unchanged from the go1.20
// layout. Must stay byte-compatible with the runtime's definition.
type _func struct {
    entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    startLine int32 // line number of start of function (func keyword/TEXT directive)
    funcID uint8 // set for certain special runtime functions
    flag uint8
    _ [1]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}

View File

@@ -0,0 +1,118 @@
//go:build go1.23 && !go1.26
// +build go1.23,!go1.26
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`unsafe`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
    // _Magic is the pclntab header magic for go1.23-go1.25
    // (see runtime/symtab.go: pcHeader.magic).
    _Magic uint32 = 0xFFFFFFF1
)

// moduledata mirrors runtime.moduledata for go1.23-go1.25; note that `bad`
// moved ahead of gcdatamask relative to go1.21. MUST stay byte-compatible
// with the runtime's definition.
type moduledata struct {
    pcHeader *pcHeader
    funcnametab []byte
    cutab []uint32
    filetab []byte
    pctab []byte
    pclntable []byte
    ftab []funcTab
    findfunctab uintptr
    minpc, maxpc uintptr // first func address, last func address + last func size
    text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
    noptrdata, enoptrdata uintptr
    data, edata uintptr
    bss, ebss uintptr
    noptrbss, enoptrbss uintptr
    covctrs, ecovctrs uintptr
    end, gcdata, gcbss uintptr
    types, etypes uintptr
    rodata uintptr
    gofunc uintptr // go.func.* is actual funcinfo object in image
    textsectmap []textSection // see runtime/symtab.go: textAddr()
    typelinks []int32 // offsets from types
    itablinks []*rt.GoItab
    ptab []ptabEntry
    pluginpath string
    pkghashes []modulehash
    // This slice records the initializing tasks that need to be
    // done to start up the program. It is built by the linker.
    inittasks []unsafe.Pointer
    modulename string
    modulehashes []modulehash
    hasmain uint8 // 1 if module contains the main function, 0 otherwise
    bad bool // module failed to load and should be ignored
    gcdatamask, gcbssmask bitVector
    typemap map[int32]*rt.GoType // offset to *_rtype in previous module
    next *moduledata
}

// _func mirrors runtime._func for go1.23-go1.25; unchanged from the go1.20
// layout. Must stay byte-compatible with the runtime's definition.
type _func struct {
    entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    startLine int32 // line number of start of function (func keyword/TEXT directive)
    funcID uint8 // set for certain special runtime functions
    flag uint8
    _ [1]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}

View File

@@ -0,0 +1,123 @@
//go:build go1.26 && !go1.27
// +build go1.26,!go1.27
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`unsafe`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
    // _Magic is the pclntab header magic for go1.26
    // (see runtime/symtab.go: pcHeader.magic).
    _Magic uint32 = 0xFFFFFFF1
)

// moduledata mirrors runtime.moduledata for go1.26 (adds epclntab over the
// go1.23 layout) and MUST stay byte-compatible with the runtime's definition.
type moduledata struct {
    pcHeader *pcHeader
    funcnametab []byte
    cutab []uint32
    filetab []byte
    pctab []byte
    pclntable []byte
    ftab []funcTab
    findfunctab uintptr
    minpc, maxpc uintptr // first func address, last func address + last func size
    text, etext uintptr // start/end of text, (etext-text) must be greater than MIN_FUNC
    noptrdata, enoptrdata uintptr
    data, edata uintptr
    bss, ebss uintptr
    noptrbss, enoptrbss uintptr
    covctrs, ecovctrs uintptr
    end, gcdata, gcbss uintptr
    types, etypes uintptr
    rodata uintptr
    gofunc uintptr // go.func.* is actual funcinfo object in image
    epclntab uintptr
    textsectmap []textSection // see runtime/symtab.go: textAddr()
    typelinks []int32 // offsets from types
    itablinks []*rt.GoItab
    ptab []ptabEntry
    pluginpath string
    pkghashes []modulehash
    // This slice records the initializing tasks that need to be
    // done to start up the program. It is built by the linker.
    inittasks []unsafe.Pointer
    modulename string
    modulehashes []modulehash
    hasmain uint8 // 1 if module contains the main function, 0 otherwise
    bad bool // module failed to load and should be ignored
    gcdatamask, gcbssmask bitVector
    typemap map[int32]*rt.GoType // offset to *_rtype in previous module
    next *moduledata
}

// _func mirrors runtime._func for go1.26; unchanged from the go1.20 layout.
// Must stay byte-compatible with the runtime's definition.
type _func struct {
    entryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart
    nameOff int32 // function name, as index into moduledata.funcnametab.
    args int32 // in/out args size
    deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.
    pcsp uint32
    pcfile uint32
    pcln uint32
    npcdata uint32
    cuOffset uint32 // runtime.cutab offset of this function's CU
    startLine int32 // line number of start of function (func keyword/TEXT directive)
    funcID uint8 // set for certain special runtime functions
    flag uint8
    _ [1]byte // pad
    nfuncdata uint8 //
    // The end of the struct is followed immediately by two variable-length
    // arrays that reference the pcdata and funcdata locations for this
    // function.
    // pcdata contains the offset into moduledata.pctab for the start of
    // that index's table. e.g.,
    // &moduledata.pctab[_func.pcdata[_PCDATA_UnsafePoint]] is the start of
    // the unsafe point table.
    //
    // An offset of 0 indicates that there is no table.
    //
    // pcdata [npcdata]uint32
    // funcdata contains the offset past moduledata.gofunc which contains a
    // pointer to that index's funcdata. e.g.,
    // *(moduledata.gofunc + _func.funcdata[_FUNCDATA_ArgsPointerMaps]) is
    // the argument pointer map.
    //
    // An offset of ^uint32(0) indicates that there is no entry.
    //
    // funcdata [nfuncdata]uint32
}
// setEpclntab records the end address of the pclntab in the module metadata;
// the field exists only in the go1.26+ moduledata layout.
func setEpclntab(mod *moduledata, val uintptr) {
    mod.epclntab = val
}

View File

@@ -0,0 +1,24 @@
//go:build go1.18 && !go1.26
// +build go1.18,!go1.26
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
// setEpclntab is a no-op on Go versions before 1.26: their moduledata
// layout has no epclntab field to populate.
func setEpclntab(mod *moduledata, val uintptr) {
    // No-op for versions < 1.26
}

View File

@@ -0,0 +1,197 @@
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package abi
import (
`fmt`
`reflect`
`sort`
`strings`
`github.com/bytedance/sonic/loader/internal/rt`
)
// FunctionLayout describes how a function's arguments and return values are
// laid out for a call: FP is the frame-pointer offset, Args/Rets hold the
// per-parameter placement.
type FunctionLayout struct {
    FP uint32
    Args []Parameter
    Rets []Parameter
}
// String renders the layout for debugging; see formatFn for the format.
func (self FunctionLayout) String() string {
    return self.formatFn()
}
// ArgSize returns the total byte size of all argument types.
func (self FunctionLayout) ArgSize() uint32 {
    var total uintptr
    for _, p := range self.Args {
        total += p.Type.Size()
    }
    return uint32(total)
}
// slot pairs a stack-memory offset (m) with whether that slot holds a
// pointer (p); used to build stack maps in memory order.
type slot struct {
    p bool
    m uint32
}
// StackMap builds the pointer bitmap for every stack-passed slot of this
// layout: all arguments, plus those return values that are not assigned to
// registers, ordered by their memory offset.
func (self FunctionLayout) StackMap() *rt.StackMap {
    var builder rt.StackMapBuilder
    slots := make([]slot, 0, len(self.Args)+len(self.Rets))

    /* arguments always live on the stack map */
    for _, arg := range self.Args {
        slots = append(slots, slot{p: arg.IsPointer, m: arg.Mem})
    }

    /* return values contribute only when stack-passed */
    for _, ret := range self.Rets {
        if !ret.InRegister {
            slots = append(slots, slot{p: ret.IsPointer, m: ret.Mem})
        }
    }

    /* order the slots by memory offset */
    sort.Slice(slots, func(a int, b int) bool {
        return slots[a].m < slots[b].m
    })

    /* emit one bit per slot, then build the map */
    for _, s := range slots {
        builder.AddField(s.p)
    }
    return builder.Build()
}
// formatFn renders the layout as the frame-pointer offset followed by the
// return values and then the arguments, each annotated with its stack slot.
func (self FunctionLayout) formatFn() string {
    fp := self.FP
    top := fp // Sprintf must see the FP value before formatSeq decrements it
    rets := self.formatSeq(self.Rets, &fp)
    args := self.formatSeq(self.Args, &fp)
    return fmt.Sprintf("\n%#04x\nRets:\n%s\nArgs:\n%s", top, rets, args)
}
// formatSeq renders the parameters in v from last to first, decrementing
// *fp by one pointer size per entry and prefixing each line with the
// resulting offset.
func (self FunctionLayout) formatSeq(v []Parameter, fp *uint32) string {
    lines := make([]string, 0, len(v))

    /* walk the parameters back-to-front */
    for i := len(v) - 1; i >= 0; i-- {
        *fp -= PtrSize
        lines = append(lines, fmt.Sprintf("%#04x %s", *fp, v[i].String()))
    }

    /* one parameter per line */
    return strings.Join(lines, "\n")
}
// Frame describes a generated function's stack frame: its call layout
// (desc), the pointer-ness of each local slot (locals), and whether the
// C calling convention is in effect (ccall).
type Frame struct {
    desc *FunctionLayout
    locals []bool
    ccall bool
}
// NewFrame assembles a Frame from a function layout, the pointer flags of
// its local slots, and whether the C calling convention is used.
func NewFrame(desc *FunctionLayout, locals []bool, ccall bool) Frame {
    return Frame{
        desc:   desc,
        locals: locals,
        ccall:  ccall,
    }
}
// String renders the frame layout for debugging: the function layout,
// then the return PC and saved RBP slots, then each reserved register and
// local slot with its (negative) offset from the frame top.
func (self *Frame) String() string {
    var sb strings.Builder
    sb.WriteString(self.desc.String())

    off := -8
    fmt.Fprintf(&sb, "\n%#4x [Return PC]", off)
    off -= 8
    fmt.Fprintf(&sb, "\n%#4x [RBP]", off)
    off -= 8

    for _, reg := range ReservedRegs(self.ccall) {
        fmt.Fprintf(&sb, "\n%#4x [%v]", off, reg)
        off -= PtrSize
    }
    for _, isPtr := range self.locals {
        fmt.Fprintf(&sb, "\n%#4x [%v]", off, isPtr)
        off -= PtrSize
    }
    return sb.String()
}
// Prev returns the offset from RSP to the caller's frame: the full frame
// size plus the return-address slot.
func (self *Frame) Prev() uint32 {
    return self.Size() + PtrSize
}

// Size returns the total frame size: the spill area (Offs) plus the saved
// RBP slot.
func (self *Frame) Size() uint32 {
    return uint32(self.Offs() + PtrSize)
}
// Offs returns the byte size of the spill area: one pointer-sized slot for
// each reserved register and each local.
func (self *Frame) Offs() uint32 {
    slots := len(ReservedRegs(self.ccall)) + len(self.locals)
    return uint32(slots * PtrSize)
}
// ArgPtrs returns the stack map describing which argument/return slots
// hold pointers (delegates to the layout's StackMap).
func (self *Frame) ArgPtrs() *rt.StackMap {
    return self.desc.StackMap()
}

// LocalPtrs returns the stack map for the frame's spill area.
// NOTE(review): AddFields is invoked once per local with the reserved-
// register count — presumably emitting one bit per reserved-register slot
// for each local; confirm against rt.StackMapBuilder.
func (self *Frame) LocalPtrs() *rt.StackMap {
    var m rt.StackMapBuilder
    for _, b := range self.locals {
        m.AddFields(len(ReservedRegs(self.ccall)), b)
    }
    return m.Build()
}
// alignUp rounds n up to the next multiple of a.
// a must be a positive power of two.
func alignUp(n uint32, a int) uint32 {
    // n is already uint32 — only the alignment needs converting
    // (the original redundantly re-converted n).
    return (n + uint32(a) - 1) &^ (uint32(a) - 1)
}
func isPointer(vt reflect.Type) bool {
switch vt.Kind() {
case reflect.Bool : fallthrough
case reflect.Int : fallthrough
case reflect.Int8 : fallthrough
case reflect.Int16 : fallthrough
case reflect.Int32 : fallthrough
case reflect.Int64 : fallthrough
case reflect.Uint : fallthrough
case reflect.Uint8 : fallthrough
case reflect.Uint16 : fallthrough
case reflect.Uint32 : fallthrough
case reflect.Uint64 : fallthrough
case reflect.Float32 : fallthrough
case reflect.Float64 : fallthrough
case reflect.Uintptr : return false
case reflect.Chan : fallthrough
case reflect.Func : fallthrough
case reflect.Map : fallthrough
case reflect.Ptr : fallthrough
case reflect.UnsafePointer : return true
case reflect.Complex64 : fallthrough
case reflect.Complex128 : fallthrough
case reflect.Array : fallthrough
case reflect.Struct : panic("abi: unsupported types")
default : panic("abi: invalid value type")
}
}

View File

@@ -0,0 +1,301 @@
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package abi
import (
"fmt"
"reflect"
"unsafe"
x64 "github.com/bytedance/sonic/loader/internal/iasm/x86_64"
)
type (
Register = x64.Register
Register64 = x64.Register64
XMMRegister = x64.XMMRegister
Program = x64.Program
MemoryOperand = x64.MemoryOperand
Label = x64.Label
)
var (
Ptr = x64.Ptr
DefaultArch = x64.DefaultArch
CreateLabel = x64.CreateLabel
)
const (
RAX = x64.RAX
RSP = x64.RSP
RBP = x64.RBP
R12 = x64.R12
R14 = x64.R14
R15 = x64.R15
)
const (
PtrSize = 8 // pointer size
PtrAlign = 8 // pointer alignment
)
var iregOrderC = []Register{
x64.RDI,
x64.RSI,
x64.RDX,
x64.RCX,
x64.R8,
x64.R9,
}
var xregOrderC = []Register{
x64.XMM0,
x64.XMM1,
x64.XMM2,
x64.XMM3,
x64.XMM4,
x64.XMM5,
x64.XMM6,
x64.XMM7,
}
var (
intType = reflect.TypeOf(0)
ptrType = reflect.TypeOf(unsafe.Pointer(nil))
)
// argv returns the memory operand for the i-th argument in the caller's frame.
func (self *Frame) argv(i int) *MemoryOperand {
    return Ptr(RSP, int32(self.Prev()+self.desc.Args[i].Mem))
}
// spillv returns the spill slot used to save register arguments across the
// runtime.morestack_noctxt call in emitGrowStack.
func (self *Frame) spillv(i int) *MemoryOperand {
    // remain one slot for caller return pc
    return Ptr(RSP, PtrSize+int32(self.desc.Args[i].Mem))
}
// retv returns the memory operand for the i-th return value in the caller's frame.
func (self *Frame) retv(i int) *MemoryOperand {
    return Ptr(RSP, int32(self.Prev()+self.desc.Rets[i].Mem))
}
// resv returns the slot that holds the i-th reserved register, located just
// below the saved RBP.
func (self *Frame) resv(i int) *MemoryOperand {
    return Ptr(RSP, int32(self.Offs()-uint32((i+1)*PtrSize)))
}
// emitGrowStack emits the out-of-line stack-growth path: spill register
// arguments, call runtime.morestack_noctxt, reload the arguments, and jump
// back to the function entry so the stack check runs again.
func (self *Frame) emitGrowStack(p *Program, entry *Label) {
    // spill all register arguments
    for i, v := range self.desc.Args {
        if v.InRegister {
            if v.IsFloat == floatKind64 {
                p.MOVSD(v.Reg, self.spillv(i))
            } else if v.IsFloat == floatKind32 {
                p.MOVSS(v.Reg, self.spillv(i))
            } else {
                p.MOVQ(v.Reg, self.spillv(i))
            }
        }
    }
    // call runtime.morestack_noctxt
    p.MOVQ(F_morestack_noctxt, R12)
    p.CALLQ(R12)
    // load all register arguments
    for i, v := range self.desc.Args {
        if v.InRegister {
            if v.IsFloat == floatKind64 {
                p.MOVSD(self.spillv(i), v.Reg)
            } else if v.IsFloat == floatKind32 {
                p.MOVSS(self.spillv(i), v.Reg)
            } else {
                p.MOVQ(self.spillv(i), v.Reg)
            }
        }
    }
    // jump back to the function entry
    p.JMP(entry)
}
// GrowStackTextSize assembles a throwaway copy of the grow-stack sequence
// (mirroring emitGrowStack, with the back-jump resolved to a local label)
// and returns its encoded size in bytes.
func (self *Frame) GrowStackTextSize() uint32 {
    p := DefaultArch.CreateProgram()
    // spill all register arguments
    for i, v := range self.desc.Args {
        if v.InRegister {
            if v.IsFloat == floatKind64 {
                p.MOVSD(v.Reg, self.spillv(i))
            } else if v.IsFloat == floatKind32 {
                p.MOVSS(v.Reg, self.spillv(i))
            } else {
                p.MOVQ(v.Reg, self.spillv(i))
            }
        }
    }
    // call runtime.morestack_noctxt
    p.MOVQ(F_morestack_noctxt, R12)
    p.CALLQ(R12)
    // load all register arguments
    for i, v := range self.desc.Args {
        if v.InRegister {
            if v.IsFloat == floatKind64 {
                p.MOVSD(self.spillv(i), v.Reg)
            } else if v.IsFloat == floatKind32 {
                p.MOVSS(self.spillv(i), v.Reg)
            } else {
                p.MOVQ(self.spillv(i), v.Reg)
            }
        }
    }
    // jump back to the function entry; a dummy local label stands in for the
    // real entry so the emitted JMP has a concrete (short) target to measure
    l := CreateLabel("")
    p.Link(l)
    p.JMP(l)
    return uint32(len(p.Assemble(0)))
}
// emitPrologue allocates the frame, saves the caller's RBP into its slot,
// and points RBP at that slot.
func (self *Frame) emitPrologue(p *Program) {
    p.SUBQ(self.Size(), RSP)
    p.MOVQ(RBP, Ptr(RSP, int32(self.Offs())))
    p.LEAQ(Ptr(RSP, int32(self.Offs())), RBP)
}
// emitEpilogue restores the caller's RBP, releases the frame, and returns.
// Mirrors emitPrologue exactly.
func (self *Frame) emitEpilogue(p *Program) {
    p.MOVQ(Ptr(RSP, int32(self.Offs())), RBP)
    p.ADDQ(self.Size(), RSP)
    p.RET()
}
// emitReserveRegs saves every reserved register (see ReservedRegs) into its
// dedicated slot below the saved RBP so the callee may clobber them;
// emitRestoreRegs is the inverse.
func (self *Frame) emitReserveRegs(p *Program) {
    // spill reserved registers
    for i, r := range ReservedRegs(self.ccall) {
        switch r.(type) {
        case Register64:
            p.MOVQ(r, self.resv(i))
        case XMMRegister:
            p.MOVSD(r, self.resv(i))
        default:
            // %T prints the dynamic type; the original used %t (the boolean
            // verb), which produced a useless panic message.
            panic(fmt.Sprintf("unsupported register type %T to reserve", r))
        }
    }
}
// emitSpillPtrs stores every register-assigned pointer argument into its
// stack slot so the GC can locate it while foreign code runs.
func (self *Frame) emitSpillPtrs(p *Program) {
    for i := range self.desc.Args {
        if a := self.desc.Args[i]; a.InRegister && a.IsPointer {
            p.MOVQ(a.Reg, self.argv(i))
        }
    }
}
// emitClearPtrs zeroes the stack slots of register-assigned pointer
// arguments (the original comment said "spill", but this writes 0).
func (self *Frame) emitClearPtrs(p *Program) {
    // clear pointer argument slots
    for i, r := range self.desc.Args {
        if r.InRegister && r.IsPointer {
            p.MOVQ(int64(0), self.argv(i))
        }
    }
}
// emitCallC emits an indirect call through RAX to the C function at addr.
func (self *Frame) emitCallC(p *Program, addr uintptr) {
    p.MOVQ(addr, RAX)
    p.CALLQ(RAX)
}
type floatKind uint8
const (
notFloatKind floatKind = iota
floatKind32
floatKind64
)
type Parameter struct {
InRegister bool
IsPointer bool
IsFloat floatKind
Reg Register
Mem uint32
Type reflect.Type
}
// mkIReg builds a register-assigned Parameter of type vt living in the
// integer register reg.
func mkIReg(vt reflect.Type, reg Register64) (p Parameter) {
    p = Parameter{
        Reg:        reg,
        Type:       vt,
        InRegister: true,
        IsPointer:  isPointer(vt),
    }
    return
}
// isFloat classifies vt: floatKind32 for float32, floatKind64 for float64,
// notFloatKind for everything else.
func isFloat(vt reflect.Type) floatKind {
    k := vt.Kind()
    if k == reflect.Float32 {
        return floatKind32
    }
    if k == reflect.Float64 {
        return floatKind64
    }
    return notFloatKind
}
// mkXReg builds a register-assigned Parameter of type vt living in the
// floating-point register reg.
func mkXReg(vt reflect.Type, reg XMMRegister) (p Parameter) {
    p = Parameter{
        Reg:        reg,
        Type:       vt,
        InRegister: true,
        IsFloat:    isFloat(vt),
    }
    return
}
// mkStack builds a stack-assigned Parameter of type vt at frame offset mem.
func mkStack(vt reflect.Type, mem uint32) (p Parameter) {
    p = Parameter{
        Mem:       mem,
        Type:      vt,
        IsPointer: isPointer(vt),
        IsFloat:   isFloat(vt),
    }
    // InRegister is left at its zero value (false)
    return
}
// String renders the parameter's location (register or frame offset) along
// with its pointer/float classification.
func (self Parameter) String() string {
    if !self.InRegister {
        return fmt.Sprintf("[%d(FP), Pointer(%v), Float(%v)]", self.Mem, self.IsPointer, self.IsFloat)
    }
    return fmt.Sprintf("[%%%s, Pointer(%v), Float(%v)]", self.Reg, self.IsPointer, self.IsFloat)
}
// CallC assembles a Go-callable stub that invokes the C function at addr
// using frame layout fr: stack check, prologue, register reservation and
// pointer spilling, Go→C argument exchange, the call itself, result
// exchange, register restore, epilogue, and the out-of-line grow-stack path.
// maxStack is the extra stack the C callee may consume.
func CallC(addr uintptr, fr Frame, maxStack uintptr) []byte {
    p := DefaultArch.CreateProgram()
    stack := CreateLabel("_stack_grow")
    entry := CreateLabel("_entry")
    p.Link(entry)
    fr.emitStackCheck(p, stack, maxStack)
    fr.emitPrologue(p)
    fr.emitReserveRegs(p)
    fr.emitSpillPtrs(p)
    fr.emitExchangeArgs(p)
    fr.emitCallC(p, addr)
    fr.emitExchangeRets(p)
    fr.emitRestoreRegs(p)
    fr.emitEpilogue(p)
    p.Link(stack)
    fr.emitGrowStack(p, entry)
    return p.Assemble(0)
}

View File

@@ -0,0 +1,215 @@
//go:build !go1.17
// +build !go1.17
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package abi
import (
"fmt"
"reflect"
"runtime"
)
// ReservedRegs returns the registers the generated code must preserve.
// Pre-go1.17 the stack-based ABI reserves none.
func ReservedRegs(callc bool) []Register {
    return nil
}
// salloc stack-allocates one value of type vt at offset sp (pre-go1.17 ABI:
// everything goes on the stack, each word padded to 8 bytes) and returns the
// advanced offset plus the extended parameter list. Multi-word kinds
// (interface, slice, string) expand into one Parameter per word.
func salloc(p []Parameter, sp uint32, vt reflect.Type) (uint32, []Parameter) {
    switch vt.Kind() {
    case reflect.Bool:
        return sp + 8, append(p, mkStack(reflect.TypeOf(false), sp))
    case reflect.Int:
        return sp + 8, append(p, mkStack(intType, sp))
    case reflect.Int8:
        return sp + 8, append(p, mkStack(reflect.TypeOf(int8(0)), sp))
    case reflect.Int16:
        return sp + 8, append(p, mkStack(reflect.TypeOf(int16(0)), sp))
    case reflect.Int32:
        return sp + 8, append(p, mkStack(reflect.TypeOf(int32(0)), sp))
    case reflect.Int64:
        return sp + 8, append(p, mkStack(reflect.TypeOf(int64(0)), sp))
    case reflect.Uint:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uint(0)), sp))
    case reflect.Uint8:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uint8(0)), sp))
    case reflect.Uint16:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uint16(0)), sp))
    case reflect.Uint32:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uint32(0)), sp))
    case reflect.Uint64:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uint64(0)), sp))
    case reflect.Uintptr:
        return sp + 8, append(p, mkStack(reflect.TypeOf(uintptr(0)), sp))
    case reflect.Float32:
        return sp + 8, append(p, mkStack(reflect.TypeOf(float32(0)), sp))
    case reflect.Float64:
        return sp + 8, append(p, mkStack(reflect.TypeOf(float64(0)), sp))
    case reflect.Complex64:
        panic("abi: go116: not implemented: complex64")
    case reflect.Complex128:
        panic("abi: go116: not implemented: complex128")
    case reflect.Array:
        panic("abi: go116: not implemented: arrays")
    case reflect.Chan:
        return sp + 8, append(p, mkStack(reflect.TypeOf((chan int)(nil)), sp))
    case reflect.Func:
        return sp + 8, append(p, mkStack(reflect.TypeOf((func())(nil)), sp))
    case reflect.Map:
        return sp + 8, append(p, mkStack(reflect.TypeOf((map[int]int)(nil)), sp))
    case reflect.Ptr:
        return sp + 8, append(p, mkStack(reflect.TypeOf((*int)(nil)), sp))
    case reflect.UnsafePointer:
        return sp + 8, append(p, mkStack(ptrType, sp))
    case reflect.Interface:
        // two words: type pointer + data pointer
        return sp + 16, append(p, mkStack(ptrType, sp), mkStack(ptrType, sp+8))
    case reflect.Slice:
        // three words: data pointer + len + cap
        return sp + 24, append(p, mkStack(ptrType, sp), mkStack(intType, sp+8), mkStack(intType, sp+16))
    case reflect.String:
        // two words: data pointer + len
        return sp + 16, append(p, mkStack(ptrType, sp), mkStack(intType, sp+8))
    case reflect.Struct:
        panic("abi: go116: not implemented: structs")
    default:
        panic("abi: invalid value type")
    }
}
// NewFunctionLayout computes the pre-go1.17 (stack-based) ABI layout of ft:
// arguments and results are assigned consecutive stack slots and FP records
// the total frame pointer offset.
func NewFunctionLayout(ft reflect.Type) FunctionLayout {
    var sp uint32
    var fn FunctionLayout
    /* assign every arguments */
    for i := 0; i < ft.NumIn(); i++ {
        sp, fn.Args = salloc(fn.Args, sp, ft.In(i))
    }
    /* assign every return value */
    for i := 0; i < ft.NumOut(); i++ {
        sp, fn.Rets = salloc(fn.Rets, sp, ft.Out(i))
    }
    /* update function ID and stack pointer */
    fn.FP = sp
    return fn
}
// emitExchangeArgs moves every Go stack-assigned argument into the C ABI
// register for its class (System V: up to 6 integer, 8 float registers).
// Registers are filled in declaration order within each class.
func (self *Frame) emitExchangeArgs(p *Program) {
    iregArgs, xregArgs := 0, 0
    // count arguments per register class
    for _, v := range self.desc.Args {
        if v.IsFloat != notFloatKind {
            xregArgs += 1
        } else {
            iregArgs += 1
        }
    }
    if iregArgs > len(iregOrderC) {
        panic("too many arguments, only support at most 6 integer arguments now")
    }
    if xregArgs > len(xregOrderC) {
        panic("too many arguments, only support at most 8 float arguments now")
    }
    // ic/xc count down so (total - remaining) indexes the next free register
    ic, xc := iregArgs, xregArgs
    for i := 0; i < len(self.desc.Args); i++ {
        arg := self.desc.Args[i]
        if arg.IsFloat == floatKind64 {
            p.MOVSD(self.argv(i), xregOrderC[xregArgs-xc])
            xc -= 1
        } else if arg.IsFloat == floatKind32 {
            p.MOVSS(self.argv(i), xregOrderC[xregArgs-xc])
            xc -= 1
        } else {
            p.MOVQ(self.argv(i), iregOrderC[iregArgs-ic])
            ic -= 1
        }
    }
}
// emitStackCheck loads the current goroutine's g pointer into R14 (from TLS
// on linux/darwin; already in R14 on windows) and jumps to the grow-stack
// path when the frame plus maxStack would cross g.stackguard0.
func (self *Frame) emitStackCheck(p *Program, to *Label, maxStack uintptr) {
    // get the current goroutine
    switch runtime.GOOS {
    case "linux":
        p.MOVQ(Abs(-8), R14).FS()
    case "darwin":
        p.MOVQ(Abs(0x30), R14).GS()
    case "windows":
        break // windows always stores G pointer at R14
    default:
        panic("unsupported operating system")
    }
    // check the stack guard
    p.LEAQ(Ptr(RSP, -int32(self.Size()+uint32(maxStack))), RAX)
    p.CMPQ(Ptr(R14, _G_stackguard0), RAX)
    p.JBE(to)
}
// StackCheckTextSize assembles a throwaway copy of the stack-check sequence
// and returns its encoded size in bytes.
// NOTE(review): unlike emitStackCheck, the LEAQ here omits maxStack from the
// displacement; the encoded size should match for typical values, but verify
// if displacement width could differ.
func (self *Frame) StackCheckTextSize() uint32 {
    p := DefaultArch.CreateProgram()
    // get the current goroutine
    switch runtime.GOOS {
    case "linux":
        p.MOVQ(Abs(-8), R14).FS()
    case "darwin":
        p.MOVQ(Abs(0x30), R14).GS()
    case "windows":
        break // windows always stores G pointer at R14
    default:
        panic("unsupported operating system")
    }
    // check the stack guard
    p.LEAQ(Ptr(RSP, -int32(self.Size())), RAX)
    p.CMPQ(Ptr(R14, _G_stackguard0), RAX)
    l := CreateLabel("")
    p.Link(l)
    p.JBE(l)
    return uint32(len(p.Assemble(0)))
}
// emitExchangeRets copies the single C return value (XMM0 for floats, RAX
// otherwise) into its Go stack slot. Only zero or one result is supported.
func (self *Frame) emitExchangeRets(p *Program) {
    if len(self.desc.Rets) > 1 {
        panic("too many results, only support one result now")
    }
    // store result
    if len(self.desc.Rets) == 1 {
        if self.desc.Rets[0].IsFloat == floatKind64 {
            p.MOVSD(xregOrderC[0], self.retv(0))
        } else if self.desc.Rets[0].IsFloat == floatKind32 {
            p.MOVSS(xregOrderC[0], self.retv(0))
        } else {
            p.MOVQ(RAX, self.retv(0))
        }
    }
}
// emitRestoreRegs reloads every reserved register from its slot; the inverse
// of emitReserveRegs. A no-op pre-go1.17 since ReservedRegs returns nil.
func (self *Frame) emitRestoreRegs(p *Program) {
    // load reserved registers
    for i, r := range ReservedRegs(self.ccall) {
        switch r.(type) {
        case Register64:
            p.MOVQ(self.resv(i), r)
        case XMMRegister:
            p.MOVSD(self.resv(i), r)
        default:
            // %T prints the dynamic type; the original %t is the boolean verb
            panic(fmt.Sprintf("unsupported register type %T to reserve", r))
        }
    }
}

View File

@@ -0,0 +1,345 @@
//go:build go1.17
// +build go1.17
/*
* Copyright 2022 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Go Internal ABI implementation
*
* This module implements the function layout algorithm described by the Go internal ABI.
* See https://github.com/golang/go/blob/master/src/cmd/compile/abi-internal.md for more info.
*/
package abi
import (
"fmt"
"reflect"
x64 "github.com/bytedance/sonic/loader/internal/iasm/x86_64"
)
/** Frame Structure of the Generated Function
FP +------------------------------+
| . . . |
| 2nd reg argument spill space |
+ 1st reg argument spill space |
| <pointer-sized alignment> |
| . . . |
| 2nd stack-assigned result |
+ 1st stack-assigned result |
| <pointer-sized alignment> |
| . . . |
| 2nd stack-assigned argument |
| 1st stack-assigned argument |
| stack-assigned receiver |
prev() +------------------------------+ (Previous Frame)
Return PC |
size() -------------------------------|
Saved RBP |
offs() -------------------------------|
1th Reserved Registers |
-------------------------------|
2th Reserved Registers |
-------------------------------|
Local Variables |
RSP -------------------------------|↓ lower addresses
*/
const zeroRegGo = x64.XMM15
var iregOrderGo = [...]Register64{
x64.RAX, // RDI
x64.RBX, // RSI
x64.RCX, // RDX
x64.RDI, // RCX
x64.RSI, // R8
x64.R8, // R9
x64.R9,
x64.R10,
x64.R11,
}
var xregOrderGo = [...]XMMRegister{
x64.XMM0,
x64.XMM1,
x64.XMM2,
x64.XMM3,
x64.XMM4,
x64.XMM5,
x64.XMM6,
x64.XMM7,
x64.XMM8,
x64.XMM9,
x64.XMM10,
x64.XMM11,
x64.XMM12,
x64.XMM13,
x64.XMM14,
}
// ReservedRegs returns the registers the generated code must save and
// restore around a C call: R14 (current g) and R15 (GOT base) under the Go
// register ABI. None are needed when generating pure C-convention code.
func ReservedRegs(callc bool) []Register {
    if callc {
        return nil
    }
    return []Register{
        R14, // current goroutine
        R15, // GOT reference
    }
}
type stackAlloc struct {
s uint32
i int
x int
}
// reset rewinds the register counters (but not the stack offset), as the ABI
// assigns argument and result registers independently.
func (self *stackAlloc) reset() {
    self.i, self.x = 0, 0
}
// ireg assigns vt to the next free integer register.
func (self *stackAlloc) ireg(vt reflect.Type) (p Parameter) {
    p = mkIReg(vt, iregOrderGo[self.i])
    self.i++
    return
}
// xreg assigns vt to the next free floating-point register.
func (self *stackAlloc) xreg(vt reflect.Type) (p Parameter) {
    p = mkXReg(vt, xregOrderGo[self.x])
    self.x++
    return
}
// stack assigns vt to the next stack slot, advancing by the type's size.
func (self *stackAlloc) stack(vt reflect.Type) (p Parameter) {
    p = mkStack(vt, self.s)
    self.s += uint32(vt.Size())
    return
}
// spill aligns the stack offset to a and then reserves n more bytes,
// returning the new offset.
func (self *stackAlloc) spill(n uint32, a int) uint32 {
    self.s = alignUp(self.s, a) + n
    return self.s
}
// alloc assigns one value of type vt under the go1.17 register ABI,
// normalizing it to canonical word types and delegating to valloc, which
// picks a register when one is free and the stack otherwise. Multi-word
// kinds (interface, slice, string) expand into one word per component.
func (self *stackAlloc) alloc(p []Parameter, vt reflect.Type) []Parameter {
    nb := vt.Size()
    vk := vt.Kind()
    /* zero-sized objects are allocated on stack */
    if nb == 0 {
        return append(p, mkStack(intType, self.s))
    }
    /* check for value type */
    switch vk {
    case reflect.Bool:
        return self.valloc(p, reflect.TypeOf(false))
    case reflect.Int:
        return self.valloc(p, intType)
    case reflect.Int8:
        return self.valloc(p, reflect.TypeOf(int8(0)))
    case reflect.Int16:
        return self.valloc(p, reflect.TypeOf(int16(0)))
    case reflect.Int32:
        // fixed: previously passed uint32(0), inconsistent with every other
        // case and with the go1.16 layout code
        return self.valloc(p, reflect.TypeOf(int32(0)))
    case reflect.Int64:
        return self.valloc(p, reflect.TypeOf(int64(0)))
    case reflect.Uint:
        return self.valloc(p, reflect.TypeOf(uint(0)))
    case reflect.Uint8:
        return self.valloc(p, reflect.TypeOf(uint8(0)))
    case reflect.Uint16:
        return self.valloc(p, reflect.TypeOf(uint16(0)))
    case reflect.Uint32:
        return self.valloc(p, reflect.TypeOf(uint32(0)))
    case reflect.Uint64:
        return self.valloc(p, reflect.TypeOf(uint64(0)))
    case reflect.Uintptr:
        return self.valloc(p, reflect.TypeOf(uintptr(0)))
    case reflect.Float32:
        return self.valloc(p, reflect.TypeOf(float32(0)))
    case reflect.Float64:
        return self.valloc(p, reflect.TypeOf(float64(0)))
    case reflect.Complex64:
        panic("abi: go117: not implemented: complex64")
    case reflect.Complex128:
        panic("abi: go117: not implemented: complex128")
    case reflect.Array:
        panic("abi: go117: not implemented: arrays")
    case reflect.Chan:
        return self.valloc(p, reflect.TypeOf((chan int)(nil)))
    case reflect.Func:
        return self.valloc(p, reflect.TypeOf((func())(nil)))
    case reflect.Map:
        return self.valloc(p, reflect.TypeOf((map[int]int)(nil)))
    case reflect.Ptr:
        return self.valloc(p, reflect.TypeOf((*int)(nil)))
    case reflect.UnsafePointer:
        return self.valloc(p, ptrType)
    case reflect.Interface:
        // two words: type pointer + data pointer
        return self.valloc(p, ptrType, ptrType)
    case reflect.Slice:
        // three words: data pointer + len + cap
        return self.valloc(p, ptrType, intType, intType)
    case reflect.String:
        // two words: data pointer + len
        return self.valloc(p, ptrType, intType)
    case reflect.Struct:
        panic("abi: go117: not implemented: structs")
    default:
        panic("abi: invalid value type")
    }
}
// valloc assigns each word type in vts: a float register for float kinds, an
// integer register otherwise, falling back to the stack when that register
// class is exhausted.
func (self *stackAlloc) valloc(p []Parameter, vts ...reflect.Type) []Parameter {
    for _, vt := range vts {
        enum := isFloat(vt)
        if enum != notFloatKind && self.x < len(xregOrderGo) {
            p = append(p, self.xreg(vt))
        } else if enum == notFloatKind && self.i < len(iregOrderGo) {
            p = append(p, self.ireg(vt))
        } else {
            p = append(p, self.stack(vt))
        }
    }
    return p
}
// NewFunctionLayout computes the go1.17 register-ABI layout of ft: arguments
// and results get register (or stack) assignments, every register-assigned
// argument also gets a pointer-aligned spill slot, and FP records the final
// frame pointer offset.
func NewFunctionLayout(ft reflect.Type) FunctionLayout {
    var sa stackAlloc
    var fn FunctionLayout
    /* assign every arguments */
    for i := 0; i < ft.NumIn(); i++ {
        fn.Args = sa.alloc(fn.Args, ft.In(i))
    }
    /* reset the register counter, and add a pointer alignment field */
    sa.reset()
    /* assign every return value */
    for i := 0; i < ft.NumOut(); i++ {
        fn.Rets = sa.alloc(fn.Rets, ft.Out(i))
    }
    sa.spill(0, PtrAlign)
    /* assign spill slots */
    for i := 0; i < len(fn.Args); i++ {
        if fn.Args[i].InRegister {
            fn.Args[i].Mem = sa.spill(PtrSize, PtrAlign) - PtrSize
        }
    }
    /* add the final pointer alignment field */
    fn.FP = sa.spill(0, PtrAlign)
    return fn
}
// emitExchangeArgs moves register-assigned Go-ABI arguments into their C-ABI
// registers. Float registers happen to coincide between the two ABIs, so
// only integer registers are exchanged; since the Go and C integer orders
// overlap (e.g. Go arg 3 is in RDI, which is C arg 0), arguments beyond the
// first three are spilled to their slots before being reloaded.
// Stack-assigned arguments are not supported.
func (self *Frame) emitExchangeArgs(p *Program) {
    iregArgs := make([]Parameter, 0, len(self.desc.Args))
    xregArgs := 0
    for _, v := range self.desc.Args {
        if v.InRegister {
            if v.IsFloat != notFloatKind {
                xregArgs += 1
            } else {
                iregArgs = append(iregArgs, v)
            }
        } else {
            panic("not support stack-assigned arguments now")
        }
    }
    if xregArgs > len(xregOrderC) {
        panic("too many arguments, only support at most 8 integer register arguments now")
    }
    switch len(iregArgs) {
    case 0, 1, 2, 3:
        {
            //Fast-Path: when arguments count are less than four, just exchange the registers
            for i := 0; i < len(iregArgs); i++ {
                p.MOVQ(iregOrderGo[i], iregOrderC[i])
            }
        }
    case 4, 5, 6:
        {
            // need to spill 3th ~ regArgs registers before exchange
            for i := 3; i < len(iregArgs); i++ {
                arg := iregArgs[i]
                // pointer args have already been spilled
                if !arg.IsPointer {
                    p.MOVQ(iregOrderGo[i], Ptr(RSP, int32(self.Prev()+arg.Mem)))
                }
            }
            // the first three Go registers do not collide with their C targets
            p.MOVQ(iregOrderGo[0], iregOrderC[0])
            p.MOVQ(iregOrderGo[1], iregOrderC[1])
            p.MOVQ(iregOrderGo[2], iregOrderC[2])
            // reload the spilled arguments into the remaining C registers
            for i := 3; i < len(iregArgs); i++ {
                arg := iregArgs[i]
                p.MOVQ(Ptr(RSP, int32(self.Prev()+arg.Mem)), iregOrderC[i])
            }
        }
    default:
        panic("too many arguments, only support at most 6 integer register arguments now")
    }
}
// emitStackCheck jumps to the grow-stack path when the frame plus maxStack
// would cross g.stackguard0. Under the go1.17 ABI the g pointer is already
// in R14, so no TLS load is needed; R12 is used as scratch.
func (self *Frame) emitStackCheck(p *Program, to *Label, maxStack uintptr) {
    p.LEAQ(Ptr(RSP, int32(-(self.Size()+uint32(maxStack)))), R12)
    p.CMPQ(Ptr(R14, _G_stackguard0), R12)
    p.JBE(to)
}
// StackCheckTextSize assembles a throwaway copy of the stack-check sequence
// and returns its encoded size in bytes.
// NOTE(review): the LEAQ displacement here omits maxStack (unlike
// emitStackCheck); verify the encoded instruction width cannot differ.
func (self *Frame) StackCheckTextSize() uint32 {
    p := DefaultArch.CreateProgram()
    p.LEAQ(Ptr(RSP, int32(-(self.Size()))), R12)
    p.CMPQ(Ptr(R14, _G_stackguard0), R12)
    to := CreateLabel("")
    p.Link(to)
    p.JBE(to)
    return uint32(len(p.Assemble(0)))
}
// emitExchangeRets copies the single C return value (XMM0 for floats, RAX
// otherwise) into its Go slot, but only when the result is stack-assigned
// under the Go ABI; register-assigned results are presumably already in the
// right register (RAX/XMM0 coincide) — confirm against the Go internal ABI.
func (self *Frame) emitExchangeRets(p *Program) {
    if len(self.desc.Rets) > 1 {
        panic("too many results, only support one result now")
    }
    // store result
    if len(self.desc.Rets) == 1 && !self.desc.Rets[0].InRegister {
        if self.desc.Rets[0].IsFloat == floatKind64 {
            p.MOVSD(xregOrderC[0], self.retv(0))
        } else if self.desc.Rets[0].IsFloat == floatKind32 {
            p.MOVSS(xregOrderC[0], self.retv(0))
        } else {
            p.MOVQ(RAX, self.retv(0))
        }
    }
}
// emitRestoreRegs reloads every reserved register from its slot (the inverse
// of emitReserveRegs) and re-zeroes XMM15, which the Go register ABI expects
// to hold zero.
func (self *Frame) emitRestoreRegs(p *Program) {
    // load reserved registers
    for i, r := range ReservedRegs(self.ccall) {
        switch r.(type) {
        case Register64:
            p.MOVQ(self.resv(i), r)
        case XMMRegister:
            p.MOVSD(self.resv(i), r)
        default:
            // %T prints the dynamic type; the original %t is the boolean verb
            panic(fmt.Sprintf("unsupported register type %T to reserve", r))
        }
    }
    // zero xmm15 for go abi
    p.XORPS(zeroRegGo, zeroRegGo)
}

View File

@@ -0,0 +1,35 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package abi
import (
_ `unsafe`
`github.com/bytedance/sonic/loader/internal/rt`
)
const (
_G_stackguard0 = 0x10
)
var (
F_morestack_noctxt = uintptr(rt.FuncAddr(morestack_noctxt))
)
//go:linkname morestack_noctxt runtime.morestack_noctxt
func morestack_noctxt()

View File

@@ -0,0 +1,273 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
import (
"fmt"
)
// Type is the expression type.
type Type int
const (
// CONST indicates that the expression is a constant.
CONST Type = iota
// TERM indicates that the expression is a Term reference.
TERM
// EXPR indicates that the expression is a unary or binary expression.
EXPR
)
var typeNames = map[Type]string{
EXPR: "Expr",
TERM: "Term",
CONST: "Const",
}
// String returns the string representation of a Type.
func (self Type) String() string {
    v, ok := typeNames[self]
    if !ok {
        return fmt.Sprintf("expr.Type(%d)", self)
    }
    return v
}
// Operator represents an operation to perform when Type is EXPR.
type Operator uint8
const (
// ADD performs "Add Expr.Left and Expr.Right".
ADD Operator = iota
// SUB performs "Subtract Expr.Left by Expr.Right".
SUB
// MUL performs "Multiply Expr.Left by Expr.Right".
MUL
// DIV performs "Divide Expr.Left by Expr.Right".
DIV
// MOD performs "Modulo Expr.Left by Expr.Right".
MOD
// AND performs "Bitwise AND Expr.Left and Expr.Right".
AND
// OR performs "Bitwise OR Expr.Left and Expr.Right".
OR
// XOR performs "Bitwise XOR Expr.Left and Expr.Right".
XOR
// SHL performs "Bitwise Shift Expr.Left to the Left by Expr.Right Bits".
SHL
// SHR performs "Bitwise Shift Expr.Left to the Right by Expr.Right Bits".
SHR
// POW performs "Raise Expr.Left to the power of Expr.Right"
POW
// NOT performs "Bitwise Invert Expr.Left".
NOT
// NEG performs "Negate Expr.Left".
NEG
)
var operatorNames = map[Operator]string{
ADD: "Add",
SUB: "Subtract",
MUL: "Multiply",
DIV: "Divide",
MOD: "Modulo",
AND: "And",
OR: "Or",
XOR: "ExclusiveOr",
SHL: "ShiftLeft",
SHR: "ShiftRight",
POW: "Power",
NOT: "Invert",
NEG: "Negate",
}
// String returns the string representation of an Operator.
func (self Operator) String() string {
    if v, ok := operatorNames[self]; ok {
        return v
    } else {
        return fmt.Sprintf("expr.Operator(%d)", self)
    }
}
// Expr represents an expression node.
type Expr struct {
Type Type
Term Term
Op Operator
Left *Expr
Right *Expr
Const int64
}
// Ref creates an expression from a Term.
func Ref(t Term) (p *Expr) {
    p = newExpression()
    p.Type = TERM
    p.Term = t
    return p
}
// Int creates an expression from an integer.
func Int(v int64) (p *Expr) {
    p = newExpression()
    p.Const = v
    p.Type = CONST
    return p
}
// clear releases the term and both operand sub-expressions, if present.
func (self *Expr) clear() {
    if t := self.Term; t != nil {
        t.Free()
    }
    if l := self.Left; l != nil {
        l.Free()
    }
    if r := self.Right; r != nil {
        r.Free()
    }
}
// Free returns the Expr into pool, recursively freeing its term and operands.
// Any operation performed after Free is undefined behavior.
func (self *Expr) Free() {
    self.clear()
    freeExpression(self)
}
// Evaluate evaluates the expression into an integer: constants return their
// value, term references delegate to the Term, and compound expressions are
// reduced by eval.
// It also implements the Term interface.
func (self *Expr) Evaluate() (int64, error) {
    switch self.Type {
    case EXPR:
        return self.eval()
    case TERM:
        return self.Term.Evaluate()
    case CONST:
        return self.Const, nil
    default:
        panic("invalid expression type: " + self.Type.String())
    }
}
/** Expression Combinator **/
func combine(a *Expr, op Operator, b *Expr) (r *Expr) {
r = newExpression()
r.Op = op
r.Type = EXPR
r.Left = a
r.Right = b
return
}
// Fluent combinators: each wraps the receiver (and, for binary operators,
// the argument) into a new compound expression.
func (self *Expr) Add(v *Expr) *Expr { return combine(self, ADD, v) }
func (self *Expr) Sub(v *Expr) *Expr { return combine(self, SUB, v) }
func (self *Expr) Mul(v *Expr) *Expr { return combine(self, MUL, v) }
func (self *Expr) Div(v *Expr) *Expr { return combine(self, DIV, v) }
func (self *Expr) Mod(v *Expr) *Expr { return combine(self, MOD, v) }
func (self *Expr) And(v *Expr) *Expr { return combine(self, AND, v) }
func (self *Expr) Or(v *Expr) *Expr  { return combine(self, OR, v) }
func (self *Expr) Xor(v *Expr) *Expr { return combine(self, XOR, v) }
func (self *Expr) Shl(v *Expr) *Expr { return combine(self, SHL, v) }
func (self *Expr) Shr(v *Expr) *Expr { return combine(self, SHR, v) }
func (self *Expr) Pow(v *Expr) *Expr { return combine(self, POW, v) }
func (self *Expr) Not() *Expr        { return combine(self, NOT, nil) }
func (self *Expr) Neg() *Expr        { return combine(self, NEG, nil) }
/** Expression Evaluator **/
var binaryEvaluators = [256]func(int64, int64) (int64, error){
ADD: func(a, b int64) (int64, error) { return a + b, nil },
SUB: func(a, b int64) (int64, error) { return a - b, nil },
MUL: func(a, b int64) (int64, error) { return a * b, nil },
DIV: idiv,
MOD: imod,
AND: func(a, b int64) (int64, error) { return a & b, nil },
OR: func(a, b int64) (int64, error) { return a | b, nil },
XOR: func(a, b int64) (int64, error) { return a ^ b, nil },
SHL: func(a, b int64) (int64, error) { return a << b, nil },
SHR: func(a, b int64) (int64, error) { return a >> b, nil },
POW: ipow,
}
// eval reduces a compound expression: it evaluates the left operand, handles
// the unary operators NOT/NEG directly, and otherwise evaluates the right
// operand and dispatches through the binaryEvaluators table.
func (self *Expr) eval() (int64, error) {
    var lhs int64
    var rhs int64
    var err error
    var vfn func(int64, int64) (int64, error)
    /* evaluate LHS */
    if lhs, err = self.Left.Evaluate(); err != nil {
        return 0, err
    }
    /* check for unary operators */
    switch self.Op {
    case NOT:
        return self.unaryNot(lhs)
    case NEG:
        return self.unaryNeg(lhs)
    }
    /* check for operators */
    if vfn = binaryEvaluators[self.Op]; vfn == nil {
        panic("invalid operator: " + self.Op.String())
    }
    /* must be a binary expression */
    if self.Right == nil {
        panic("operator " + self.Op.String() + " is a binary operator")
    }
    /* evaluate RHS, and call the operator */
    if rhs, err = self.Right.Evaluate(); err != nil {
        return 0, err
    } else {
        return vfn(lhs, rhs)
    }
}
// unaryNot applies bitwise inversion to v, asserting the node has no right
// operand.
func (self *Expr) unaryNot(v int64) (int64, error) {
    if self.Right != nil {
        panic("operator Invert is an unary operator")
    }
    return ^v, nil
}
// unaryNeg applies arithmetic negation to v, asserting the node has no right
// operand.
func (self *Expr) unaryNeg(v int64) (int64, error) {
    if self.Right != nil {
        panic("operator Negate is an unary operator")
    }
    return -v, nil
}

View File

@@ -0,0 +1,53 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
import (
"fmt"
)
// SyntaxError represents a syntax error in the expression.
type SyntaxError struct {
Pos int
Reason string
}
// newSyntaxError builds a SyntaxError at the given position.
func newSyntaxError(pos int, reason string) *SyntaxError {
    e := new(SyntaxError)
    e.Pos = pos
    e.Reason = reason
    return e
}
// Error implements the error interface.
func (self *SyntaxError) Error() string {
    prefix := fmt.Sprintf("Syntax error at position %d", self.Pos)
    return prefix + ": " + self.Reason
}
// RuntimeError is an error which would occur at run time.
type RuntimeError struct {
Reason string
}
// newRuntimeError builds a RuntimeError with the given reason.
func newRuntimeError(reason string) *RuntimeError {
    e := new(RuntimeError)
    e.Reason = reason
    return e
}
// Error implements the error interface.
func (self *RuntimeError) Error() string {
    msg := "Runtime error: "
    msg += self.Reason
    return msg
}

View File

@@ -0,0 +1,67 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
import (
"fmt"
)
// idiv divides v by d, reporting a runtime error on division by zero.
func idiv(v int64, d int64) (int64, error) {
    if d == 0 {
        return 0, newRuntimeError("division by zero")
    }
    return v / d, nil
}
// imod computes v modulo d, reporting a runtime error on division by zero.
func imod(v int64, d int64) (int64, error) {
    if d == 0 {
        return 0, newRuntimeError("division by zero")
    }
    return v % d, nil
}
// ipow raises v to the power e by binary (fast) exponentiation.
// v must be non-negative and e must be non-negative; either violation yields
// a runtime error.
func ipow(v int64, e int64) (int64, error) {
    mul := v
    ret := int64(1)
    /* value must be 0 or positive */
    if v < 0 {
        return 0, newRuntimeError(fmt.Sprintf("negative base value: %d", v))
    }
    /* exponent must be non-negative */
    if e < 0 {
        return 0, newRuntimeError(fmt.Sprintf("negative exponent: %d", e))
    }
    /* fast power first round */
    if (e & 1) != 0 {
        ret *= mul
    }
    /* fast power remaining rounds: square mul, multiply in when bit is set */
    for e >>= 1; e != 0; e >>= 1 {
        if mul *= mul; (e & 1) != 0 {
            ret *= mul
        }
    }
    /* all done */
    return ret, nil
}

View File

@@ -0,0 +1,331 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
import (
"strconv"
"unicode"
"unsafe"
)
type _TokenKind uint8
const (
_T_end _TokenKind = iota + 1
_T_int
_T_punc
_T_name
)
const (
_OP2 = 0x80
_POW = _OP2 | '*'
_SHL = _OP2 | '<'
_SHR = _OP2 | '>'
)
type _Slice struct {
p unsafe.Pointer
n int
c int
}
type _Token struct {
pos int
ptr *rune
u64 uint64
tag _TokenKind
}
// str returns the token's rune payload as a string (only meaningful for
// _T_name tokens).
func (self _Token) str() (v string) {
    return string(self.rbuf())
}
// rbuf reconstructs the token's []rune without copying by writing the slice
// header directly: ptr is the data pointer and u64 holds the length (used as
// both len and cap). Safe only for tokens built by tokenName, which keeps
// the backing array alive via ptr.
func (self _Token) rbuf() (v []rune) {
    (*_Slice)(unsafe.Pointer(&v)).c = int(self.u64)
    (*_Slice)(unsafe.Pointer(&v)).n = int(self.u64)
    (*_Slice)(unsafe.Pointer(&v)).p = unsafe.Pointer(self.ptr)
    return
}
// tokenEnd builds an end-of-input token at position p.
func tokenEnd(p int) _Token {
    return _Token{
        pos: p,
        tag: _T_end,
    }
}
// tokenInt builds an integer-literal token with value v at position p.
func tokenInt(p int, v uint64) _Token {
    return _Token{
        pos: p,
        u64: v,
        tag: _T_int,
    }
}
// tokenPunc builds a punctuation token for operator rune v at position p
// (two-character operators carry the _OP2 bit).
func tokenPunc(p int, v rune) _Token {
    return _Token{
        pos: p,
        tag: _T_punc,
        u64: uint64(v),
    }
}
// tokenName builds an identifier token at position p; ptr/u64 reference v's
// backing array (see rbuf). v must be non-empty.
func tokenName(p int, v []rune) _Token {
    return _Token{
        pos: p,
        ptr: &v[0],
        tag: _T_name,
        u64: uint64(len(v)),
    }
}
// Repository represents a repository of Term's.
type Repository interface {
    Get(name string) (Term, error)
}

// Parser parses an expression string to it's AST representation.
type Parser struct {
    pos int    // current cursor position, in runes
    src []rune // expression source text
}

// binaryOps maps an operator code (the operator rune, with _OP2 set for
// the doubled forms) to the Expr method implementing it.
var binaryOps = [...]func(*Expr, *Expr) *Expr{
    '+':  (*Expr).Add,
    '-':  (*Expr).Sub,
    '*':  (*Expr).Mul,
    '/':  (*Expr).Div,
    '%':  (*Expr).Mod,
    '&':  (*Expr).And,
    '^':  (*Expr).Xor,
    '|':  (*Expr).Or,
    _SHL: (*Expr).Shl,
    _SHR: (*Expr).Shr,
    _POW: (*Expr).Pow,
}

// precedence lists the binary operators accepted at each precedence
// level, index 0 binding loosest ("<<", ">>") and the last binding
// tightest ("**").
var precedence = [...]map[int]bool{
    {_SHL: true, _SHR: true},
    {'|': true},
    {'^': true},
    {'&': true},
    {'+': true, '-': true},
    {'*': true, '/': true, '%': true},
    {_POW: true},
}
// ch returns the rune at the cursor without consuming it.
func (p *Parser) ch() rune {
    return p.src[p.pos]
}

// eof reports whether the cursor has reached the end of the source.
func (p *Parser) eof() bool {
    return len(p.src) <= p.pos
}

// rch consumes and returns the rune at the cursor.
func (p *Parser) rch() (v rune) {
    v = p.src[p.pos]
    p.pos++
    return v
}
// hex reports whether the character at the cursor may extend the
// numeric literal accumulated so far in ss.
func (p *Parser) hex(ss []rune) bool {
    cur := p.ch()
    /* a lone "0" may be followed by the hex marker 'x' / 'X' */
    if len(ss) == 1 && ss[0] == '0' {
        return unicode.ToLower(cur) == 'x'
    }
    /* inside a "0x..." literal any hexadecimal digit is acceptable */
    if len(ss) > 1 && unicode.ToLower(ss[1]) == 'x' {
        return ishexdigit(cur)
    }
    /* otherwise this is a plain decimal / octal literal */
    return unicode.IsDigit(cur)
}
// int scans the remaining characters of an integer literal that started
// at position p with prefix ss, then parses it; the base is
// auto-detected by ParseUint from the "0x" / "0" prefix.
func (self *Parser) int(p int, ss []rune) (_Token, error) {
    /* consume every character that may extend the literal */
    for !self.eof() && self.hex(ss) {
        ss = append(ss, self.rch())
    }
    /* parse the accumulated text into a value */
    val, err := strconv.ParseUint(string(ss), 0, 64)
    if err != nil {
        return _Token{}, err
    }
    return tokenInt(p, val), nil
}
// name scans the remaining identifier characters and returns a name
// token starting at position p with prefix ss.
func (self *Parser) name(p int, ss []rune) _Token {
    for {
        if self.eof() || !isident(self.ch()) {
            return tokenName(p, ss)
        }
        ss = append(ss, self.rch())
    }
}
// read classifies the single character ch located at position p and
// produces the matching token, consuming a second character for the
// doubled operators "**", "<<" and ">>".
func (self *Parser) read(p int, ch rune) (_Token, error) {
    switch {
    case isdigit(ch):
        return self.int(p, []rune{ch})
    case isident0(ch):
        return self.name(p, []rune{ch}), nil
    case isop2ch(ch) && !self.eof() && self.ch() == ch:
        // doubled operator: consume the second rune, tag with _OP2
        return tokenPunc(p, _OP2|self.rch()), nil
    case isop1ch(ch):
        return tokenPunc(p, ch), nil
    default:
        return _Token{}, newSyntaxError(self.pos, "invalid character "+strconv.QuoteRuneToASCII(ch))
    }
}
// next skips whitespace and returns the following token, or an end
// token once the source is exhausted.
func (self *Parser) next() (_Token, error) {
    for !self.eof() {
        p := self.pos
        c := self.rch()
        if unicode.IsSpace(c) {
            continue
        }
        return self.read(p, c)
    }
    return tokenEnd(self.pos), nil
}
// grab resolves the identifier token tk against repo and wraps the
// resulting term into an expression reference.
func (self *Parser) grab(tk _Token, repo Repository) (*Expr, error) {
    if repo == nil {
        return nil, newSyntaxError(tk.pos, "unresolved symbol: "+tk.str())
    }
    term, err := repo.Get(tk.str())
    if err != nil {
        return nil, err
    }
    return Ref(term), nil
}
// nest parses a parenthesised sub-expression; the opening '(' has
// already been consumed, so it evaluates the inner expression and then
// requires a closing ')'.
func (self *Parser) nest(nest int, repo Repository) (*Expr, error) {
    ret, err := self.expr(0, nest+1, repo)
    if err != nil {
        return nil, err
    }
    /* the nested expression must be terminated by a ')' */
    ntk, err := self.next()
    if err != nil {
        return nil, err
    }
    if ntk.tag != _T_punc || ntk.u64 != ')' {
        return nil, newSyntaxError(ntk.pos, "')' expected")
    }
    return ret, nil
}
// unit parses a primary expression: an integer literal, a symbol
// reference, a parenthesised expression, or a unary '+', '-' or '~'
// applied to another unit.
func (self *Parser) unit(nest int, repo Repository) (*Expr, error) {
    tk, err := self.next()
    if err != nil {
        return nil, err
    }
    if tk.tag == _T_int {
        return Int(int64(tk.u64)), nil
    }
    if tk.tag == _T_name {
        return self.grab(tk, repo)
    }
    if tk.tag == _T_punc {
        switch tk.u64 {
        case '(':
            return self.nest(nest, repo)
        case '+':
            return self.unit(nest, repo) // unary plus is a no-op
        case '-':
            return neg2(self.unit(nest, repo))
        case '~':
            return not2(self.unit(nest, repo))
        }
    }
    return nil, newSyntaxError(tk.pos, "integer, unary operator or nested expression expected")
}
// term parses a run of binary operators at precedence level prec: it
// first parses an operand at the next-tighter level, then repeatedly
// consumes operators of this level, folding each right-hand operand
// into the accumulated value (left-to-right).
func (self *Parser) term(prec int, nest int, repo Repository) (*Expr, error) {
    var err error
    var val *Expr
    /* parse the LHS operand */
    if val, err = self.expr(prec+1, nest, repo); err != nil {
        return nil, err
    }
    /* parse all the operators of the same precedence */
    for {
        var op int
        var rv *Expr
        var tk _Token
        /* peek the next token; remember the cursor so the token can be
         * pushed back if it does not belong to this level */
        pp := self.pos
        tk, err = self.next()
        /* check for errors */
        if err != nil {
            return nil, err
        }
        /* encountered EOF */
        if tk.tag == _T_end {
            return val, nil
        }
        /* must be an operator */
        if tk.tag != _T_punc {
            return nil, newSyntaxError(tk.pos, "operators expected")
        }
        /* check for the operator precedence; a token of another level
         * (including ')') is un-read by restoring the cursor */
        if op = int(tk.u64); !precedence[prec][op] {
            self.pos = pp
            return val, nil
        }
        /* evaluate the RHS operand, and combine the value */
        if rv, err = self.expr(prec+1, nest, repo); err != nil {
            return nil, err
        } else {
            val = binaryOps[op](val, rv)
        }
    }
}
// expr parses a (sub-)expression at binary-operator precedence level
// prec; once every level is exhausted it falls through to a primary
// unit.
func (self *Parser) expr(prec int, nest int, repo Repository) (*Expr, error) {
    if prec < len(precedence) {
        return self.term(prec, nest, repo)
    }
    return self.unit(nest, repo)
}
// Parse parses the expression, and returns it's AST tree.
func (self *Parser) Parse(repo Repository) (*Expr, error) {
    return self.expr(0, 0, repo)
}

// SetSource resets the expression parser and sets the expression source.
func (self *Parser) SetSource(src string) *Parser {
    self.src = []rune(src)
    self.pos = 0
    return self
}

View File

@@ -0,0 +1,42 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
import (
"sync"
)
var (
    // expressionPool caches freed Expr nodes for reuse.
    expressionPool sync.Pool
)

// newExpression returns a zeroed Expr, reusing a pooled node when one
// is available.
func newExpression() *Expr {
    v := expressionPool.Get()
    if v == nil {
        return new(Expr)
    }
    return resetExpression(v.(*Expr))
}

// freeExpression returns p to the pool for later reuse.
func freeExpression(p *Expr) {
    expressionPool.Put(p)
}

// resetExpression clears p back to its zero value and returns it.
func resetExpression(p *Expr) *Expr {
    *p = Expr{}
    return p
}

View File

@@ -0,0 +1,23 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
// Term represents a value that can Evaluate() into an integer.
type Term interface {
    // Free releases the term back to its owner once the expression no
    // longer needs it; implementations may pool or no-op.
    Free()
    // Evaluate computes the term's integer value.
    Evaluate() (int64, error)
}

View File

@@ -0,0 +1,77 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package expr
// op1ch flags every character that is a valid single-character operator
// or bracket.
var op1ch = [...]bool{
    '+': true,
    '-': true,
    '*': true,
    '/': true,
    '%': true,
    '&': true,
    '|': true,
    '^': true,
    '~': true,
    '(': true,
    ')': true,
}

// op2ch flags the characters that may be doubled to form the
// two-character operators "**", "<<" and ">>".
var op2ch = [...]bool{
    '*': true,
    '<': true,
    '>': true,
}
// neg2 applies arithmetic negation to v, propagating an upstream error
// unchanged; it lets a (value, error) pair be wrapped in a single call.
func neg2(v *Expr, err error) (*Expr, error) {
    if err != nil {
        return nil, err
    }
    return v.Neg(), nil
}

// not2 applies bitwise complement to v, propagating an upstream error
// unchanged.
func not2(v *Expr, err error) (*Expr, error) {
    if err != nil {
        return nil, err
    }
    return v.Not(), nil
}
// isop1ch reports whether ch is a single-character operator or bracket.
func isop1ch(ch rune) bool {
    if ch < 0 || int(ch) >= len(op1ch) {
        return false
    }
    return op1ch[ch]
}

// isop2ch reports whether ch may begin one of the doubled operators
// "**", "<<" or ">>".
func isop2ch(ch rune) bool {
    if ch < 0 || int(ch) >= len(op2ch) {
        return false
    }
    return op2ch[ch]
}
// isdigit reports whether ch is a decimal digit.
func isdigit(ch rune) bool {
    return '0' <= ch && ch <= '9'
}

// isident reports whether ch may appear inside an identifier.
func isident(ch rune) bool {
    return isident0(ch) || isdigit(ch)
}

// isident0 reports whether ch may start an identifier: an ASCII letter
// or an underscore.
func isident0(ch rune) bool {
    switch {
    case ch == '_':
        return true
    case 'a' <= ch && ch <= 'z':
        return true
    case 'A' <= ch && ch <= 'Z':
        return true
    default:
        return false
    }
}

// ishexdigit reports whether ch is a hexadecimal digit.
func ishexdigit(ch rune) bool {
    if isdigit(ch) {
        return true
    }
    lo := 'a' <= ch && ch <= 'f'
    hi := 'A' <= ch && ch <= 'F'
    return lo || hi
}

View File

@@ -0,0 +1,251 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"fmt"
)
// ISA represents an extension to x86-64 instruction set.
type ISA uint64

// Each ISA_* constant is a distinct bit in the ISA bit-set; values can
// be combined with '|' and tested with Arch.HasISA.
const (
    ISA_CPUID ISA = 1 << iota
    ISA_RDTSC
    ISA_RDTSCP
    ISA_CMOV
    ISA_MOVBE
    ISA_POPCNT
    ISA_LZCNT
    ISA_TBM
    ISA_BMI
    ISA_BMI2
    ISA_ADX
    ISA_MMX
    ISA_MMX_PLUS
    ISA_FEMMS
    ISA_3DNOW
    ISA_3DNOW_PLUS
    ISA_SSE
    ISA_SSE2
    ISA_SSE3
    ISA_SSSE3
    ISA_SSE4A
    ISA_SSE4_1
    ISA_SSE4_2
    ISA_FMA3
    ISA_FMA4
    ISA_XOP
    ISA_F16C
    ISA_AVX
    ISA_AVX2
    ISA_AVX512F
    ISA_AVX512BW
    ISA_AVX512DQ
    ISA_AVX512VL
    ISA_AVX512PF
    ISA_AVX512ER
    ISA_AVX512CD
    ISA_AVX512VBMI
    ISA_AVX512IFMA
    ISA_AVX512VPOPCNTDQ
    ISA_AVX512_4VNNIW
    ISA_AVX512_4FMAPS
    ISA_PREFETCH
    ISA_PREFETCHW
    ISA_PREFETCHWT1
    ISA_CLFLUSH
    ISA_CLFLUSHOPT
    ISA_CLWB
    ISA_CLZERO
    ISA_RDRAND
    ISA_RDSEED
    ISA_PCLMULQDQ
    ISA_AES
    ISA_SHA
    ISA_MONITOR
    ISA_MONITORX
    ISA_ALL = ^ISA(0) // every extension enabled
)
// _ISA_NAMES maps each ISA bit to its canonical display name, as
// returned by ISA.String and accepted by ParseISA.
var _ISA_NAMES = map[ISA]string{
    ISA_CPUID:           "CPUID",
    ISA_RDTSC:           "RDTSC",
    ISA_RDTSCP:          "RDTSCP",
    ISA_CMOV:            "CMOV",
    ISA_MOVBE:           "MOVBE",
    ISA_POPCNT:          "POPCNT",
    ISA_LZCNT:           "LZCNT",
    ISA_TBM:             "TBM",
    ISA_BMI:             "BMI",
    ISA_BMI2:            "BMI2",
    ISA_ADX:             "ADX",
    ISA_MMX:             "MMX",
    ISA_MMX_PLUS:        "MMX+",
    ISA_FEMMS:           "FEMMS",
    ISA_3DNOW:           "3dnow!",
    ISA_3DNOW_PLUS:      "3dnow!+",
    ISA_SSE:             "SSE",
    ISA_SSE2:            "SSE2",
    ISA_SSE3:            "SSE3",
    ISA_SSSE3:           "SSSE3",
    ISA_SSE4A:           "SSE4A",
    ISA_SSE4_1:          "SSE4.1",
    ISA_SSE4_2:          "SSE4.2",
    ISA_FMA3:            "FMA3",
    ISA_FMA4:            "FMA4",
    ISA_XOP:             "XOP",
    ISA_F16C:            "F16C",
    ISA_AVX:             "AVX",
    ISA_AVX2:            "AVX2",
    ISA_AVX512F:         "AVX512F",
    ISA_AVX512BW:        "AVX512BW",
    ISA_AVX512DQ:        "AVX512DQ",
    ISA_AVX512VL:        "AVX512VL",
    ISA_AVX512PF:        "AVX512PF",
    ISA_AVX512ER:        "AVX512ER",
    ISA_AVX512CD:        "AVX512CD",
    ISA_AVX512VBMI:      "AVX512VBMI",
    ISA_AVX512IFMA:      "AVX512IFMA",
    ISA_AVX512VPOPCNTDQ: "AVX512VPOPCNTDQ",
    ISA_AVX512_4VNNIW:   "AVX512_4VNNIW",
    ISA_AVX512_4FMAPS:   "AVX512_4FMAPS",
    ISA_PREFETCH:        "PREFETCH",
    ISA_PREFETCHW:       "PREFETCHW",
    ISA_PREFETCHWT1:     "PREFETCHWT1",
    ISA_CLFLUSH:         "CLFLUSH",
    ISA_CLFLUSHOPT:      "CLFLUSHOPT",
    ISA_CLWB:            "CLWB",
    ISA_CLZERO:          "CLZERO",
    ISA_RDRAND:          "RDRAND",
    ISA_RDSEED:          "RDSEED",
    ISA_PCLMULQDQ:       "PCLMULQDQ",
    ISA_AES:             "AES",
    ISA_SHA:             "SHA",
    ISA_MONITOR:         "MONITOR",
    ISA_MONITORX:        "MONITORX",
}

// _ISA_MAPPING resolves a canonical ISA name back to its bit value.
// It is the exact inverse of _ISA_NAMES and is derived from it at
// start-up, so the two tables can never drift apart (previously it was
// a second hand-maintained literal duplicating every entry).
var _ISA_MAPPING = make(map[string]ISA, len(_ISA_NAMES))

func init() {
    for isa, name := range _ISA_NAMES {
        _ISA_MAPPING[name] = isa
    }
}
// String implements the fmt.Stringer interface, returning the canonical
// name of a single ISA bit.
func (self ISA) String() string {
    v, ok := _ISA_NAMES[self]
    if !ok {
        return fmt.Sprintf("(invalid: %#x)", uint64(self))
    }
    return v
}

// ParseISA parses name into ISA, it will panic if the name is invalid.
func ParseISA(name string) ISA {
    v, ok := _ISA_MAPPING[name]
    if !ok {
        panic("invalid ISA name: " + name)
    }
    return v
}
// Arch represents the x86_64 architecture.
type Arch struct {
    isa ISA // bit-set of enabled ISA extensions
}

// DefaultArch is the default architecture with all ISA enabled.
var DefaultArch = CreateArch()

// CreateArch creates a new Arch with all ISA enabled.
func CreateArch() *Arch {
    return new(Arch).EnableISA(ISA_ALL)
}
// HasISA checks if a particular ISA was enabled.
func (self *Arch) HasISA(isa ISA) bool {
    return self.isa&isa != 0
}

// EnableISA enables a particular ISA.
func (self *Arch) EnableISA(isa ISA) *Arch {
    self.isa = self.isa | isa
    return self
}

// DisableISA disables a particular ISA.
func (self *Arch) DisableISA(isa ISA) *Arch {
    self.isa = self.isa &^ isa
    return self
}

// CreateProgram creates a new empty program.
func (self *Arch) CreateProgram() *Program {
    return newProgram(self)
}

View File

@@ -0,0 +1,16 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

View File

@@ -0,0 +1,79 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"reflect"
"unsafe"
)
// _GoType mirrors the in-memory layout of the Go runtime type
// descriptor, accessed via unsafe casts in efaceOf.
type _GoType struct {
    size   uintptr
    pdata  uintptr
    hash   uint32
    flags  uint8
    align  uint8
    falign uint8
    kflags uint8
    traits unsafe.Pointer
    gcdata *byte
    str    int32
    ptrx   int32
}

const (
    // _KindMask extracts the reflect.Kind bits from kflags.
    _KindMask = (1 << 5) - 1
)

// kind returns the reflect.Kind encoded in the low bits of kflags.
func (self *_GoType) kind() reflect.Kind {
    return reflect.Kind(self.kflags & _KindMask)
}

// _GoSlice mirrors the runtime slice header.
type _GoSlice struct {
    ptr unsafe.Pointer
    len int
    cap int
}

// _GoEface mirrors the runtime representation of an empty interface:
// a type descriptor plus a pointer to the boxed value.
type _GoEface struct {
    vt  *_GoType
    ptr unsafe.Pointer
}
// kind returns the reflect.Kind of the boxed value, or reflect.Invalid
// for a nil interface.
func (self *_GoEface) kind() reflect.Kind {
    if self.vt == nil {
        return reflect.Invalid
    }
    return self.vt.kind()
}

// toInt64 widens the boxed integer value to int64 based on the size
// recorded in its type descriptor (8, 4, 2, else 1 byte).
func (self *_GoEface) toInt64() int64 {
    switch self.vt.size {
    case 8:
        return *(*int64)(self.ptr)
    case 4:
        return int64(*(*int32)(self.ptr))
    case 2:
        return int64(*(*int16)(self.ptr))
    default:
        return int64(*(*int8)(self.ptr))
    }
}

// efaceOf reinterprets an empty interface as its runtime representation.
func efaceOf(v interface{}) _GoEface {
    return *(*_GoEface)(unsafe.Pointer(&v))
}

View File

@@ -0,0 +1,836 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"encoding/binary"
"math"
)
/** Operand Encoding Helpers **/

// imml extracts the low 4 bits of an integer immediate.
func imml(v interface{}) byte {
    return byte(toImmAny(v) & 0x0f)
}

// relv converts a branch target into a numeric displacement; labels are
// not resolved yet, so they contribute a zero placeholder.
func relv(v interface{}) int64 {
    if _, ok := v.(*Label); ok {
        return 0
    }
    if r, ok := v.(RelativeOffset); ok {
        return int64(r)
    }
    panic("invalid relative offset")
}
// addr unpacks the effective address stored inside a *MemoryOperand,
// returning whichever of MemoryAddress, Offset or Reference it holds.
func addr(v interface{}) interface{} {
    a := v.(*MemoryOperand).Addr
    switch a.Type {
    case Memory:
        return a.Memory
    case Offset:
        return a.Offset
    case Reference:
        return a.Reference
    }
    panic("invalid memory operand type")
}
// bcode returns the EVEX broadcast bit for a memory operand: 1 when
// broadcasting is requested, 0 otherwise.
func bcode(v interface{}) byte {
    m, ok := v.(*MemoryOperand)
    if !ok {
        panic("v is not a memory operand")
    }
    if m.Broadcast != 0 {
        return 1
    }
    return 0
}
// vcode returns the register number of a vector register operand,
// unwrapping a masked register to its underlying register.
func vcode(v interface{}) byte {
    switch r := v.(type) {
    case XMMRegister:
        return byte(r)
    case YMMRegister:
        return byte(r)
    case ZMMRegister:
        return byte(r)
    case MaskedRegister:
        return vcode(r.Reg)
    }
    panic("v is not a vector register")
}
// kcode returns the opmask register selector (EVEX.aaa) of a maskable
// operand; a bare vector register carries no mask and yields 0.
func kcode(v interface{}) byte {
    switch r := v.(type) {
    case KRegister:
        return byte(r)
    case XMMRegister, YMMRegister, ZMMRegister:
        return 0
    case RegisterMask:
        return byte(r.K)
    case MaskedRegister:
        return byte(r.Mask.K)
    case *MemoryOperand:
        return toKcodeMem(r)
    }
    panic("v is not a maskable operand")
}
// zcode returns the zeroing-masking bit (EVEX.z) of a maskable operand;
// bare registers without a mask always merge, i.e. 0.
func zcode(v interface{}) byte {
    switch r := v.(type) {
    case KRegister, XMMRegister, YMMRegister, ZMMRegister:
        return 0
    case RegisterMask:
        return toZcodeRegM(r)
    case MaskedRegister:
        return toZcodeRegM(r.Mask)
    case *MemoryOperand:
        return toZcodeMem(r)
    }
    panic("v is not a maskable operand")
}
// lcode returns the low 3 bits of a register number — the value that
// goes into a ModRM.reg / ModRM.rm / SIB field.
func lcode(v interface{}) byte {
    switch r := v.(type) {
    case Register8:
        return byte(r & 0x07)
    case Register16:
        return byte(r & 0x07)
    case Register32:
        return byte(r & 0x07)
    case Register64:
        return byte(r & 0x07)
    case KRegister:
        return byte(r & 0x07)
    case MMRegister:
        return byte(r & 0x07)
    case XMMRegister:
        return byte(r & 0x07)
    case YMMRegister:
        return byte(r & 0x07)
    case ZMMRegister:
        return byte(r & 0x07)
    case MaskedRegister:
        return lcode(r.Reg)
    default:
        panic("v is not a register")
    }
}

// hcode returns bit 3 of a register number — the REX/VEX-style
// extension bit (B/X/R) for registers 8-15.
func hcode(v interface{}) byte {
    switch r := v.(type) {
    case Register8:
        return byte(r>>3) & 1
    case Register16:
        return byte(r>>3) & 1
    case Register32:
        return byte(r>>3) & 1
    case Register64:
        return byte(r>>3) & 1
    case KRegister:
        return byte(r>>3) & 1
    case MMRegister:
        return byte(r>>3) & 1
    case XMMRegister:
        return byte(r>>3) & 1
    case YMMRegister:
        return byte(r>>3) & 1
    case ZMMRegister:
        return byte(r>>3) & 1
    case MaskedRegister:
        return hcode(r.Reg)
    default:
        panic("v is not a register")
    }
}

// ecode returns bit 4 of a register number — the EVEX-style second
// extension bit for registers 16-31.
func ecode(v interface{}) byte {
    switch r := v.(type) {
    case Register8:
        return byte(r>>4) & 1
    case Register16:
        return byte(r>>4) & 1
    case Register32:
        return byte(r>>4) & 1
    case Register64:
        return byte(r>>4) & 1
    case KRegister:
        return byte(r>>4) & 1
    case MMRegister:
        return byte(r>>4) & 1
    case XMMRegister:
        return byte(r>>4) & 1
    case YMMRegister:
        return byte(r>>4) & 1
    case ZMMRegister:
        return byte(r>>4) & 1
    case MaskedRegister:
        return ecode(r.Reg)
    default:
        panic("v is not a register")
    }
}

// hlcode returns the low 4 bits of a register number, as used in the
// VEX/EVEX vvvv field; the legacy high-byte 8-bit registers are
// rejected (see toHLcodeReg8).
func hlcode(v interface{}) byte {
    switch r := v.(type) {
    case Register8:
        return toHLcodeReg8(r)
    case Register16:
        return byte(r & 0x0f)
    case Register32:
        return byte(r & 0x0f)
    case Register64:
        return byte(r & 0x0f)
    case KRegister:
        return byte(r & 0x0f)
    case MMRegister:
        return byte(r & 0x0f)
    case XMMRegister:
        return byte(r & 0x0f)
    case YMMRegister:
        return byte(r & 0x0f)
    case ZMMRegister:
        return byte(r & 0x0f)
    case MaskedRegister:
        return hlcode(r.Reg)
    default:
        panic("v is not a register")
    }
}

// ehcode returns bits 3-4 of a register number — both extension bits
// combined into a 2-bit value.
func ehcode(v interface{}) byte {
    switch r := v.(type) {
    case Register8:
        return byte(r>>3) & 0x03
    case Register16:
        return byte(r>>3) & 0x03
    case Register32:
        return byte(r>>3) & 0x03
    case Register64:
        return byte(r>>3) & 0x03
    case KRegister:
        return byte(r>>3) & 0x03
    case MMRegister:
        return byte(r>>3) & 0x03
    case XMMRegister:
        return byte(r>>3) & 0x03
    case YMMRegister:
        return byte(r>>3) & 0x03
    case ZMMRegister:
        return byte(r>>3) & 0x03
    case MaskedRegister:
        return ehcode(r.Reg)
    default:
        panic("v is not a register")
    }
}
// toImmAny converts an arbitrary integer-typed value into an int64
// immediate, panicking when the value is not an integer.
func toImmAny(v interface{}) int64 {
    x, ok := asInt64(v)
    if !ok {
        panic("value is not an integer")
    }
    return x
}

// toHcodeOpt is hcode for optional operands: a nil operand (an absent
// base or index register) contributes a zero extension bit.
func toHcodeOpt(v interface{}) byte {
    if v != nil {
        return hcode(v)
    }
    return 0
}
// toEcodeVMM returns the high extension bit (ecode) of a vector index
// register, or the fallback x when v is not a vector register.
func toEcodeVMM(v interface{}, x byte) byte {
    switch r := v.(type) {
    case XMMRegister:
        return ecode(r)
    case YMMRegister:
        return ecode(r)
    case ZMMRegister:
        return ecode(r)
    }
    return x
}

// toKcodeMem returns the opmask selector of a memory operand, or 0 when
// the operand is unmasked.
func toKcodeMem(v *MemoryOperand) byte {
    if v.Masked {
        return byte(v.Mask.K)
    }
    return 0
}
// toZcodeMem returns the zeroing-masking bit (EVEX.z) for a memory
// operand: 1 only when the operand is masked AND zeroing was requested,
// otherwise 0 (merging / unmasked).
//
// Fix: the previous condition (`!v.Masked || v.Mask.Z -> 0, else 1`)
// returned the inverse of Mask.Z for masked operands — 0 when zeroing
// was requested and 1 when merging — contradicting toZcodeRegM, which
// maps Z directly to the bit.
func toZcodeMem(v *MemoryOperand) byte {
    if v.Masked && v.Mask.Z {
        return 1
    }
    return 0
}
// toZcodeRegM converts a register mask into its EVEX.z bit.
func toZcodeRegM(v RegisterMask) byte {
    if !v.Z {
        return 0
    }
    return 1
}

// toHLcodeReg8 returns the 4-bit register number of an 8-bit register;
// the legacy high-byte registers AH/BH/CH/DH have no 4-bit encoding and
// panic here.
func toHLcodeReg8(v Register8) byte {
    switch v {
    case AH, BH, CH, DH:
        panic("ah/bh/ch/dh registers never use 4-bit encoding")
    }
    return byte(v & 0x0f)
}
/** Instruction Encoding Helpers **/

const (
    // _N_inst is the capacity of the per-instruction encoding buffer,
    // in bytes.
    _N_inst = 16
)

const (
    // _F_rel1 / _F_rel4 flag encodings that end with a 1-byte or
    // 4-byte relative displacement, respectively.
    _F_rel1 = 1 << iota
    _F_rel4
)

// _Encoding is the scratch encoder for one instruction form: a fixed
// output buffer plus the form-specific encoder function that fills it.
type _Encoding struct {
    len     int           // number of bytes already emitted into bytes
    flags   int           // _F_rel* flags
    bytes   [_N_inst]byte // encoded instruction bytes
    encoder func(m *_Encoding, v []interface{})
}
// buf returns a writable view of the instruction buffer starting at the
// current length, after checking that n more bytes still fit.
func (self *_Encoding) buf(n int) []byte {
    i := self.len
    if i+n > _N_inst {
        panic("instruction too long")
    }
    return self.bytes[i:]
}

// emit appends a single byte to the instruction.
func (self *_Encoding) emit(v byte) {
    self.buf(1)[0] = v
    self.len++
}

// imm1 appends an 8-bit immediate value.
func (self *_Encoding) imm1(v int64) {
    self.emit(byte(v))
}

// imm2 appends a 16-bit immediate value, little-endian.
func (self *_Encoding) imm2(v int64) {
    binary.LittleEndian.PutUint16(self.buf(2), uint16(v))
    self.len += 2
}

// imm4 appends a 32-bit immediate value, little-endian.
func (self *_Encoding) imm4(v int64) {
    binary.LittleEndian.PutUint32(self.buf(4), uint32(v))
    self.len += 4
}

// imm8 appends a 64-bit immediate value, little-endian.
func (self *_Encoding) imm8(v int64) {
    binary.LittleEndian.PutUint64(self.buf(8), uint64(v))
    self.len += 8
}
// vex2 encodes a 2-byte VEX prefix, falling back to the 3-byte form
// when the B or X extension bit is needed.
//
// 2-byte VEX prefix (requires VEX.W = 0, VEX.mmmmm = 0b00001 and
// VEX.B = VEX.X = 0):
//
//	Byte 0: 0xc5
//	Byte 1: | 7: ~R | 6-3: ~vvvv | 2: L | 1-0: pp |
//
// 3-byte VEX prefix:
//
//	Byte 0: 0xc4
//	Byte 1: | 7: ~R | 6: ~X | 5: ~B | 4-0: 0b00001 |
//	Byte 2: | 7: 0 (W) | 6-3: ~vvvv | 2: L | 1-0: pp |
//
// lpp packs the vector-length bit L and the legacy-prefix code pp; r is
// the ModRM.reg extension bit (inverted on emission); rm (register,
// memory address, label, relative offset or nil) supplies the B / X
// extension bits; vvvv is the extra source register.
func (self *_Encoding) vex2(lpp byte, r byte, rm interface{}, vvvv byte) {
    var b byte
    var x byte
    /* VEX.R must be a single-bit mask */
    if r > 1 {
        panic("VEX.R must be a 1-bit mask")
    }
    /* VEX.Lpp must be a 3-bit mask */
    if lpp&^0b111 != 0 {
        panic("VEX.Lpp must be a 3-bit mask")
    }
    /* VEX.vvvv must be a 4-bit mask */
    if vvvv&^0b1111 != 0 {
        panic("VEX.vvvv must be a 4-bit mask")
    }
    /* encode the RM bits if any */
    if rm != nil {
        switch v := rm.(type) {
        case *Label:
            break
        case Register:
            b = hcode(v)
        case MemoryAddress:
            b, x = toHcodeOpt(v.Base), toHcodeOpt(v.Index)
        case RelativeOffset:
            break
        default:
            panic("rm is expected to be a register or a memory address")
        }
    }
    /* if VEX.B and VEX.X are zeroes, 2-byte VEX prefix can be used */
    if x == 0 && b == 0 {
        self.emit(0xc5)
        self.emit(0xf8 ^ (r << 7) ^ (vvvv << 3) ^ lpp)
    } else {
        self.emit(0xc4)
        self.emit(0xe1 ^ (r << 7) ^ (x << 6) ^ (b << 5))
        self.emit(0x78 ^ (vvvv << 3) ^ lpp)
    }
}
// vex3 encodes a 3-byte VEX or XOP prefix.
//
// 3-byte VEX/XOP prefix:
//
//	Byte 0: 0xc4 (VEX) / 0x8f (XOP)
//	Byte 1: | 7: ~R | 6: ~X | 5: ~B | 4-0: mmmmm |
//	Byte 2: | 7: W | 6-3: ~vvvv | 2: L | 1-0: pp |
//
// esc selects VEX (0xc4) or XOP (0x8f); mmmmm is the opcode-map
// selector; wlpp packs W (bit 7), L (bit 2) and pp (bits 0-1); r is the
// ModRM.reg extension bit; rm supplies the B / X extension bits; vvvv
// is the extra source register.
func (self *_Encoding) vex3(esc byte, mmmmm byte, wlpp byte, r byte, rm interface{}, vvvv byte) {
    var b byte
    var x byte
    /* VEX.R must be a single-bit mask */
    if r > 1 {
        panic("VEX.R must be a 1-bit mask")
    }
    /* VEX.vvvv must be a 4-bit mask */
    if vvvv&^0b1111 != 0 {
        panic("VEX.vvvv must be a 4-bit mask")
    }
    /* escape must be a 3-byte VEX (0xc4) or XOP (0x8f) prefix */
    if esc != 0xc4 && esc != 0x8f {
        panic("escape must be a 3-byte VEX (0xc4) or XOP (0x8f) prefix")
    }
    /* VEX.W____Lpp is expected to have no bits set except 0, 1, 2 and 7 */
    if wlpp&^0b10000111 != 0 {
        panic("VEX.W____Lpp is expected to have no bits set except 0, 1, 2 and 7")
    }
    /* VEX.m-mmmm is expected to be a 5-bit mask */
    if mmmmm&^0b11111 != 0 {
        panic("VEX.m-mmmm is expected to be a 5-bit mask")
    }
    /* encode the RM bits */
    switch v := rm.(type) {
    case *Label:
        break
    case MemoryAddress:
        b, x = toHcodeOpt(v.Base), toHcodeOpt(v.Index)
    case RelativeOffset:
        break
    default:
        panic("rm is expected to be a register or a memory address")
    }
    /* encode the 3-byte VEX or XOP prefix */
    self.emit(esc)
    self.emit(0xe0 ^ (r << 7) ^ (x << 6) ^ (b << 5) ^ mmmmm)
    self.emit(0x78 ^ (vvvv << 3) ^ wlpp)
}
// evex encodes a 4-byte EVEX prefix.
//
//	Byte 0: 0x62
//	Byte 1 (P0): | 7: ~R | 6: ~X | 5: ~B | 4: ~R' | 3-2: 00 | 1-0: mm |
//	Byte 2 (P1): | 7: W | 6-3: ~vvvv | 2: 1 | 1-0: pp |
//	Byte 3 (P2): | 7: z | 6-5: L'L | 4: b | 3: ~V' | 2-0: aaa |
//
// Parameters:
//
//	mm    - 2-bit opcode-map selector (EVEX.mm)
//	w1pp  - EVEX.W in bit 7, the mandatory 1 in bit 2, pp in bits 0-1
//	ll    - 2-bit vector length / rounding control (EVEX.L'L)
//	rr    - 2-bit reg extension: bit 0 is R, bit 1 is R'
//	rm    - RM operand (register, memory address, label, relative
//	        offset or nil); supplies the B / X / V' extension bits
//	vvvvv - 5-bit non-destructive source register (EVEX.v'vvvv)
//	aaa   - 3-bit opmask register selector (EVEX.aaa)
//	zz    - zeroing-masking bit (EVEX.z)
//	bb    - broadcast / rounding / SAE control bit (EVEX.b)
//
// Fix: P2 bit 4 carries EVEX.b (the bb argument), not the base-register
// extension bit b — b lives, inverted, in P0 bit 5. The previous code
// emitted `(b << 4)` into P2 and never used bb after validating it.
func (self *_Encoding) evex(mm byte, w1pp byte, ll byte, rr byte, rm interface{}, vvvvv byte, aaa byte, zz byte, bb byte) {
    var b byte
    var x byte
    /* EVEX.b must be a single-bit mask */
    if bb > 1 {
        panic("EVEX.b must be a 1-bit mask")
    }
    /* EVEX.z must be a single-bit mask */
    if zz > 1 {
        panic("EVEX.z must be a 1-bit mask")
    }
    /* EVEX.mm must be a 2-bit mask */
    if mm&^0b11 != 0 {
        panic("EVEX.mm must be a 2-bit mask")
    }
    /* EVEX.L'L must be a 2-bit mask */
    if ll&^0b11 != 0 {
        panic("EVEX.L'L must be a 2-bit mask")
    }
    /* EVEX.R'R must be a 2-bit mask */
    if rr&^0b11 != 0 {
        panic("EVEX.R'R must be a 2-bit mask")
    }
    /* EVEX.aaa must be a 3-bit mask */
    if aaa&^0b111 != 0 {
        panic("EVEX.aaa must be a 3-bit mask")
    }
    /* EVEX.v'vvvv must be a 5-bit mask */
    if vvvvv&^0b11111 != 0 {
        panic("EVEX.v'vvvv must be a 5-bit mask")
    }
    /* EVEX.W____1pp is expected to have no bits set except 0, 1, 2, and 7 */
    if w1pp&^0b10000011 != 0b100 {
        panic("EVEX.W____1pp is expected to have no bits set except 0, 1, 2, and 7")
    }
    /* extract bits from EVEX.R'R and EVEX.v'vvvv */
    r1, r0 := rr>>1, rr&1
    v1, v0 := vvvvv>>4, vvvvv&0b1111
    /* encode the RM bits if any */
    if rm != nil {
        switch m := rm.(type) {
        case *Label:
            break
        case Register:
            b, x = hcode(m), ecode(m)
        case MemoryAddress:
            b, x, v1 = toHcodeOpt(m.Base), toHcodeOpt(m.Index), toEcodeVMM(m.Index, v1)
        case RelativeOffset:
            break
        default:
            panic("rm is expected to be a register or a memory address")
        }
    }
    /* EVEX prefix bytes; P2 bit 4 is EVEX.b (the bb argument) */
    p0 := (r0 << 7) | (x << 6) | (b << 5) | (r1 << 4) | mm
    p1 := (v0 << 3) | w1pp
    p2 := (zz << 7) | (ll << 5) | (bb << 4) | (v1 << 3) | aaa
    /* p0: invert RXBR' (bits 4-7)
     * p1: invert vvvv (bits 3-6)
     * p2: invert V' (bit 3) */
    self.emit(0x62)
    self.emit(p0 ^ 0xf0)
    self.emit(p1 ^ 0x78)
    self.emit(p2 ^ 0x08)
}
// rexm encodes a mandatory REX prefix — one that is emitted even when
// all of its extension bits are zero.
func (self *_Encoding) rexm(w byte, r byte, rm interface{}) {
    var b byte
    var x byte
    /* REX.R and REX.W must each be a single bit */
    if r&^1 != 0 {
        panic("REX.R must be 0 or 1")
    }
    if w&^1 != 0 {
        panic("REX.W must be 0 or 1")
    }
    /* derive REX.B / REX.X from the RM operand */
    switch v := rm.(type) {
    case *Label:
        // unresolved target: no extension bits
    case MemoryAddress:
        b, x = toHcodeOpt(v.Base), toHcodeOpt(v.Index)
    case RelativeOffset:
        // RIP-relative target: no extension bits
    default:
        panic("rm is expected to be a register or a memory address")
    }
    /* assemble the prefix byte: 0100 W R X B */
    self.emit(0x40 | (w << 3) | (r << 2) | (x << 1) | b)
}
// rexo encodes an optional REX prefix: it is skipped entirely when
// every extension bit is zero, unless force is set.
func (self *_Encoding) rexo(r byte, rm interface{}, force bool) {
    var b byte
    var x byte
    /* REX.R must be a single bit */
    if r&^1 != 0 {
        panic("REX.R must be 0 or 1")
    }
    /* derive REX.B / REX.X from the RM operand */
    switch v := rm.(type) {
    case *Label:
        // unresolved target: no extension bits
    case Register:
        b = hcode(v)
    case MemoryAddress:
        b, x = toHcodeOpt(v.Base), toHcodeOpt(v.Index)
    case RelativeOffset:
        // RIP-relative target: no extension bits
    default:
        panic("rm is expected to be a register or a memory address")
    }
    /* omit the prefix when R, X and B are all zero and it is not forced */
    if force || r|x|b != 0 {
        self.emit(0x40 | (r << 2) | (x << 1) | b)
    }
}
// mrsd encodes the ModR/M byte, the optional SIB byte and the
// displacement for the operand rm, with the register/opcode-extension
// value reg in ModRM.reg. disp8v is the EVEX compressed-displacement
// scale: an 8-bit displacement is used only when the displacement is an
// exact multiple of disp8v and the quotient fits in int8.
//
// ModR/M byte
//
//	+----------------+---------------+---------------+
//	| Bits 6-7: Mode | Bits 3-5: Reg | Bits 0-2: R/M |
//	+----------------+---------------+---------------+
//
// SIB byte
//
//	+-----------------+-----------------+----------------+
//	| Bits 6-7: Scale | Bits 3-5: Index | Bits 0-2: Base |
//	+-----------------+-----------------+----------------+
func (self *_Encoding) mrsd(reg byte, rm interface{}, disp8v int32) {
    var ok bool
    var mm MemoryAddress
    var ro RelativeOffset
    /* ModRM encodes the lower 3-bit of the register */
    if reg > 7 {
        panic("invalid register bits")
    }
    /* check the displacement scale: must be a power of two up to 64 */
    switch disp8v {
    case 1:
        break
    case 2:
        break
    case 4:
        break
    case 8:
        break
    case 16:
        break
    case 32:
        break
    case 64:
        break
    default:
        panic("invalid displacement size")
    }
    /* special case: unresolved labels, assuming a zero offset */
    if _, ok = rm.(*Label); ok {
        self.emit(0x05 | (reg << 3))
        self.imm4(0)
        return
    }
    /* special case: RIP-relative offset
     * ModRM.Mode == 0 and ModeRM.R/M == 5 indicates (rip + disp32) addressing */
    if ro, ok = rm.(RelativeOffset); ok {
        self.emit(0x05 | (reg << 3))
        self.imm4(int64(ro))
        return
    }
    /* must be a generic memory address */
    if mm, ok = rm.(MemoryAddress); !ok {
        panic("rm must be a memory address")
    }
    /* absolute addressing, encoded as disp(%rbp,%rsp,1) */
    if mm.Base == nil && mm.Index == nil {
        self.emit(0x04 | (reg << 3))
        self.emit(0x25)
        self.imm4(int64(mm.Displacement))
        return
    }
    /* no SIB byte needed: base-only addressing, and the base's low bits
     * are not 0b100 (which would signal a SIB byte) */
    if mm.Index == nil && lcode(mm.Base) != 0b100 {
        cc := lcode(mm.Base)
        dv := mm.Displacement
        /* ModRM.Mode == 0 (no displacement) */
        if dv == 0 && mm.Base != RBP && mm.Base != R13 {
            if cc == 0b101 {
                panic("rbp/r13 is not encodable as a base register (interpreted as disp32 address)")
            } else {
                self.emit((reg << 3) | cc)
                return
            }
        }
        /* ModRM.Mode == 1 (8-bit displacement), scaled by disp8v */
        if dq := dv / disp8v; dq >= math.MinInt8 && dq <= math.MaxInt8 && dv%disp8v == 0 {
            self.emit(0x40 | (reg << 3) | cc)
            self.imm1(int64(dq))
            return
        }
        /* ModRM.Mode == 2 (32-bit displacement) */
        self.emit(0x80 | (reg << 3) | cc)
        self.imm4(int64(mm.Displacement))
        return
    }
    /* all encodings below use ModRM.R/M = 4 (0b100) to indicate the presence of SIB */
    if mm.Index == RSP {
        panic("rsp is not encodable as an index register (interpreted as no index)")
    }
    /* index = 4 (0b100) denotes no-index encoding */
    var scale byte
    var index byte = 0x04
    /* encode the scale byte */
    if mm.Scale != 0 {
        switch mm.Scale {
        case 1:
            scale = 0
        case 2:
            scale = 1
        case 4:
            scale = 2
        case 8:
            scale = 3
        default:
            panic("invalid scale value")
        }
    }
    /* encode the index byte */
    if mm.Index != nil {
        index = lcode(mm.Index)
    }
    /* SIB.Base = 5 (0b101) and ModRM.Mode = 0 indicates no-base encoding with disp32 */
    if mm.Base == nil {
        self.emit((reg << 3) | 0b100)
        self.emit((scale << 6) | (index << 3) | 0b101)
        self.imm4(int64(mm.Displacement))
        return
    }
    /* base L-code & displacement value */
    cc := lcode(mm.Base)
    dv := mm.Displacement
    /* ModRM.Mode == 0 (no displacement); cc == 0b101 would be read as
     * the no-base form above, so it must carry a displacement */
    if dv == 0 && cc != 0b101 {
        self.emit((reg << 3) | 0b100)
        self.emit((scale << 6) | (index << 3) | cc)
        return
    }
    /* ModRM.Mode == 1 (8-bit displacement), scaled by disp8v */
    if dq := dv / disp8v; dq >= math.MinInt8 && dq <= math.MaxInt8 && dv%disp8v == 0 {
        self.emit(0x44 | (reg << 3))
        self.emit((scale << 6) | (index << 3) | cc)
        self.imm1(int64(dq))
        return
    }
    /* ModRM.Mode == 2 (32-bit displacement) */
    self.emit(0x84 | (reg << 3))
    self.emit((scale << 6) | (index << 3) | cc)
    self.imm4(int64(mm.Displacement))
}
// encode invokes the encoder to encode this instruction.
// It resets the scratch buffer first and returns the number of bytes
// produced by the form-specific encoder.
func (self *_Encoding) encode(v []interface{}) int {
    self.len = 0
    self.encoder(self, v)
    return self.len
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,24 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by "mkasm_amd64.py", DO NOT EDIT.
package x86_64
const (
	// _N_args is the maximum number of operands any supported instruction takes.
	_N_args = 5
	// _N_forms is the maximum number of alternative encodings per instruction.
	_N_forms = 23
)

View File

@@ -0,0 +1,665 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"errors"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"sync/atomic"
)
// RelativeOffset represents an RIP-relative offset.
type RelativeOffset int32

// String implements the fmt.Stringer interface.
// A zero offset renders as a bare "(%rip)"; any other value is printed as a
// signed decimal displacement before "(%rip)", AT&T style.
func (self RelativeOffset) String() string {
	if self != 0 {
		return fmt.Sprintf("%d(%%rip)", self)
	}
	return "(%rip)"
}
// RoundingControl represents a floating-point rounding option.
type RoundingControl uint8

const (
	// RN_SAE represents "Round Nearest", which is the default rounding option.
	RN_SAE RoundingControl = iota

	// RD_SAE represents "Round Down".
	RD_SAE

	// RU_SAE represents "Round Up".
	RU_SAE

	// RZ_SAE represents "Round towards Zero".
	RZ_SAE
)

// _RC_NAMES maps each rounding control to its assembly mnemonic.
var _RC_NAMES = map[RoundingControl]string{
	RN_SAE: "rn-sae",
	RD_SAE: "rd-sae",
	RU_SAE: "ru-sae",
	RZ_SAE: "rz-sae",
}

// String implements the fmt.Stringer interface.
// It panics when the value is not one of the four declared constants.
func (self RoundingControl) String() string {
	name, found := _RC_NAMES[self]
	if !found {
		panic("invalid RoundingControl value")
	}
	return name
}
// ExceptionControl represents the "Suppress All Exceptions" flag.
type ExceptionControl uint8

const (
	// SAE represents the flag "Suppress All Exceptions" for floating point operations.
	SAE ExceptionControl = iota
)

// String implements the fmt.Stringer interface; SAE always renders as "sae".
func (ExceptionControl) String() string {
	return "sae"
}
// AddressType indicates which kind of value that an Addressable object contains.
type AddressType uint

const (
	// None indicates the Addressable does not contain any addressable value.
	None AddressType = iota

	// Memory indicates the Addressable contains a memory address.
	Memory

	// Offset indicates the Addressable contains an RIP-relative offset.
	Offset

	// Reference indicates the Addressable contains a label reference.
	Reference
)

// Disposable is a type of object that can be Free'd manually.
type Disposable interface {
	Free()
}

// Label represents a location within the program.
type Label struct {
	// refs is the reference count managed by Retain / Free.
	refs int64
	Name string
	// Dest is the instruction this label is pinned to; nil until linked.
	Dest *Instruction
}

// offset computes the RIP-relative displacement from the end of an
// instruction (located at p, with encoded size n) to this label's target.
// It panics if the label has not been linked yet.
func (self *Label) offset(p uintptr, n int) RelativeOffset {
	if self.Dest == nil {
		panic("unresolved label: " + self.Name)
	} else {
		return RelativeOffset(self.Dest.pc - p - uintptr(n))
	}
}

// Free decreases the reference count of a Label, if the
// refcount drops to 0, the Label will be recycled.
func (self *Label) Free() {
	if atomic.AddInt64(&self.refs, -1) == 0 {
		//freeLabel(self)
	}
}

// String implements the fmt.Stringer interface.
// Unlinked labels print as "name(%rip)"; linked labels also show the target PC.
func (self *Label) String() string {
	if self.Dest == nil {
		return fmt.Sprintf("%s(%%rip)", self.Name)
	} else {
		return fmt.Sprintf("%s(%%rip)@%#x", self.Name, self.Dest.pc)
	}
}

// Retain increases the reference count of a Label.
func (self *Label) Retain() *Label {
	atomic.AddInt64(&self.refs, 1)
	return self
}

// Evaluate implements the interface expr.Term.
// It yields the linked instruction's PC, or an error for unresolved labels.
func (self *Label) Evaluate() (int64, error) {
	if self.Dest != nil {
		return int64(self.Dest.pc), nil
	} else {
		return 0, errors.New("unresolved label: " + self.Name)
	}
}

// Addressable is a union to represent an addressable operand.
// Type selects which of the other three fields is meaningful.
type Addressable struct {
	Type      AddressType
	Memory    MemoryAddress
	Offset    RelativeOffset
	Reference *Label
}

// String implements the fmt.Stringer interface.
func (self *Addressable) String() string {
	switch self.Type {
	case None:
		return "(not addressable)"
	case Memory:
		return self.Memory.String()
	case Offset:
		return self.Offset.String()
	case Reference:
		return self.Reference.String()
	default:
		return "(invalid addressable)"
	}
}
// MemoryOperand represents a memory operand for an instruction.
type MemoryOperand struct {
	// refs is the reference count managed by Retain / Free.
	refs int64
	// Size is the operand size in bytes; 0 means "unspecified".
	Size int
	Addr Addressable
	// Mask / Masked carry the optional AVX-512 opmask register.
	Mask   RegisterMask
	Masked bool
	// Broadcast is the AVX-512 {1toN} element count; 0 means no broadcast.
	Broadcast uint8
}

const (
	_Sizes = 0b10000000100010111 // bit-mask for valid sizes (0, 1, 2, 4, 8, 16)
)

// isVMX reports whether this is a vector memory operand with an XMM index.
func (self *MemoryOperand) isVMX(evex bool) bool {
	return self.Addr.Type == Memory && self.Addr.Memory.isVMX(evex)
}

// isVMY reports whether this is a vector memory operand with a YMM index.
func (self *MemoryOperand) isVMY(evex bool) bool {
	return self.Addr.Type == Memory && self.Addr.Memory.isVMY(evex)
}

// isVMZ reports whether this is a vector memory operand with a ZMM index.
func (self *MemoryOperand) isVMZ() bool {
	return self.Addr.Type == Memory && self.Addr.Memory.isVMZ()
}

// isMem reports whether this operand is a plain (non-vector-indexed) memory
// reference; the Broadcast value must also be one of the _Sizes bits.
func (self *MemoryOperand) isMem() bool {
	if (_Sizes & (1 << self.Broadcast)) == 0 {
		return false
	} else if self.Addr.Type == Memory {
		return self.Addr.Memory.isMem()
	} else if self.Addr.Type == Offset {
		return true
	} else if self.Addr.Type == Reference {
		return true
	} else {
		return false
	}
}

// isSize reports whether the operand size is unspecified or exactly n bytes.
func (self *MemoryOperand) isSize(n int) bool {
	return self.Size == 0 || self.Size == n
}

// isBroadcast reports whether the operand is an n-byte element broadcast {1to b}.
func (self *MemoryOperand) isBroadcast(n int, b uint8) bool {
	return self.Size == n && self.Broadcast == b
}

// formatMask renders the opmask suffix, or "" when unmasked.
func (self *MemoryOperand) formatMask() string {
	if !self.Masked {
		return ""
	} else {
		return self.Mask.String()
	}
}

// formatBroadcast renders the "{1toN}" suffix, or "" when not broadcasting.
func (self *MemoryOperand) formatBroadcast() string {
	if self.Broadcast == 0 {
		return ""
	} else {
		return fmt.Sprintf("{1to%d}", self.Broadcast)
	}
}

// ensureAddrValid panics if the contained address is malformed.
func (self *MemoryOperand) ensureAddrValid() {
	switch self.Addr.Type {
	case None:
		break
	case Memory:
		self.Addr.Memory.EnsureValid()
	case Offset:
		break
	case Reference:
		break
	default:
		panic("invalid address type")
	}
}

// ensureSizeValid panics unless Size is one of 0, 1, 2, 4, 8 or 16.
func (self *MemoryOperand) ensureSizeValid() {
	if (_Sizes & (1 << self.Size)) == 0 {
		panic("invalid memory operand size")
	}
}

// ensureBroadcastValid panics unless Broadcast is one of 0, 1, 2, 4, 8 or 16.
func (self *MemoryOperand) ensureBroadcastValid() {
	if (_Sizes & (1 << self.Broadcast)) == 0 {
		panic("invalid memory operand broadcast")
	}
}

// Free decreases the reference count of a MemoryOperand, if the
// refcount drops to 0, the Label will be recycled.
func (self *MemoryOperand) Free() {
	if atomic.AddInt64(&self.refs, -1) == 0 {
		//freeMemoryOperand(self)
	}
}

// String implements the fmt.Stringer interface.
func (self *MemoryOperand) String() string {
	return self.Addr.String() + self.formatMask() + self.formatBroadcast()
}

// Retain increases the reference count of a MemoryOperand.
func (self *MemoryOperand) Retain() *MemoryOperand {
	atomic.AddInt64(&self.refs, 1)
	return self
}

// EnsureValid checks if the memory operand is valid, if not, it panics.
func (self *MemoryOperand) EnsureValid() {
	self.ensureAddrValid()
	self.ensureSizeValid()
	self.ensureBroadcastValid()
}

// MemoryAddress represents a memory address.
type MemoryAddress struct {
	// Base must be a 64-bit register when present (nil means no base).
	Base Register
	// Index may be a 64-bit or vector register; nil means no index.
	Index Register
	// Scale must be 0 (no index), 1, 2, 4 or 8.
	Scale        uint8
	Displacement int32
}

const (
	_Scales = 0b100010111 // bit-mask for valid scales (0, 1, 2, 4, 8)
)

// isVMX reports whether the address uses an XMM index (EVEX extends to XMM16+).
func (self *MemoryAddress) isVMX(evex bool) bool {
	return self.isMemBase() && (self.Index == nil || isXMM(self.Index) || (evex && isEVEXXMM(self.Index)))
}

// isVMY reports whether the address uses a YMM index (EVEX extends to YMM16+).
func (self *MemoryAddress) isVMY(evex bool) bool {
	return self.isMemBase() && (self.Index == nil || isYMM(self.Index) || (evex && isEVEXYMM(self.Index)))
}

// isVMZ reports whether the address uses a ZMM index.
func (self *MemoryAddress) isVMZ() bool {
	return self.isMemBase() && (self.Index == nil || isZMM(self.Index))
}

// isMem reports whether the address is a plain memory reference (64-bit index).
func (self *MemoryAddress) isMem() bool {
	return self.isMemBase() && (self.Index == nil || isReg64(self.Index))
}

// isMemBase validates the base/scale/index invariants shared by all forms.
func (self *MemoryAddress) isMemBase() bool {
	return (self.Base == nil || isReg64(self.Base)) && // `Base` must be 64-bit if present
		(self.Scale == 0) == (self.Index == nil) && // `Scale` and `Index` depends on each other
		(_Scales&(1<<self.Scale)) != 0 // `Scale` can only be 0, 1, 2, 4 or 8
}

// String implements the fmt.Stringer interface.
// It renders AT&T syntax: disp(%base,%index,scale), omitting absent parts
// (a scale of 1 is implicit and not printed).
func (self *MemoryAddress) String() string {
	var dp int
	var sb strings.Builder

	/* the displacement part */
	if dp = int(self.Displacement); dp != 0 {
		sb.WriteString(strconv.Itoa(dp))
	}

	/* the base register */
	if sb.WriteByte('('); self.Base != nil {
		sb.WriteByte('%')
		sb.WriteString(self.Base.String())
	}

	/* index is optional */
	if self.Index != nil {
		sb.WriteString(",%")
		sb.WriteString(self.Index.String())

		/* scale is also optional */
		if self.Scale >= 2 {
			sb.WriteByte(',')
			sb.WriteString(strconv.Itoa(int(self.Scale)))
		}
	}

	/* close the bracket */
	sb.WriteByte(')')
	return sb.String()
}

// EnsureValid checks if the memory address is valid, if not, it panics.
func (self *MemoryAddress) EnsureValid() {
	if !self.isMemBase() || (self.Index != nil && !isIndexable(self.Index)) {
		panic("not a valid memory address")
	}
}

// Ref constructs a memory reference to a label.
func Ref(ref *Label) (v *MemoryOperand) {
	v = CreateMemoryOperand()
	v.Addr.Type = Reference
	v.Addr.Reference = ref
	return
}

// Abs constructs a simple memory address that represents absolute addressing.
func Abs(disp int32) *MemoryOperand {
	return Sib(nil, nil, 0, disp)
}

// Ptr constructs a simple memory operand with base and displacement.
func Ptr(base Register, disp int32) *MemoryOperand {
	return Sib(base, nil, 0, disp)
}

// Sib constructs a simple memory operand that represents a complete memory address.
// It panics (via EnsureValid) when the base/index/scale combination is invalid.
func Sib(base Register, index Register, scale uint8, disp int32) (v *MemoryOperand) {
	v = CreateMemoryOperand()
	v.Addr.Type = Memory
	v.Addr.Memory.Base = base
	v.Addr.Memory.Index = index
	v.Addr.Memory.Scale = scale
	v.Addr.Memory.Displacement = disp
	v.EnsureValid()
	return
}
/** Operand Matching Helpers **/
const _IntMask = (1 << reflect.Int) |
(1 << reflect.Int8) |
(1 << reflect.Int16) |
(1 << reflect.Int32) |
(1 << reflect.Int64) |
(1 << reflect.Uint) |
(1 << reflect.Uint8) |
(1 << reflect.Uint16) |
(1 << reflect.Uint32) |
(1 << reflect.Uint64) |
(1 << reflect.Uintptr)
func isInt(k reflect.Kind) bool {
return (_IntMask & (1 << k)) != 0
}
// asInt64 extracts an int64 from any integer-kinded value.
// Register and control-flag types (see isSpecial) are deliberately rejected
// even though their underlying kinds are integers.
func asInt64(v interface{}) (int64, bool) {
	if isSpecial(v) {
		return 0, false
	} else if x := efaceOf(v); isInt(x.kind()) {
		return x.toInt64(), true
	} else {
		return 0, false
	}
}

// inRange reports whether v is an integer value within [low, high].
func inRange(v interface{}, low int64, high int64) bool {
	x, ok := asInt64(v)
	return ok && x >= low && x <= high
}
// isSpecial reports whether v is one of the assembler's own operand types
// (registers and control flags). These must never be treated as plain
// integer immediates even though their underlying types are integers.
func isSpecial(v interface{}) bool {
	switch v.(type) {
	case Register8, Register16, Register32, Register64:
		return true
	case KRegister, MMRegister, XMMRegister, YMMRegister, ZMMRegister:
		return true
	case RelativeOffset, RoundingControl, ExceptionControl:
		return true
	default:
		return false
	}
}
/* operand-form matchers: each predicate reports whether v is acceptable for
 * one operand slot of an instruction form. Naming follows the Intel manual:
 * immN = immediate, relN = RIP-relative offset, regN = GPR, mN = memory of N
 * bytes, k/kz = opmask / zeroing-mask variants, bcst = embedded broadcast. */

// isIndexable reports whether v may be used as a memory index register.
func isIndexable(v interface{}) bool {
	return isZMM(v) || isReg64(v) || isEVEXXMM(v) || isEVEXYMM(v)
}

/* immediates and constants; the signed-min/unsigned-max bounds deliberately
 * accept both signed and unsigned spellings of the same bit width */
func isImm4(v interface{}) bool { return inRange(v, 0, 15) }
func isImm8(v interface{}) bool { return inRange(v, math.MinInt8, math.MaxUint8) }
func isImm16(v interface{}) bool { return inRange(v, math.MinInt16, math.MaxUint16) }
func isImm32(v interface{}) bool { return inRange(v, math.MinInt32, math.MaxUint32) }
func isImm64(v interface{}) bool { _, r := asInt64(v); return r }
func isConst1(v interface{}) bool { x, r := asInt64(v); return r && x == 1 }
func isConst3(v interface{}) bool { x, r := asInt64(v); return r && x == 3 }

/* relative offsets and labels */
func isRel8(v interface{}) bool {
	x, r := v.(RelativeOffset)
	return r && x >= math.MinInt8 && x <= math.MaxInt8
}
func isRel32(v interface{}) bool { _, r := v.(RelativeOffset); return r }
func isLabel(v interface{}) bool { _, r := v.(*Label); return r }

/* general-purpose registers */
func isReg8(v interface{}) bool { _, r := v.(Register8); return r }

// isReg8REX matches SPL/BPL/SIL/DIL (the low-byte registers that require a
// REX prefix); bit 7 marks the legacy high-byte forms (AH..BH), which are
// excluded here.
func isReg8REX(v interface{}) bool {
	x, r := v.(Register8)
	return r && (x&0x80) == 0 && x >= SPL
}
func isReg16(v interface{}) bool { _, r := v.(Register16); return r }
func isReg32(v interface{}) bool { _, r := v.(Register32); return r }
func isReg64(v interface{}) bool { _, r := v.(Register64); return r }

/* vector registers; the plain forms accept only XMM0-15 / YMM0-15 while the
 * EVEX forms also accept the AVX-512 extended registers 16-31 */
func isMM(v interface{}) bool { _, r := v.(MMRegister); return r }
func isXMM(v interface{}) bool { x, r := v.(XMMRegister); return r && x <= XMM15 }
func isEVEXXMM(v interface{}) bool { _, r := v.(XMMRegister); return r }
func isXMMk(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isXMM(v) || (r && isXMM(x.Reg) && !x.Mask.Z)
}
func isXMMkz(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isXMM(v) || (r && isXMM(x.Reg))
}
func isYMM(v interface{}) bool { x, r := v.(YMMRegister); return r && x <= YMM15 }
func isEVEXYMM(v interface{}) bool { _, r := v.(YMMRegister); return r }
func isYMMk(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isYMM(v) || (r && isYMM(x.Reg) && !x.Mask.Z)
}
func isYMMkz(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isYMM(v) || (r && isYMM(x.Reg))
}
func isZMM(v interface{}) bool { _, r := v.(ZMMRegister); return r }
func isZMMk(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isZMM(v) || (r && isZMM(x.Reg) && !x.Mask.Z)
}
func isZMMkz(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isZMM(v) || (r && isZMM(x.Reg))
}
func isK(v interface{}) bool { _, r := v.(KRegister); return r }
func isKk(v interface{}) bool {
	x, r := v.(MaskedRegister)
	return isK(v) || (r && isK(x.Reg) && !x.Mask.Z)
}

/* memory operands: isM = plain memory, isMk = optionally merge-masked,
 * isMkz = optionally zero-masked; sized variants also check Size */
func isM(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isMem() && x.Broadcast == 0 && !x.Masked
}
func isMk(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isMem() && x.Broadcast == 0 && !(x.Masked && x.Mask.Z)
}
func isMkz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isMem() && x.Broadcast == 0
}
func isM8(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(1)
}
func isM16(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(2)
}
func isM16kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(2)
}
func isM32(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(4)
}
func isM32k(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMk(v) && x.isSize(4)
}
func isM32kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(4)
}
func isM64(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(8)
}
func isM64k(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMk(v) && x.isSize(8)
}
func isM64kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(8)
}
func isM128(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(16)
}
func isM128kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(16)
}
func isM256(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(32)
}
func isM256kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(32)
}
func isM512(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isM(v) && x.isSize(64)
}
func isM512kz(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && isMkz(v) && x.isSize(64)
}

/* "memory or embedded-broadcast" forms: e.g. isM64M32bcst accepts either a
 * 64-bit memory operand or a 4-byte element broadcast to 2 lanes */
func isM64M32bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM64(v) || (r && x.isBroadcast(4, 2))
}
func isM128M32bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM128(v) || (r && x.isBroadcast(4, 4))
}
func isM256M32bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM256(v) || (r && x.isBroadcast(4, 8))
}
func isM512M32bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM512(v) || (r && x.isBroadcast(4, 16))
}
func isM128M64bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM128(v) || (r && x.isBroadcast(8, 2))
}
func isM256M64bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM256(v) || (r && x.isBroadcast(8, 4))
}
func isM512M64bcst(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return isM512(v) || (r && x.isBroadcast(8, 8))
}

/* vector-indexed (gather/scatter) memory operands */
func isVMX(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isVMX(false) && !x.Masked
}
func isEVEXVMX(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isVMX(true) && !x.Masked
}
func isVMXk(v interface{}) bool { x, r := v.(*MemoryOperand); return r && x.isVMX(true) }
func isVMY(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isVMY(false) && !x.Masked
}
func isEVEXVMY(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isVMY(true) && !x.Masked
}
func isVMYk(v interface{}) bool { x, r := v.(*MemoryOperand); return r && x.isVMY(true) }
func isVMZ(v interface{}) bool {
	x, r := v.(*MemoryOperand)
	return r && x.isVMZ() && !x.Masked
}
func isVMZk(v interface{}) bool { x, r := v.(*MemoryOperand); return r && x.isVMZ() }
func isSAE(v interface{}) bool { _, r := v.(ExceptionControl); return r }
func isER(v interface{}) bool { _, r := v.(RoundingControl); return r }

// isImmExt reports whether v fits in [min, max] either directly or after
// being interpreted as the unsigned `ext`-byte representation of a value in
// that range (i.e. a value whose truncation to `ext` bytes sign-extends back
// into range). NOTE(review): for ext == 8 the shift count is 64, which Go
// defines to yield 0 — confirm that behavior is intended for 8-byte operands.
func isImmExt(v interface{}, ext int, min int64, max int64) bool {
	if x, ok := asInt64(v); !ok {
		return false
	} else if m := int64(1) << (8 * ext); x < m && x >= m+min {
		return true
	} else {
		return x <= max && x >= min
	}
}

// isImm8Ext matches an imm8 that will be sign-extended to `ext` bytes.
func isImm8Ext(v interface{}, ext int) bool {
	return isImmExt(v, ext, math.MinInt8, math.MaxInt8)
}

// isImm32Ext matches an imm32 that will be sign-extended to `ext` bytes.
func isImm32Ext(v interface{}, ext int) bool {
	return isImmExt(v, ext, math.MinInt32, math.MaxInt32)
}

View File

@@ -0,0 +1,54 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
// CreateLabel creates a new Label, it may allocate a new one or grab one from a pool.
// The returned label starts with a reference count of 1.
func CreateLabel(name string) *Label {
	p := new(Label)

	/* initialize the label */
	p.refs = 1
	p.Name = name
	return p
}

// newProgram allocates a Program bound to the given architecture.
func newProgram(arch *Arch) *Program {
	p := new(Program)

	/* initialize the program */
	p.arch = arch
	return p
}

// newInstruction allocates an Instruction with the given mnemonic and operands.
func newInstruction(name string, argc int, argv Operands) *Instruction {
	p := new(Instruction)

	/* initialize the instruction */
	p.name = name
	p.argc = argc
	p.argv = argv
	return p
}

// CreateMemoryOperand creates a new MemoryOperand, it may allocate a new one or grab one from a pool.
// The returned operand starts with a reference count of 1.
func CreateMemoryOperand() *MemoryOperand {
	p := new(MemoryOperand)

	/* initialize the memory operand */
	p.refs = 1
	return p
}

View File

@@ -0,0 +1,584 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"fmt"
"math"
"math/bits"
"github.com/bytedance/sonic/loader/internal/iasm/expr"
)
type (
	// _PseudoType identifies a pseudo-instruction (assembler directive).
	_PseudoType int

	// _InstructionEncoder appends one instruction to a Program.
	_InstructionEncoder func(*Program, ...interface{}) *Instruction
)

const (
	// starts at iota + 1 so the zero value means "not a pseudo-instruction"
	_PseudoNop _PseudoType = iota + 1
	_PseudoByte
	_PseudoWord
	_PseudoLong
	_PseudoQuad
	_PseudoData
	_PseudoAlign
)
// String returns the assembler directive spelling for this pseudo-instruction
// type; it panics on any value outside the declared constants.
func (self _PseudoType) String() string {
	names := map[_PseudoType]string{
		_PseudoNop:   ".nop",
		_PseudoByte:  ".byte",
		_PseudoWord:  ".word",
		_PseudoLong:  ".long",
		_PseudoQuad:  ".quad",
		_PseudoData:  ".data",
		_PseudoAlign: ".align",
	}
	if name, ok := names[self]; ok {
		return name
	}
	panic("unreachable")
}
// _Pseudo carries the payload of a pseudo-instruction; which fields are
// meaningful depends on kind (data for .data, uint for .align, expr for the
// value directives and optional .align padding).
type _Pseudo struct {
	kind _PseudoType
	data []byte
	uint uint64
	expr *expr.Expr
}

// free releases the attached expression, if any.
func (self *_Pseudo) free() {
	if self.expr != nil {
		self.expr.Free()
	}
}

// encode appends this pseudo-instruction's bytes to m (when m is non-nil)
// and returns its encoded size; pc is needed only for .align sizing.
func (self *_Pseudo) encode(m *[]byte, pc uintptr) int {
	switch self.kind {
	case _PseudoNop:
		return 0
	case _PseudoByte:
		self.encodeByte(m)
		return 1
	case _PseudoWord:
		self.encodeWord(m)
		return 2
	case _PseudoLong:
		self.encodeLong(m)
		return 4
	case _PseudoQuad:
		self.encodeQuad(m)
		return 8
	case _PseudoData:
		self.encodeData(m)
		return len(self.data)
	case _PseudoAlign:
		self.encodeAlign(m, pc)
		return self.alignSize(pc)
	default:
		panic("invalid pseudo instruction")
	}
}

// evalExpr evaluates the attached expression and panics unless the result
// lies within [low, high].
func (self *_Pseudo) evalExpr(low int64, high int64) int64 {
	if v, err := self.expr.Evaluate(); err != nil {
		panic(err)
	} else if v < low || v > high {
		panic(fmt.Sprintf("expression out of range [%d, %d]: %d", low, high, v))
	} else {
		return v
	}
}

// alignSize returns the number of padding bytes needed to align pc up to
// self.uint, which must be a power of two.
func (self *_Pseudo) alignSize(pc uintptr) int {
	if !ispow2(self.uint) {
		panic(fmt.Sprintf("alignment should be a power of 2, not %d", self.uint))
	} else {
		return align(int(pc), bits.TrailingZeros64(self.uint)) - int(pc)
	}
}

// encodeData appends the raw .data bytes.
func (self *_Pseudo) encodeData(m *[]byte) {
	if m != nil {
		*m = append(*m, self.data...)
	}
}

// encodeByte appends the expression value as a single byte.
func (self *_Pseudo) encodeByte(m *[]byte) {
	if m != nil {
		append8(m, byte(self.evalExpr(math.MinInt8, math.MaxUint8)))
	}
}

// encodeWord appends the expression value as a little-endian uint16.
func (self *_Pseudo) encodeWord(m *[]byte) {
	if m != nil {
		append16(m, uint16(self.evalExpr(math.MinInt16, math.MaxUint16)))
	}
}

// encodeLong appends the expression value as a little-endian uint32.
func (self *_Pseudo) encodeLong(m *[]byte) {
	if m != nil {
		append32(m, uint32(self.evalExpr(math.MinInt32, math.MaxUint32)))
	}
}

// encodeQuad appends the expression value as a little-endian uint64.
func (self *_Pseudo) encodeQuad(m *[]byte) {
	if m != nil {
		if v, err := self.expr.Evaluate(); err != nil {
			panic(err)
		} else {
			append64(m, uint64(v))
		}
	}
}

// encodeAlign appends padding up to the alignment boundary, filled with 0 or
// with the evaluated padding expression.
func (self *_Pseudo) encodeAlign(m *[]byte, pc uintptr) {
	if m != nil {
		if self.expr == nil {
			expandmm(m, self.alignSize(pc), 0)
		} else {
			expandmm(m, self.alignSize(pc), byte(self.evalExpr(math.MinInt8, math.MaxUint8)))
		}
	}
}

// Operands represents a sequence of operand required by an instruction.
type Operands [_N_args]interface{}

// InstructionDomain represents the domain of an instruction.
type InstructionDomain uint8

const (
	DomainGeneric InstructionDomain = iota
	DomainMMXSSE
	DomainAVX
	DomainFMA
	DomainCrypto
	DomainMask
	DomainAMDSpecific
	DomainMisc
	DomainPseudo
)

type (
	// _BranchType classifies an instruction for the branch-relaxation passes.
	_BranchType uint8
)

const (
	_B_none _BranchType = iota
	_B_conditional
	_B_unconditional
)

// Instruction represents an unencoded instruction.
type Instruction struct {
	// next links instructions into a Program's singly-linked list.
	next *Instruction
	// pc is the program counter assigned during assembly.
	pc uintptr
	// nb is the encoded size in bytes of this instruction.
	nb int
	// len is the number of candidate encoding forms filled in `forms`.
	len  int
	argc int
	name string
	argv Operands
	forms  [_N_forms]_Encoding
	pseudo _Pseudo
	branch _BranchType
	domain InstructionDomain
	prefix []byte
}

// add registers one candidate encoding form for this instruction.
func (self *Instruction) add(flags int, encoder func(m *_Encoding, v []interface{})) {
	self.forms[self.len].flags = flags
	self.forms[self.len].encoder = encoder
	self.len++
}

// free releases the instruction's operands and pseudo payload.
func (self *Instruction) free() {
	self.clear()
	self.pseudo.free()
	//freeInstruction(self)
}

// clear frees every operand that implements Disposable.
func (self *Instruction) clear() {
	for i := 0; i < self.argc; i++ {
		if v, ok := self.argv[i].(Disposable); ok {
			v.Free()
		}
	}
}

// check reports whether encoding form e is applicable to the current first
// operand (rel8 forms need a near offset, rel32 forms a far offset or label).
func (self *Instruction) check(e *_Encoding) bool {
	if (e.flags & _F_rel1) != 0 {
		return isRel8(self.argv[0])
	} else if (e.flags & _F_rel4) != 0 {
		return isRel32(self.argv[0]) || isLabel(self.argv[0])
	} else {
		return true
	}
}
// encode selects the shortest applicable encoding form for this instruction,
// appends its bytes to m (when m is non-nil), and returns the total encoded
// size including any explicit prefixes. It also records that size in self.nb.
func (self *Instruction) encode(m *[]byte) int {
	n := math.MaxInt64
	p := (*_Encoding)(nil)

	/* encode prefixes if any */
	if self.nb = len(self.prefix); m != nil {
		*m = append(*m, self.prefix...)
	}

	/* check for pseudo-instructions */
	if self.pseudo.kind != 0 {
		self.nb += self.pseudo.encode(m, self.pc)
		return self.nb
	}

	/* find the shortest encoding */
	for i := 0; i < self.len; i++ {
		if e := &self.forms[i]; self.check(e) {
			if v := e.encode(self.argv[:self.argc]); v < n {
				n = v
				p = e
			}
		}
	}

	/* fail fast with a descriptive message when no form matches the operands,
	 * instead of dereferencing a nil encoding (or silently adding a garbage
	 * MaxInt64 length when only sizing with m == nil) */
	if p == nil {
		panic("no valid encoding for instruction: " + self.name)
	}

	/* add to buffer if needed */
	if m != nil {
		*m = append(*m, p.bytes[:n]...)
	}

	/* update the instruction length */
	self.nb += n
	return self.nb
}
/** Instruction Prefixes **/

const (
	// x86 legacy prefix bytes: segment overrides and LOCK.
	_P_cs   = 0x2e
	_P_ds   = 0x3e
	_P_es   = 0x26
	_P_fs   = 0x64
	_P_gs   = 0x65
	_P_ss   = 0x36
	_P_lock = 0xf0
)

// CS overrides the memory operation of this instruction to CS.
func (self *Instruction) CS() *Instruction {
	self.prefix = append(self.prefix, _P_cs)
	return self
}

// DS overrides the memory operation of this instruction to DS,
// this is the default section for most instructions if not specified.
func (self *Instruction) DS() *Instruction {
	self.prefix = append(self.prefix, _P_ds)
	return self
}

// ES overrides the memory operation of this instruction to ES.
func (self *Instruction) ES() *Instruction {
	self.prefix = append(self.prefix, _P_es)
	return self
}

// FS overrides the memory operation of this instruction to FS.
func (self *Instruction) FS() *Instruction {
	self.prefix = append(self.prefix, _P_fs)
	return self
}

// GS overrides the memory operation of this instruction to GS.
func (self *Instruction) GS() *Instruction {
	self.prefix = append(self.prefix, _P_gs)
	return self
}

// SS overrides the memory operation of this instruction to SS.
func (self *Instruction) SS() *Instruction {
	self.prefix = append(self.prefix, _P_ss)
	return self
}

// LOCK causes the processor's LOCK# signal to be asserted during execution of
// the accompanying instruction (turns the instruction into an atomic instruction).
// In a multiprocessor environment, the LOCK# signal insures that the processor
// has exclusive use of any shared memory while the signal is asserted.
func (self *Instruction) LOCK() *Instruction {
	self.prefix = append(self.prefix, _P_lock)
	return self
}

/** Basic Instruction Properties **/

// Name returns the instruction name.
func (self *Instruction) Name() string {
	return self.name
}

// Domain returns the domain of this instruction.
func (self *Instruction) Domain() InstructionDomain {
	return self.domain
}

// Operands returns the operands of this instruction.
func (self *Instruction) Operands() []interface{} {
	return self.argv[:self.argc]
}

// Program represents a sequence of instructions.
type Program struct {
	arch *Arch
	// head/tail of the singly-linked instruction list.
	head *Instruction
	tail *Instruction
}

const (
	_N_near       = 2 // near-branch (-128 ~ +127) takes 2 bytes to encode
	_N_far_cond   = 6 // conditional far-branch takes 6 bytes to encode
	_N_far_uncond = 5 // unconditional far-branch takes 5 bytes to encode
)

// clear frees every instruction in the program.
func (self *Program) clear() {
	for p, q := self.head, self.head; p != nil; p = q {
		q = p.next
		p.free()
	}
}

// alloc appends a fresh instruction to the program's list and returns it.
func (self *Program) alloc(name string, argc int, argv Operands) *Instruction {
	p := self.tail
	q := newInstruction(name, argc, argv)

	/* attach to tail if any */
	if p != nil {
		p.next = q
	} else {
		self.head = q
	}

	/* set the new tail */
	self.tail = q
	return q
}

// pseudo appends a pseudo-instruction of the given kind.
func (self *Program) pseudo(kind _PseudoType) (p *Instruction) {
	p = self.alloc(kind.String(), 0, Operands{})
	p.domain = DomainPseudo
	p.pseudo.kind = kind
	return
}

// require panics unless the target architecture has the given ISA enabled.
func (self *Program) require(isa ISA) {
	if !self.arch.HasISA(isa) {
		panic("ISA '" + isa.String() + "' was not enabled")
	}
}

// branchSize returns the far-branch encoded size of p; p must be a branch.
func (self *Program) branchSize(p *Instruction) int {
	switch p.branch {
	case _B_none:
		panic("p is not a branch")
	case _B_conditional:
		return _N_far_cond
	case _B_unconditional:
		return _N_far_uncond
	default:
		panic("invalid instruction")
	}
}
/** Pseudo-Instructions **/

// Byte is a pseudo-instruction to add raw byte to the assembled code.
func (self *Program) Byte(v *expr.Expr) (p *Instruction) {
	p = self.pseudo(_PseudoByte)
	p.pseudo.expr = v
	return
}

// Word is a pseudo-instruction to add raw uint16 as little-endian to the assembled code.
func (self *Program) Word(v *expr.Expr) (p *Instruction) {
	p = self.pseudo(_PseudoWord)
	p.pseudo.expr = v
	return
}

// Long is a pseudo-instruction to add raw uint32 as little-endian to the assembled code.
func (self *Program) Long(v *expr.Expr) (p *Instruction) {
	p = self.pseudo(_PseudoLong)
	p.pseudo.expr = v
	return
}

// Quad is a pseudo-instruction to add raw uint64 as little-endian to the assembled code.
func (self *Program) Quad(v *expr.Expr) (p *Instruction) {
	p = self.pseudo(_PseudoQuad)
	p.pseudo.expr = v
	return
}

// Data is a pseudo-instruction to add raw bytes to the assembled code.
func (self *Program) Data(v []byte) (p *Instruction) {
	p = self.pseudo(_PseudoData)
	p.pseudo.data = v
	return
}

// Align is a pseudo-instruction to ensure the PC is aligned to a certain value.
// align must be a power of two; padding (optional) supplies the fill byte.
func (self *Program) Align(align uint64, padding *expr.Expr) (p *Instruction) {
	p = self.pseudo(_PseudoAlign)
	p.pseudo.uint = align
	p.pseudo.expr = padding
	return
}

/** Program Assembler **/

// Free returns the Program object into pool.
// Any operation performed after Free is undefined behavior.
//
// NOTE: This also frees all the instructions, labels, memory
//
//	operands and expressions associated with this program.
func (self *Program) Free() {
	self.clear()
	//freeProgram(self)
}

// Link pins a label at the current position.
// It panics if the label was already pinned elsewhere.
func (self *Program) Link(p *Label) {
	if p.Dest != nil {
		panic("label was already linked")
	} else {
		p.Dest = self.pseudo(_PseudoNop)
	}
}
// Assemble assembles and links the entire program into machine code.
// It performs classic branch relaxation: every labeled branch starts out as a
// far branch, then is iteratively shrunk to a near branch where the target is
// within an 8-bit displacement, re-adjusting downstream PCs each iteration.
func (self *Program) Assemble(pc uintptr) (ret []byte) {
	orig := pc
	next := true
	offs := uintptr(0)

	/* Pass 0: PC-precompute, assume all labeled branches are far-branches. */
	for p := self.head; p != nil; p = p.next {
		if p.pc = pc; !isLabel(p.argv[0]) || p.branch == _B_none {
			pc += uintptr(p.encode(nil))
		} else {
			pc += uintptr(self.branchSize(p))
		}
	}

	/* allocate space for the machine code */
	nb := int(pc - orig)
	ret = make([]byte, 0, nb)

	/* Pass 1: adjust all the jumps, repeating until no more far-branches
	 * can be shrunk (shrinking one branch may bring another into range) */
	for next {
		next = false
		offs = uintptr(0)

		/* scan all the branches */
		for p := self.head; p != nil; p = p.next {
			var ok bool
			var lb *Label

			/* re-calculate the alignment here, since preceding shrinks may
			 * have moved this instruction onto a different boundary */
			if nb = p.nb; p.pseudo.kind == _PseudoAlign {
				p.pc -= offs
				offs += uintptr(nb - p.encode(nil))
				continue
			}

			/* adjust the program counter */
			p.pc -= offs
			lb, ok = p.argv[0].(*Label)

			/* only care about labeled far-branches */
			if !ok || p.nb == _N_near || p.branch == _B_none {
				continue
			}

			/* calculate the jump offset */
			size := self.branchSize(p)
			diff := lb.offset(p.pc, size)

			/* too far to be a near jump */
			if diff > 127 || diff < -128 {
				p.nb = size
				continue
			}

			/* a far jump becomes a near jump, calculate
			 * the PC adjustment value and assemble again */
			next = true
			p.nb = _N_near
			offs += uintptr(size - _N_near)
		}
	}

	/* Pass 2: link all the cross-references */
	for p := self.head; p != nil; p = p.next {
		for i := 0; i < p.argc; i++ {
			var ok bool
			var lb *Label
			var op *MemoryOperand

			/* resolve labels */
			if lb, ok = p.argv[i].(*Label); ok {
				p.argv[i] = lb.offset(p.pc, p.nb)
				continue
			}

			/* check for memory operands */
			if op, ok = p.argv[i].(*MemoryOperand); !ok {
				continue
			}

			/* check for label references */
			if op.Addr.Type != Reference {
				continue
			}

			/* replace the label with the real offset */
			op.Addr.Type = Offset
			op.Addr.Offset = op.Addr.Reference.offset(p.pc, p.nb)
		}
	}

	/* Pass 3: actually encode all the instructions */
	for p := self.head; p != nil; p = p.next {
		p.encode(&ret)
	}

	/* all done */
	return ret
}
// AssembleAndFree is like Assemble, but it frees the Program after assembling.
func (self *Program) AssembleAndFree(pc uintptr) []byte {
	defer self.Free()
	return self.Assemble(pc)
}

View File

@@ -0,0 +1,747 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"fmt"
)
// Register represents a hardware register.
type Register interface {
	fmt.Stringer
	implRegister()
}

// General-purpose register operand types, one per operand width
// (8, 16, 32 and 64 bits). The underlying byte is the register number.
type (
	Register8  byte
	Register16 byte
	Register32 byte
	Register64 byte
)

// Vector and mask register operand types: AVX-512 opmask registers (K),
// MMX registers (MM), and 128/256/512-bit SIMD registers (XMM/YMM/ZMM).
type (
	KRegister   byte
	MMRegister  byte
	XMMRegister byte
	YMMRegister byte
	ZMMRegister byte
)
// RegisterMask is a KRegister used to mask another register.
type RegisterMask struct {
	Z bool      // zero-masking when true, merge-masking otherwise
	K KRegister // the opmask register applied
}

// String implements the fmt.Stringer interface.
func (self RegisterMask) String() string {
	if !self.Z {
		return fmt.Sprintf("{%%%s}", self.K)
	} else {
		return fmt.Sprintf("{%%%s}{z}", self.K)
	}
}

// MaskedRegister is a Register masked by a RegisterMask.
type MaskedRegister struct {
	Reg  Register     // the register being masked
	Mask RegisterMask // the mask applied to it
}

// String implements the fmt.Stringer interface.
func (self MaskedRegister) String() string {
	return self.Reg.String() + self.Mask.String()
}
// 8-bit general-purpose registers, in hardware encoding order.
const (
	AL Register8 = iota
	CL
	DL
	BL
	SPL
	BPL
	SIL
	DIL
	R8b
	R9b
	R10b
	R11b
	R12b
	R13b
	R14b
	R15b
)

// Legacy high-byte registers: bit 7 tags them so they cannot be confused
// with the REX-addressable low-byte registers that share the same number.
const (
	AH = SPL | 0x80
	CH = BPL | 0x80
	DH = SIL | 0x80
	BH = DIL | 0x80
)

// 16-bit general-purpose registers.
const (
	AX Register16 = iota
	CX
	DX
	BX
	SP
	BP
	SI
	DI
	R8w
	R9w
	R10w
	R11w
	R12w
	R13w
	R14w
	R15w
)

// 32-bit general-purpose registers.
const (
	EAX Register32 = iota
	ECX
	EDX
	EBX
	ESP
	EBP
	ESI
	EDI
	R8d
	R9d
	R10d
	R11d
	R12d
	R13d
	R14d
	R15d
)

// 64-bit general-purpose registers.
const (
	RAX Register64 = iota
	RCX
	RDX
	RBX
	RSP
	RBP
	RSI
	RDI
	R8
	R9
	R10
	R11
	R12
	R13
	R14
	R15
)

// AVX-512 opmask registers.
const (
	K0 KRegister = iota
	K1
	K2
	K3
	K4
	K5
	K6
	K7
)

// MMX registers.
const (
	MM0 MMRegister = iota
	MM1
	MM2
	MM3
	MM4
	MM5
	MM6
	MM7
)

// 128-bit SIMD registers.
const (
	XMM0 XMMRegister = iota
	XMM1
	XMM2
	XMM3
	XMM4
	XMM5
	XMM6
	XMM7
	XMM8
	XMM9
	XMM10
	XMM11
	XMM12
	XMM13
	XMM14
	XMM15
	XMM16
	XMM17
	XMM18
	XMM19
	XMM20
	XMM21
	XMM22
	XMM23
	XMM24
	XMM25
	XMM26
	XMM27
	XMM28
	XMM29
	XMM30
	XMM31
)

// 256-bit SIMD registers.
const (
	YMM0 YMMRegister = iota
	YMM1
	YMM2
	YMM3
	YMM4
	YMM5
	YMM6
	YMM7
	YMM8
	YMM9
	YMM10
	YMM11
	YMM12
	YMM13
	YMM14
	YMM15
	YMM16
	YMM17
	YMM18
	YMM19
	YMM20
	YMM21
	YMM22
	YMM23
	YMM24
	YMM25
	YMM26
	YMM27
	YMM28
	YMM29
	YMM30
	YMM31
)

// 512-bit SIMD registers.
const (
	ZMM0 ZMMRegister = iota
	ZMM1
	ZMM2
	ZMM3
	ZMM4
	ZMM5
	ZMM6
	ZMM7
	ZMM8
	ZMM9
	ZMM10
	ZMM11
	ZMM12
	ZMM13
	ZMM14
	ZMM15
	ZMM16
	ZMM17
	ZMM18
	ZMM19
	ZMM20
	ZMM21
	ZMM22
	ZMM23
	ZMM24
	ZMM25
	ZMM26
	ZMM27
	ZMM28
	ZMM29
	ZMM30
	ZMM31
)
// implRegister marks each concrete register type as satisfying the
// Register interface; the methods are intentionally empty.
func (self Register8) implRegister()   {}
func (self Register16) implRegister()  {}
func (self Register32) implRegister()  {}
func (self Register64) implRegister()  {}
func (self KRegister) implRegister()   {}
func (self MMRegister) implRegister()  {}
func (self XMMRegister) implRegister() {}
func (self YMMRegister) implRegister() {}
func (self ZMMRegister) implRegister() {}
// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self Register8) String() string {
	if int(self) < len(r8names) {
		return r8names[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self Register16) String() string {
	if int(self) < len(r16names) {
		return r16names[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self Register32) String() string {
	if int(self) < len(r32names) {
		return r32names[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self Register64) String() string {
	if int(self) < len(r64names) {
		return r64names[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self KRegister) String() string {
	if int(self) < len(knames) {
		return knames[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self MMRegister) String() string {
	if int(self) < len(mmnames) {
		return mmnames[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self XMMRegister) String() string {
	if int(self) < len(xmmnames) {
		return xmmnames[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self YMMRegister) String() string {
	if int(self) < len(ymmnames) {
		return ymmnames[self]
	}
	return "???"
}

// String returns the assembler name of the register, or "???" when the
// value is outside the name table.
func (self ZMMRegister) String() string {
	if int(self) < len(zmmnames) {
		return zmmnames[self]
	}
	return "???"
}
// Registers maps every register name to its Register instance; used by the
// assembly parser to resolve operands.
var Registers = map[string]Register{
	"al":    AL,
	"cl":    CL,
	"dl":    DL,
	"bl":    BL,
	"spl":   SPL,
	"bpl":   BPL,
	"sil":   SIL,
	"dil":   DIL,
	"r8b":   R8b,
	"r9b":   R9b,
	"r10b":  R10b,
	"r11b":  R11b,
	"r12b":  R12b,
	"r13b":  R13b,
	"r14b":  R14b,
	"r15b":  R15b,
	"ah":    AH,
	"ch":    CH,
	"dh":    DH,
	"bh":    BH,
	"ax":    AX,
	"cx":    CX,
	"dx":    DX,
	"bx":    BX,
	"sp":    SP,
	"bp":    BP,
	"si":    SI,
	"di":    DI,
	"r8w":   R8w,
	"r9w":   R9w,
	"r10w":  R10w,
	"r11w":  R11w,
	"r12w":  R12w,
	"r13w":  R13w,
	"r14w":  R14w,
	"r15w":  R15w,
	"eax":   EAX,
	"ecx":   ECX,
	"edx":   EDX,
	"ebx":   EBX,
	"esp":   ESP,
	"ebp":   EBP,
	"esi":   ESI,
	"edi":   EDI,
	"r8d":   R8d,
	"r9d":   R9d,
	"r10d":  R10d,
	"r11d":  R11d,
	"r12d":  R12d,
	"r13d":  R13d,
	"r14d":  R14d,
	"r15d":  R15d,
	"rax":   RAX,
	"rcx":   RCX,
	"rdx":   RDX,
	"rbx":   RBX,
	"rsp":   RSP,
	"rbp":   RBP,
	"rsi":   RSI,
	"rdi":   RDI,
	"r8":    R8,
	"r9":    R9,
	"r10":   R10,
	"r11":   R11,
	"r12":   R12,
	"r13":   R13,
	"r14":   R14,
	"r15":   R15,
	"k0":    K0,
	"k1":    K1,
	"k2":    K2,
	"k3":    K3,
	"k4":    K4,
	"k5":    K5,
	"k6":    K6,
	"k7":    K7,
	"mm0":   MM0,
	"mm1":   MM1,
	"mm2":   MM2,
	"mm3":   MM3,
	"mm4":   MM4,
	"mm5":   MM5,
	"mm6":   MM6,
	"mm7":   MM7,
	"xmm0":  XMM0,
	"xmm1":  XMM1,
	"xmm2":  XMM2,
	"xmm3":  XMM3,
	"xmm4":  XMM4,
	"xmm5":  XMM5,
	"xmm6":  XMM6,
	"xmm7":  XMM7,
	"xmm8":  XMM8,
	"xmm9":  XMM9,
	"xmm10": XMM10,
	"xmm11": XMM11,
	"xmm12": XMM12,
	"xmm13": XMM13,
	"xmm14": XMM14,
	"xmm15": XMM15,
	"xmm16": XMM16,
	"xmm17": XMM17,
	"xmm18": XMM18,
	"xmm19": XMM19,
	"xmm20": XMM20,
	"xmm21": XMM21,
	"xmm22": XMM22,
	"xmm23": XMM23,
	"xmm24": XMM24,
	"xmm25": XMM25,
	"xmm26": XMM26,
	"xmm27": XMM27,
	"xmm28": XMM28,
	"xmm29": XMM29,
	"xmm30": XMM30,
	"xmm31": XMM31,
	"ymm0":  YMM0,
	"ymm1":  YMM1,
	"ymm2":  YMM2,
	"ymm3":  YMM3,
	"ymm4":  YMM4,
	"ymm5":  YMM5,
	"ymm6":  YMM6,
	"ymm7":  YMM7,
	"ymm8":  YMM8,
	"ymm9":  YMM9,
	"ymm10": YMM10,
	"ymm11": YMM11,
	"ymm12": YMM12,
	"ymm13": YMM13,
	"ymm14": YMM14,
	"ymm15": YMM15,
	"ymm16": YMM16,
	"ymm17": YMM17,
	"ymm18": YMM18,
	"ymm19": YMM19,
	"ymm20": YMM20,
	"ymm21": YMM21,
	"ymm22": YMM22,
	"ymm23": YMM23,
	"ymm24": YMM24,
	"ymm25": YMM25,
	"ymm26": YMM26,
	"ymm27": YMM27,
	"ymm28": YMM28,
	"ymm29": YMM29,
	"ymm30": YMM30,
	"ymm31": YMM31,
	"zmm0":  ZMM0,
	"zmm1":  ZMM1,
	"zmm2":  ZMM2,
	"zmm3":  ZMM3,
	"zmm4":  ZMM4,
	"zmm5":  ZMM5,
	"zmm6":  ZMM6,
	"zmm7":  ZMM7,
	"zmm8":  ZMM8,
	"zmm9":  ZMM9,
	"zmm10": ZMM10,
	"zmm11": ZMM11,
	"zmm12": ZMM12,
	"zmm13": ZMM13,
	"zmm14": ZMM14,
	"zmm15": ZMM15,
	"zmm16": ZMM16,
	"zmm17": ZMM17,
	"zmm18": ZMM18,
	"zmm19": ZMM19,
	"zmm20": ZMM20,
	"zmm21": ZMM21,
	"zmm22": ZMM22,
	"zmm23": ZMM23,
	"zmm24": ZMM24,
	"zmm25": ZMM25,
	"zmm26": ZMM26,
	"zmm27": ZMM27,
	"zmm28": ZMM28,
	"zmm29": ZMM29,
	"zmm30": ZMM30,
	"zmm31": ZMM31,
}
/** Register Name Tables **/

// r8names maps 8-bit register values to their names; the array is indexed
// by register value, so the 0x80-tagged high-byte registers (AH..BH) sit at
// sparse indices with empty strings in between.
var r8names = [...]string{
	AL:   "al",
	CL:   "cl",
	DL:   "dl",
	BL:   "bl",
	SPL:  "spl",
	BPL:  "bpl",
	SIL:  "sil",
	DIL:  "dil",
	R8b:  "r8b",
	R9b:  "r9b",
	R10b: "r10b",
	R11b: "r11b",
	R12b: "r12b",
	R13b: "r13b",
	R14b: "r14b",
	R15b: "r15b",
	AH:   "ah",
	CH:   "ch",
	DH:   "dh",
	BH:   "bh",
}

// r16names maps 16-bit register values to their names.
var r16names = [...]string{
	AX:   "ax",
	CX:   "cx",
	DX:   "dx",
	BX:   "bx",
	SP:   "sp",
	BP:   "bp",
	SI:   "si",
	DI:   "di",
	R8w:  "r8w",
	R9w:  "r9w",
	R10w: "r10w",
	R11w: "r11w",
	R12w: "r12w",
	R13w: "r13w",
	R14w: "r14w",
	R15w: "r15w",
}

// r32names maps 32-bit register values to their names.
var r32names = [...]string{
	EAX:  "eax",
	ECX:  "ecx",
	EDX:  "edx",
	EBX:  "ebx",
	ESP:  "esp",
	EBP:  "ebp",
	ESI:  "esi",
	EDI:  "edi",
	R8d:  "r8d",
	R9d:  "r9d",
	R10d: "r10d",
	R11d: "r11d",
	R12d: "r12d",
	R13d: "r13d",
	R14d: "r14d",
	R15d: "r15d",
}

// r64names maps 64-bit register values to their names.
var r64names = [...]string{
	RAX: "rax",
	RCX: "rcx",
	RDX: "rdx",
	RBX: "rbx",
	RSP: "rsp",
	RBP: "rbp",
	RSI: "rsi",
	RDI: "rdi",
	R8:  "r8",
	R9:  "r9",
	R10: "r10",
	R11: "r11",
	R12: "r12",
	R13: "r13",
	R14: "r14",
	R15: "r15",
}

// knames maps opmask register values to their names.
var knames = [...]string{
	K0: "k0",
	K1: "k1",
	K2: "k2",
	K3: "k3",
	K4: "k4",
	K5: "k5",
	K6: "k6",
	K7: "k7",
}

// mmnames maps MMX register values to their names.
var mmnames = [...]string{
	MM0: "mm0",
	MM1: "mm1",
	MM2: "mm2",
	MM3: "mm3",
	MM4: "mm4",
	MM5: "mm5",
	MM6: "mm6",
	MM7: "mm7",
}

// xmmnames maps 128-bit SIMD register values to their names.
var xmmnames = [...]string{
	XMM0:  "xmm0",
	XMM1:  "xmm1",
	XMM2:  "xmm2",
	XMM3:  "xmm3",
	XMM4:  "xmm4",
	XMM5:  "xmm5",
	XMM6:  "xmm6",
	XMM7:  "xmm7",
	XMM8:  "xmm8",
	XMM9:  "xmm9",
	XMM10: "xmm10",
	XMM11: "xmm11",
	XMM12: "xmm12",
	XMM13: "xmm13",
	XMM14: "xmm14",
	XMM15: "xmm15",
	XMM16: "xmm16",
	XMM17: "xmm17",
	XMM18: "xmm18",
	XMM19: "xmm19",
	XMM20: "xmm20",
	XMM21: "xmm21",
	XMM22: "xmm22",
	XMM23: "xmm23",
	XMM24: "xmm24",
	XMM25: "xmm25",
	XMM26: "xmm26",
	XMM27: "xmm27",
	XMM28: "xmm28",
	XMM29: "xmm29",
	XMM30: "xmm30",
	XMM31: "xmm31",
}

// ymmnames maps 256-bit SIMD register values to their names.
var ymmnames = [...]string{
	YMM0:  "ymm0",
	YMM1:  "ymm1",
	YMM2:  "ymm2",
	YMM3:  "ymm3",
	YMM4:  "ymm4",
	YMM5:  "ymm5",
	YMM6:  "ymm6",
	YMM7:  "ymm7",
	YMM8:  "ymm8",
	YMM9:  "ymm9",
	YMM10: "ymm10",
	YMM11: "ymm11",
	YMM12: "ymm12",
	YMM13: "ymm13",
	YMM14: "ymm14",
	YMM15: "ymm15",
	YMM16: "ymm16",
	YMM17: "ymm17",
	YMM18: "ymm18",
	YMM19: "ymm19",
	YMM20: "ymm20",
	YMM21: "ymm21",
	YMM22: "ymm22",
	YMM23: "ymm23",
	YMM24: "ymm24",
	YMM25: "ymm25",
	YMM26: "ymm26",
	YMM27: "ymm27",
	YMM28: "ymm28",
	YMM29: "ymm29",
	YMM30: "ymm30",
	YMM31: "ymm31",
}

// zmmnames maps 512-bit SIMD register values to their names.
var zmmnames = [...]string{
	ZMM0:  "zmm0",
	ZMM1:  "zmm1",
	ZMM2:  "zmm2",
	ZMM3:  "zmm3",
	ZMM4:  "zmm4",
	ZMM5:  "zmm5",
	ZMM6:  "zmm6",
	ZMM7:  "zmm7",
	ZMM8:  "zmm8",
	ZMM9:  "zmm9",
	ZMM10: "zmm10",
	ZMM11: "zmm11",
	ZMM12: "zmm12",
	ZMM13: "zmm13",
	ZMM14: "zmm14",
	ZMM15: "zmm15",
	ZMM16: "zmm16",
	ZMM17: "zmm17",
	ZMM18: "zmm18",
	ZMM19: "zmm19",
	ZMM20: "zmm20",
	ZMM21: "zmm21",
	ZMM22: "zmm22",
	ZMM23: "zmm23",
	ZMM24: "zmm24",
	ZMM25: "zmm25",
	ZMM26: "zmm26",
	ZMM27: "zmm27",
	ZMM28: "zmm28",
	ZMM29: "zmm29",
	ZMM30: "zmm30",
	ZMM31: "zmm31",
}

View File

@@ -0,0 +1,147 @@
//
// Copyright 2024 CloudWeGo Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package x86_64
import (
"encoding/binary"
"errors"
"reflect"
"strconv"
"unicode/utf8"
"unsafe"
)
// Character-class flag bits. NOTE(review): these flags are not referenced in
// this file — presumably consumed by the tokenizer/parser elsewhere; confirm
// before removing.
const (
	_CC_digit = 1 << iota
	_CC_ident
	_CC_ident0
	_CC_number
)
// ispow2 reports whether v has at most one bit set. Note that 0 satisfies
// this test as well, matching the original behavior.
func ispow2(v uint64) bool {
	masked := v & (v - 1)
	return masked == 0
}
// isdigit reports whether cc is an ASCII decimal digit.
func isdigit(cc rune) bool {
	return cc >= '0' && cc <= '9'
}

// isalpha reports whether cc is an ASCII letter.
func isalpha(cc rune) bool {
	switch {
	case cc >= 'a' && cc <= 'z':
		return true
	case cc >= 'A' && cc <= 'Z':
		return true
	default:
		return false
	}
}

// isident reports whether cc may appear inside an identifier.
func isident(cc rune) bool {
	return isdigit(cc) || isident0(cc)
}

// isident0 reports whether cc may start an identifier.
func isident0(cc rune) bool {
	return cc == '_' || isalpha(cc)
}

// isnumber reports whether cc may appear in a numeric literal: a base
// prefix letter (b/o/x in either case), a digit, or a hex digit.
func isnumber(cc rune) bool {
	switch {
	case cc == 'b' || cc == 'B', cc == 'o' || cc == 'O', cc == 'x' || cc == 'X':
		return true
	case cc >= '0' && cc <= '9':
		return true
	case cc >= 'a' && cc <= 'f', cc >= 'A' && cc <= 'F':
		return true
	default:
		return false
	}
}
// align rounds v up to the next multiple of 2**n (values already aligned
// are returned unchanged).
func align(v int, n int) int {
	mask := (1 << n) - 1
	return (v + mask) &^ mask
}
// append8 appends a single byte to *m.
func append8(m *[]byte, v byte) {
	*m = append(*m, v)
}

// append16 appends v to *m in little-endian byte order.
func append16(m *[]byte, v uint16) {
	var tmp [2]byte
	binary.LittleEndian.PutUint16(tmp[:], v)
	*m = append(*m, tmp[:]...)
}

// append32 appends v to *m in little-endian byte order.
func append32(m *[]byte, v uint32) {
	var tmp [4]byte
	binary.LittleEndian.PutUint32(tmp[:], v)
	*m = append(*m, tmp[:]...)
}

// append64 appends v to *m in little-endian byte order.
func append64(m *[]byte, v uint64) {
	var tmp [8]byte
	binary.LittleEndian.PutUint64(tmp[:], v)
	*m = append(*m, tmp[:]...)
}
// expandmm appends n copies of the byte v to *m. It grows the backing array
// through the runtime's growslice when capacity is insufficient, then fills
// the new tail with memset; the slice header is poked directly via _GoSlice
// so the length can be bumped without copying.
func expandmm(m *[]byte, n int, v byte) {
	sl := (*_GoSlice)(unsafe.Pointer(m))
	nb := sl.len + n
	/* grow as needed */
	if nb > cap(*m) {
		*m = growslice(byteType, *m, nb)
	}
	/* fill the new area */
	memset(unsafe.Pointer(uintptr(sl.ptr)+uintptr(sl.len)), v, uintptr(n))
	sl.len = nb
}
// memset fills n bytes at p with c, delegating the common zero-fill case to
// the runtime's optimized memclrNoHeapPointers.
func memset(p unsafe.Pointer, c byte, n uintptr) {
	if c != 0 {
		memsetv(p, c, n)
	} else {
		memclrNoHeapPointers(p, n)
	}
}
// memsetv fills n bytes starting at p with the byte c, one byte at a time.
func memsetv(p unsafe.Pointer, c byte, n uintptr) {
	for off := uintptr(0); off < n; off++ {
		*(*byte)(unsafe.Pointer(uintptr(p) + off)) = c
	}
}
// literal64 decodes a quoted multi-character constant (e.g. 'abcd') into a
// uint64. Each rune is unescaped with strconv.UnquoteChar and UTF-8 encoded
// into the buffer; encodings longer than 8 bytes are rejected. The 12-byte
// scratch buffer leaves room for the final rune (up to 4 bytes) written at
// an offset <= 8 before the length check fires.
// NOTE(review): the final unsafe cast reads the buffer in host byte order,
// so the value is host-endian — confirm callers expect that.
func literal64(v string) (uint64, error) {
	var nb int
	var ch rune
	var ex error
	var mm [12]byte
	/* unquote the runes */
	for v != "" {
		if ch, _, v, ex = strconv.UnquoteChar(v, '\''); ex != nil {
			return 0, ex
		} else if nb += utf8.EncodeRune(mm[nb:], ch); nb > 8 {
			return 0, errors.New("multi-char constant too large")
		}
	}
	/* convert to uint64 */
	return *(*uint64)(unsafe.Pointer(&mm)), nil
}
var (
	// byteWrap is the reflect.Type for byte; byteType is its raw runtime
	// type descriptor, which growslice needs.
	byteWrap = reflect.TypeOf(byte(0))
	byteType = (*_GoType)(efaceOf(byteWrap).ptr)
)

// growslice is the runtime's slice-growing primitive, linked in directly.
// NOTE(review): growslice's real signature has changed across Go releases —
// confirm this matches every toolchain the build tags allow.
//go:linkname growslice runtime.growslice
func growslice(_ *_GoType, _ []byte, _ int) []byte

// memclrNoHeapPointers zeroes a pointer-free memory region (runtime linkname).
//go:noescape
//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
func memclrNoHeapPointers(_ unsafe.Pointer, _ uintptr)

View File

@@ -0,0 +1,62 @@
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rt
import (
`unsafe`
`reflect`
)
// Mem2Str reinterprets a byte slice as a string sharing the same backing
// memory (no copy). The caller must guarantee v is never mutated afterwards,
// since strings are assumed immutable.
//go:nosplit
func Mem2Str(v []byte) (s string) {
	(*GoString)(unsafe.Pointer(&s)).Len = (*GoSlice)(unsafe.Pointer(&v)).Len
	(*GoString)(unsafe.Pointer(&s)).Ptr = (*GoSlice)(unsafe.Pointer(&v)).Ptr
	return
}
// Str2Mem reinterprets a string as a byte slice sharing the same backing
// memory (no copy); cap is set equal to len. Writing through the result is
// undefined behavior since the bytes may live in read-only memory.
//go:nosplit
func Str2Mem(s string) (v []byte) {
	(*GoSlice)(unsafe.Pointer(&v)).Cap = (*GoString)(unsafe.Pointer(&s)).Len
	(*GoSlice)(unsafe.Pointer(&v)).Len = (*GoString)(unsafe.Pointer(&s)).Len
	(*GoSlice)(unsafe.Pointer(&v)).Ptr = (*GoString)(unsafe.Pointer(&s)).Ptr
	return
}
// BytesFrom builds a []byte of length n and capacity c over the raw memory
// at p, without copying. The caller owns the memory's lifetime.
func BytesFrom(p unsafe.Pointer, n int, c int) (r []byte) {
	(*GoSlice)(unsafe.Pointer(&r)).Ptr = p
	(*GoSlice)(unsafe.Pointer(&r)).Len = n
	(*GoSlice)(unsafe.Pointer(&r)).Cap = c
	return
}
// FuncAddr returns the entry address of the function value f.
// It panics if f is not a function.
func FuncAddr(f interface{}) unsafe.Pointer {
	vv := UnpackEface(f)
	if vv.Type.Kind() != reflect.Func {
		panic("f is not a function")
	}
	return *(*unsafe.Pointer)(vv.Value)
}
// IndexChar returns a raw pointer to the byte at src[index] without a bounds
// check; index must be within the string.
//go:nocheckptr
func IndexChar(src string, index int) unsafe.Pointer {
	return unsafe.Pointer(uintptr((*GoString)(unsafe.Pointer(&src)).Ptr) + uintptr(index))
}

// IndexByte returns a raw pointer to ptr[index] without a bounds check;
// index must be within the slice.
//go:nocheckptr
func IndexByte(ptr []byte, index int) unsafe.Pointer {
	return unsafe.Pointer(uintptr((*GoSlice)(unsafe.Pointer(&ptr)).Ptr) + uintptr(index))
}

View File

@@ -0,0 +1,183 @@
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rt
import (
`reflect`
`unsafe`
)
var (
	// reflectRtypeItab is the itab pairing *reflect.rtype with reflect.Type,
	// captured once at init and reused by GoType.Pack.
	reflectRtypeItab = findReflectRtypeItab()
)

// GoType.KindFlags const
const (
	F_direct    = 1 << 5       // the value is stored directly in interface words
	F_kind_mask = (1 << 5) - 1 // low 5 bits hold the reflect.Kind
)

// GoType.Flags const
const (
	tflagUncommon      uint8 = 1 << 0
	tflagExtraStar     uint8 = 1 << 1
	tflagNamed         uint8 = 1 << 2
	tflagRegularMemory uint8 = 1 << 3
)
// GoType mirrors the layout of the runtime's type descriptor; field order
// and sizes must match the runtime exactly for the unsafe casts in this
// package to be valid.
type GoType struct {
	Size       uintptr
	PtrData    uintptr
	Hash       uint32
	Flags      uint8
	Align      uint8
	FieldAlign uint8
	KindFlags  uint8
	Traits     unsafe.Pointer
	GCData     *byte
	Str        int32
	PtrToSelf  int32
}
// IsNamed reports whether the type carries the tflagNamed flag.
func (self *GoType) IsNamed() bool {
	return (self.Flags & tflagNamed) != 0
}

// Kind returns the reflect.Kind encoded in the low 5 bits of KindFlags.
func (self *GoType) Kind() reflect.Kind {
	return reflect.Kind(self.KindFlags & F_kind_mask)
}

// Pack wraps the raw type descriptor into a reflect.Type by pairing it with
// the cached *reflect.rtype itab.
func (self *GoType) Pack() (t reflect.Type) {
	(*GoIface)(unsafe.Pointer(&t)).Itab = reflectRtypeItab
	(*GoIface)(unsafe.Pointer(&t)).Value = unsafe.Pointer(self)
	return
}

// String formats the type via its reflect.Type representation.
func (self *GoType) String() string {
	return self.Pack().String()
}

// Indirect reports whether interface words hold a pointer to the value
// rather than the value itself (i.e. the F_direct bit is clear).
func (self *GoType) Indirect() bool {
	return self.KindFlags&F_direct == 0
}
// GoItab mirrors the runtime's itab layout.
type GoItab struct {
	it unsafe.Pointer
	Vt *GoType
	hv uint32
	_  [4]byte
	fn [1]uintptr
}

// GoIface mirrors a non-empty interface header (itab + data pointer).
type GoIface struct {
	Itab  *GoItab
	Value unsafe.Pointer
}

// GoEface mirrors an empty-interface header (type + data pointer).
type GoEface struct {
	Type  *GoType
	Value unsafe.Pointer
}

// Pack reinterprets the eface header as an interface{} value.
func (self GoEface) Pack() (v interface{}) {
	*(*GoEface)(unsafe.Pointer(&v)) = self
	return
}
// GoPtrType mirrors the runtime descriptor of a pointer type.
type GoPtrType struct {
	GoType
	Elem *GoType
}

// GoMapType mirrors the runtime descriptor of a map type.
type GoMapType struct {
	GoType
	Key        *GoType
	Elem       *GoType
	Bucket     *GoType
	Hasher     func(unsafe.Pointer, uintptr) uintptr
	KeySize    uint8
	ElemSize   uint8
	BucketSize uint16
	Flags      uint32
}

// IndirectElem reports whether map buckets store pointers to elements
// (runtime flag bit 1) instead of the elements themselves.
func (self *GoMapType) IndirectElem() bool {
	return self.Flags&2 != 0
}

// GoStructType mirrors the runtime descriptor of a struct type.
type GoStructType struct {
	GoType
	Pkg    *byte
	Fields []GoStructField
}

// GoStructField mirrors one field entry of a struct descriptor.
type GoStructField struct {
	Name     *byte
	Type     *GoType
	OffEmbed uintptr
}

// GoInterfaceType mirrors the runtime descriptor of an interface type.
type GoInterfaceType struct {
	GoType
	PkgPath *byte
	Methods []GoInterfaceMethod
}

// GoInterfaceMethod mirrors one method entry of an interface descriptor.
type GoInterfaceMethod struct {
	Name int32
	Type int32
}

// GoSlice mirrors a slice header (data pointer, length, capacity).
type GoSlice struct {
	Ptr unsafe.Pointer
	Len int
	Cap int
}

// GoString mirrors a string header (data pointer, length).
type GoString struct {
	Ptr unsafe.Pointer
	Len int
}
// PtrElem returns the element type of a pointer type descriptor; t must
// describe a pointer type.
func PtrElem(t *GoType) *GoType {
	return (*GoPtrType)(unsafe.Pointer(t)).Elem
}

// MapType reinterprets t as a map type descriptor; t must describe a map.
func MapType(t *GoType) *GoMapType {
	return (*GoMapType)(unsafe.Pointer(t))
}

// IfaceType reinterprets t as an interface type descriptor; t must describe
// an interface.
func IfaceType(t *GoType) *GoInterfaceType {
	return (*GoInterfaceType)(unsafe.Pointer(t))
}

// UnpackType extracts the raw runtime descriptor from a reflect.Type.
func UnpackType(t reflect.Type) *GoType {
	return (*GoType)((*GoIface)(unsafe.Pointer(&t)).Value)
}

// UnpackEface exposes the header of an empty-interface value.
func UnpackEface(v interface{}) GoEface {
	return *(*GoEface)(unsafe.Pointer(&v))
}

// UnpackIface exposes the header of a non-empty interface value.
func UnpackIface(v interface{}) GoIface {
	return *(*GoIface)(unsafe.Pointer(&v))
}

// findReflectRtypeItab obtains the itab shared by all reflect.Type values by
// inspecting an arbitrary reflect.TypeOf result.
func findReflectRtypeItab() *GoItab {
	v := reflect.TypeOf(struct{}{})
	return (*GoIface)(unsafe.Pointer(&v)).Itab
}

View File

@@ -0,0 +1,228 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rt
import (
`fmt`
`strings`
`unsafe`
)
// Bitmap is a growable bit set: bit i lives at B[i/8], bit position (i%8).
type Bitmap struct {
	N int    // number of bits currently in use
	B []byte // backing storage, 8 bits per byte
}

// grow ensures the backing slice has room for one more bit.
func (self *Bitmap) grow() {
	if self.N >= len(self.B)*8 {
		self.B = append(self.B, 0)
	}
}

// mark sets (bv != 0) or clears (bv == 0) bit i without bounds checking.
func (self *Bitmap) mark(i int, bv int) {
	if bv != 0 {
		self.B[i/8] |= 1 << (i % 8)
	} else {
		self.B[i/8] &^= 1 << (i % 8)
	}
}

// Set assigns bit i to bv; it panics if i is outside the current bit count.
func (self *Bitmap) Set(i int, bv int) {
	if i >= self.N {
		panic("bitmap: invalid bit position")
	}
	self.mark(i, bv)
}

// Append adds one bit at the end of the bitmap, growing storage as needed.
func (self *Bitmap) Append(bv int) {
	self.grow()
	self.mark(self.N, bv)
	self.N++
}

// AppendMany appends n copies of the bit bv.
func (self *Bitmap) AppendMany(n int, bv int) {
	for i := 0; i < n; i++ {
		self.Append(bv)
	}
}

// String returns a human-readable dump of the bitmap for debugging.
// (Receiver renamed from `b` to `self` for consistency with the other
// methods of this type.)
func (self *Bitmap) String() string {
	var buf strings.Builder
	for _, byteVal := range self.B {
		fmt.Fprintf(&buf, "%08b ", byteVal)
	}
	return fmt.Sprintf("Bits: %s(total %d bits, %d bytes)", buf.String(), self.N, len(self.B))
}
// BitVec is a read-only view over N bits stored at B, typically pointing
// into a StackMap's bitmap area.
type BitVec struct {
	N uintptr        // number of bits
	B unsafe.Pointer // first byte of the bit data
}

// Bit returns bit i (0 or 1); no bounds check is performed.
func (self BitVec) Bit(i uintptr) byte {
	return (*(*byte)(unsafe.Pointer(uintptr(self.B) + i/8)) >> (i % 8)) & 1
}
// String formats the bit vector as "BitVec { b0, b1, ... }" for debugging.
func (self BitVec) String() string {
	bits := make([]string, 0, self.N)
	/* collect each bit */
	for i := uintptr(0); i < self.N; i++ {
		bits = append(bits, fmt.Sprintf("%d", self.Bit(i)))
	}
	/* join them together */
	return fmt.Sprintf(
		"BitVec { %s }",
		strings.Join(bits, ", "),
	)
}
/*
reference Golang 1.22.0 code:
```
args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
for _, live := range lv.stackMaps {
args.Clear()
locals.Clear()
lv.pointerMap(live, lv.vars, args, locals)
aoff = objw.BitVec(&argsSymTmp, aoff, args)
loff = objw.BitVec(&liveSymTmp, loff, locals)
}
```
*/
// StackMap mirrors the runtime's stack map encoding: a header giving N
// bitmaps of L bits each, followed by the packed bitmap bytes starting at B.
type StackMap struct {
	// number of bitmaps
	N int32
	// number of bits of each bitmap
	L int32
	// bitmap1, bitmap2, ... bitmapN
	B [1]byte
}
// Get returns a BitVec view of the i-th bitmap; no bounds check is done.
func (self *StackMap) Get(i int) BitVec {
	return BitVec{
		N: uintptr(self.L),
		B: unsafe.Pointer(uintptr(unsafe.Pointer(&self.B)) + uintptr(i*self.BitmapBytes())),
	}
}
// String dumps every bitmap in the stack map, one per line, for debugging.
func (self *StackMap) String() string {
	sb := strings.Builder{}
	sb.WriteString("StackMap {")
	/* dump every stack map */
	for i := 0; i < int(self.N); i++ {
		sb.WriteRune('\n')
		sb.WriteString(" " + self.Get(i).String())
	}
	/* close the stackmap */
	sb.WriteString("\n}")
	return sb.String()
}
// BitmapLen returns the number of bits in each bitmap.
func (self *StackMap) BitmapLen() int {
	return int(self.L)
}

// BitmapBytes returns the size of one bitmap rounded up to whole bytes.
func (self *StackMap) BitmapBytes() int {
	return int(self.L+7) >> 3
}

// BitmapNums returns the number of bitmaps.
func (self *StackMap) BitmapNums() int {
	return int(self.N)
}

// StackMapHeaderSize returns the size of the N+L header preceding the
// bitmap bytes.
func (self *StackMap) StackMapHeaderSize() int {
	return int(unsafe.Sizeof(self.L)) + int(unsafe.Sizeof(self.N))
}

// MarshalBinary returns the raw in-memory encoding of the stack map (header
// plus bitmaps) as a view over the receiver's own memory — no copy is made.
func (self *StackMap) MarshalBinary() ([]byte, error) {
	size := self.BinaryLen()
	return BytesFrom(unsafe.Pointer(self), size, size), nil
}

// BinaryLen returns the total encoded size: header + N bitmaps.
func (self *StackMap) BinaryLen() int {
	return self.StackMapHeaderSize() + self.BitmapBytes()*self.BitmapNums()
}
var (
	// byteType is the raw runtime type descriptor for byte, passed to
	// mallocgc so allocations are treated as pointer-free.
	byteType = UnpackEface(byte(0)).Type
)

const (
	// _StackMapSize is the size of the StackMap header plus its first
	// bitmap byte (the B [1]byte field).
	_StackMapSize = unsafe.Sizeof(StackMap{})
)

// mallocgc is the runtime allocator, linked in directly.
//go:linkname mallocgc runtime.mallocgc
//goland:noinspection GoUnusedParameter
func mallocgc(nb uintptr, vt *GoType, zero bool) unsafe.Pointer
// StackMapBuilder accumulates per-slot pointer bits and finalizes them into
// a runtime-compatible StackMap.
type StackMapBuilder struct {
	b Bitmap // the bits appended so far
}

// Build allocates a StackMap containing a single bitmap of every bit
// appended so far. The memory comes from mallocgc typed as pointer-free
// bytes, so the runtime never scans it.
//go:nocheckptr
func (self *StackMapBuilder) Build() (p *StackMap) {
	nb := len(self.b.B)
	// header already includes one byte of B, hence the -1
	allocatedSize := _StackMapSize + uintptr(nb) - 1
	bm := mallocgc(allocatedSize, byteType, false)
	/* initialize as 1 bitmap of N bits */
	p = (*StackMap)(bm)
	p.N, p.L = 1, int32(self.b.N)
	copy(BytesFrom(unsafe.Pointer(&p.B), nb, nb), self.b.B)
	/* assert length */
	if allocatedSize < uintptr(p.BinaryLen()) {
		panic(fmt.Sprintf("stackmap allocation too small: allocated %d, required %d", allocatedSize, p.BinaryLen()))
	}
	return
}
// AddField appends one slot to the bitmap: 1 when the slot holds a pointer,
// 0 otherwise.
func (self *StackMapBuilder) AddField(ptr bool) {
	bit := 0
	if ptr {
		bit = 1
	}
	self.b.Append(bit)
}

// AddFields appends n consecutive slots, all pointer (1) or all scalar (0).
func (self *StackMapBuilder) AddFields(n int, ptr bool) {
	bit := 0
	if ptr {
		bit = 1
	}
	self.b.AppendMany(n, bit)
}

37
vendor/github.com/bytedance/sonic/loader/loader.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`unsafe`
)
// Function is a pointer to a loaded, runnable function.
type Function unsafe.Pointer

// Options controls how a module is loaded.
type Options struct {
	// NoPreempt is used to disable async preemption for this module
	NoPreempt bool
}

// Loader is a helper used to load a module simply.
type Loader struct {
	Name string // module name
	File string // file name
	Options
}

View File

@@ -0,0 +1,109 @@
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`github.com/bytedance/sonic/loader/internal/rt`
)
// LoadOne loads a single function as its own module and returns its
// function pointer.
//   - text: machine code
//   - funcName: function name
//   - frameSize: stack frame size
//   - argSize: argument total size (in bytes)
//   - argPtrs: whether each 8-byte slot of argument memory stores a pointer, from low to high
//   - localPtrs: whether each 8-byte slot of local-variable memory stores a pointer, from low to high
//
// WARN:
//   - the function MUST have a fixed SP offset matching pcdata, otherwise gentraceback will fail
//   - the function MUST have only one stack map covering all arguments and locals
func (self Loader) LoadOne(text []byte, funcName string, frameSize int, argSize int, argPtrs []bool, localPtrs []bool, pcdata Pcdata) Function {
	size := uint32(len(text))
	fn := Func{
		Name:     funcName,
		TextSize: size,
		ArgsSize: int32(argSize),
	}
	fn.Pcsp = &pcdata
	// a single PCDATA_UnsafePoint entry spanning the whole function: either
	// always-unsafe (preemption disabled) or always-safe
	if self.NoPreempt {
		fn.PcUnsafePoint = &Pcdata{
			{PC: size, Val: PCDATA_UnsafePointUnsafe},
		}
	} else {
		fn.PcUnsafePoint = &Pcdata{
			{PC: size, Val: PCDATA_UnsafePointSafe},
		}
	}
	// NOTICE: suppose the function has only one stack map at index 0
	fn.PcStackMapIndex = &Pcdata{
		{PC: size, Val: 0},
	}
	if argPtrs != nil {
		args := rt.StackMapBuilder{}
		for _, b := range argPtrs {
			args.AddField(b)
		}
		fn.ArgsPointerMaps = args.Build()
	}
	if localPtrs != nil {
		locals := rt.StackMapBuilder{}
		for _, b := range localPtrs {
			locals.AddField(b)
		}
		fn.LocalsPointerMaps = locals.Build()
	}
	out := Load(text, []Func{fn}, self.Name+funcName, []string{self.File})
	return out[0]
}
// Load loads the given machine code and corresponding function information
// into a Go moduledata record and returns runnable function pointers, one
// per entry in funcs.
// WARN: this API is experimental, use it carefully.
func Load(text []byte, funcs []Func, modulename string, filenames []string) (out []Function) {
	ids := make([]string, len(funcs))
	for i, f := range funcs {
		ids[i] = f.Name
	}
	// generate module data and allocate memory address
	mod := makeModuledata(modulename, filenames, &funcs, text)
	// verify and register the new module
	moduledataverify1(mod)
	registerModule(mod)
	// encapsulate function addresses
	// NOTE(review): this inner scan is O(n^2) and, when two funcs share a
	// name, every matching id resolves to the LAST duplicate's entry —
	// confirm that is intended before simplifying.
	out = make([]Function, len(funcs))
	for i, s := range ids {
		for _, f := range funcs {
			if f.Name == s {
				m := uintptr(mod.text + uintptr(f.EntryOff))
				out[i] = Function(&m)
			}
		}
	}
	return
}

45
vendor/github.com/bytedance/sonic/loader/mmap_unix.go generated vendored Normal file
View File

@@ -0,0 +1,45 @@
//go:build !windows
// +build !windows
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`syscall`
)
// mmap flag/protection combinations used below: private anonymous mappings,
// created read-write and later flipped to read-execute.
const (
	_AP = syscall.MAP_ANON | syscall.MAP_PRIVATE
	_RX = syscall.PROT_READ | syscall.PROT_EXEC
	_RW = syscall.PROT_READ | syscall.PROT_WRITE
)
// mmap reserves nb bytes of private anonymous read-write memory and returns
// its address; it panics if the kernel rejects the request.
func mmap(nb int) uintptr {
	m, _, e := syscall.RawSyscall6(syscall.SYS_MMAP, 0, uintptr(nb), _RW, _AP, 0, 0)
	if e != 0 {
		panic(e)
	}
	return m
}

// mprotect flips the nb bytes at p to read+execute; it panics on failure.
func mprotect(p uintptr, nb int) {
	_, _, err := syscall.RawSyscall(syscall.SYS_MPROTECT, p, uintptr(nb), _RX)
	if err != 0 {
		panic(err)
	}
}

View File

@@ -0,0 +1,84 @@
//go:build windows
// +build windows
// build
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`syscall`
`unsafe`
)
// VirtualAlloc allocation-type flags (see the Win32 memoryapi docs).
const (
	MEM_COMMIT  = 0x00001000
	MEM_RESERVE = 0x00002000
)

var (
	// kernel32 entry points resolved lazily at first call.
	libKernel32                = syscall.NewLazyDLL("KERNEL32.DLL")
	libKernel32_VirtualAlloc   = libKernel32.NewProc("VirtualAlloc")
	libKernel32_VirtualProtect = libKernel32.NewProc("VirtualProtect")
)
// mmap commits nb bytes of read-write memory via VirtualAlloc and returns
// its address; it panics on failure.
func mmap(nb int) uintptr {
	base, e := winapi_VirtualAlloc(0, nb, MEM_COMMIT|MEM_RESERVE, syscall.PAGE_READWRITE)
	if e != nil {
		panic(e)
	}
	return base
}

// mprotect flips the nb bytes at p to execute+read via VirtualProtect and
// returns the previous protection value; it panics on failure.
func mprotect(p uintptr, nb int) int {
	var prev int
	if e := winapi_VirtualProtect(p, nb, syscall.PAGE_EXECUTE_READ, &prev); e != nil {
		panic(e)
	}
	return prev
}
// winapi_VirtualAlloc allocates memory; a zero return from the API is
// reported as an error.
// Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
func winapi_VirtualAlloc(lpAddr uintptr, dwSize int, flAllocationType int, flProtect int) (uintptr, error) {
	r1, _, err := libKernel32_VirtualAlloc.Call(
		lpAddr,
		uintptr(dwSize),
		uintptr(flAllocationType),
		uintptr(flProtect),
	)
	if r1 == 0 {
		return 0, err
	}
	return r1, nil
}
// winapi_VirtualProtect changes memory protection; the previous protection
// is written through lpflOldProtect. A zero return from the API is reported
// as an error.
// Doc: https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualprotect
func winapi_VirtualProtect(lpAddr uintptr, dwSize int, flNewProtect int, lpflOldProtect *int) error {
	r1, _, err := libKernel32_VirtualProtect.Call(
		lpAddr,
		uintptr(dwSize),
		uintptr(flNewProtect),
		uintptr(unsafe.Pointer(lpflOldProtect)),
	)
	if r1 == 0 {
		return err
	}
	return nil
}

359
vendor/github.com/bytedance/sonic/loader/moduledata.go generated vendored Normal file
View File

@@ -0,0 +1,359 @@
//go:build go1.18 && !go1.27
// +build go1.18,!go1.27
/*
* Copyright 2021 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`os`
`sort`
`unsafe`
`github.com/bytedance/sonic/loader/internal/rt`
)
// funcTab is one entry of the pc->func lookup table: a function's entry PC
// offset and the offset of its _func record within the pclntab.
type funcTab struct {
	entry   uint32
	funcoff uint32
}

// pcHeader mirrors the runtime's pclntab header layout.
type pcHeader struct {
	magic          uint32  // 0xFFFFFFF0
	pad1, pad2     uint8   // 0,0
	minLC          uint8   // min instruction size
	ptrSize        uint8   // size of a ptr in bytes
	nfunc          int     // number of functions in the module
	nfiles         uint    // number of entries in the file tab
	textStart      uintptr // base for function entry PC offsets in this module, equal to moduledata.text
	funcnameOffset uintptr // offset to the funcnametab variable from pcHeader
	cuOffset       uintptr // offset to the cutab variable from pcHeader
	filetabOffset  uintptr // offset to the filetab variable from pcHeader
	pctabOffset    uintptr // offset to the pctab variable from pcHeader
	pclnOffset     uintptr // offset to the pclntab variable from pcHeader
}

// bitVector mirrors the runtime's bit vector used for pointer maps.
type bitVector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptabEntry mirrors one exported-symbol entry (name/type offsets).
type ptabEntry struct {
	name int32
	typ  int32
}

// textSection describes one text section and its relocated base address.
type textSection struct {
	vaddr    uintptr // prelinked section vaddr
	end      uintptr // vaddr + section length
	baseaddr uintptr // relocated section address
}

// modulehash mirrors the runtime's module hash record.
type modulehash struct {
	modulename   string
	linktimehash string
	runtimehash  *string
}

// findfuncbucket is an array of these structures.
// Each bucket represents 4096 bytes of the text segment.
// Each subbucket represents 256 bytes of the text segment.
// To find a function given a pc, locate the bucket and subbucket for
// that pc. Add together the idx and subbucket value to obtain a
// function index. Then scan the functab array starting at that
// index to find the target function.
// This table uses 20 bytes for every 4096 bytes of code, or ~0.5% overhead.
type findfuncbucket struct {
	idx         uint32
	_SUBBUCKETS [16]byte
}

// compilationUnit groups the source file names of one compilation unit.
type compilationUnit struct {
	fileNames []string
}
// makeFtab computes the pclntab layout: one funcTab entry per function
// (entry PC offset + offset of its _func record), a trailing end-pc
// sentinel, the total pclntab size, and the byte offset at which each
// _func record will be written.
func makeFtab(funcs []_func, maxpc uint32) (ftab []funcTab, pclntabSize int64, startLocations []uint32) {
	// Allocate space for the pc->func table. This structure consists of a pc offset
	// and an offset to the func structure. After that, we have a single pc
	// value that marks the end of the last function in the binary.
	pclntabSize = int64(len(funcs)*2*int(_PtrSize) + int(_PtrSize))
	startLocations = make([]uint32, len(funcs))
	for i, f := range funcs {
		pclntabSize = rnd(pclntabSize, int64(_PtrSize))
		//writePCToFunc
		startLocations[i] = uint32(pclntabSize)
		// NOTE(review): the sum below is evaluated in uint8 arithmetic and
		// wraps past 255 — confirm nfuncdata/npcdata counts always stay
		// small enough for this to be safe.
		pclntabSize += int64(uint8(_FUNC_SIZE) + f.nfuncdata*4 + uint8(f.npcdata)*4)
	}
	ftab = make([]funcTab, 0, len(funcs)+1)
	// write a map of pc->func info offsets
	for i, f := range funcs {
		ftab = append(ftab, funcTab{uint32(f.entryOff), uint32(startLocations[i])})
	}
	// Final entry of table is just end pc offset.
	ftab = append(ftab, funcTab{maxpc, 0})
	return
}
// Pcln table format: [...]funcTab + [...]_Func
//
// makePclntable serializes the pc->func table and the per-function _func
// records (followed by their pcdata and funcdata offset arrays) into a
// single byte slice laid out exactly as the runtime expects.
// size and startLocations must come from makeFtab over the same funcs.
func makePclntable(size int64, startLocations []uint32, funcs []_func, maxpc uint32, pcdataOffs [][]uint32, funcdataOffs [][]uint32) (pclntab []byte) {
    // Allocate space for the pc->func table. This structure consists of a pc offset
    // and an offset to the func structure. After that, we have a single pc
    // value that marks the end of the last function in the binary.
    pclntab = make([]byte, size, size)
    // write a map of pc->func info offsets
    offs := 0
    for i, f := range funcs {
        byteOrder.PutUint32(pclntab[offs:offs+4], uint32(f.entryOff))
        byteOrder.PutUint32(pclntab[offs+4:offs+8], uint32(startLocations[i]))
        offs += 8
    }
    // Final entry of table is just end pc offset.
    byteOrder.PutUint32(pclntab[offs:offs+4], maxpc)
    // write func info table
    for i := range funcs {
        off := startLocations[i]
        // write _func structure to pclntab (raw memory copy of the struct)
        fb := rt.BytesFrom(unsafe.Pointer(&funcs[i]), int(_FUNC_SIZE), int(_FUNC_SIZE))
        copy(pclntab[off:off+uint32(_FUNC_SIZE)], fb)
        off += uint32(_FUNC_SIZE)
        // NOTICE: _func.pcdata always starts from PcUnsafePoint, which is index 3
        for j := 3; j < len(pcdataOffs[i]); j++ {
            byteOrder.PutUint32(pclntab[off:off+4], uint32(pcdataOffs[i][j]))
            off += 4
        }
        // funcdata refs as offsets from gofunc
        for _, funcdata := range funcdataOffs[i] {
            byteOrder.PutUint32(pclntab[off:off+4], uint32(funcdata))
            off += 4
        }
    }
    return
}
// findfunc table used to map pc to belonging func,
// returns the index in the func table.
//
// All text section are divided into buckets sized _BUCKETSIZE(4K):
// every bucket is divided into _SUBBUCKETS sized _SUB_BUCKETSIZE(64),
// and it has a base idx to plus the offset stored in jth subbucket.
// see findfunc() in runtime/symtab.go
//
// writeFindfunctab appends the serialized findfuncbucket array to *out
// and returns the byte offset within *out where the table starts.
// ftab must be sorted by entry and end with the sentinel produced by
// makeFtab (its entry is the text-end pc).
func writeFindfunctab(out *[]byte, ftab []funcTab) (start int) {
    start = len(*out)
    max := ftab[len(ftab)-1].entry
    min := ftab[0].entry
    nbuckets := (max - min + _BUCKETSIZE - 1) / _BUCKETSIZE
    n := (max - min + _SUB_BUCKETSIZE - 1) / _SUB_BUCKETSIZE
    tab := make([]findfuncbucket, 0, nbuckets)
    // s walks the subbucket granularity, e the bucket granularity;
    // both are indices into ftab.
    var s, e = 0, 0
    for i := 0; i < int(nbuckets); i++ {
        // store the start s-th func of the bucket
        var fb = findfuncbucket{idx: uint32(s)}
        // find the last e-th func of the bucket
        var pc = min + uint32((i+1)*_BUCKETSIZE)
        for ; e < len(ftab)-1 && ftab[e+1].entry <= pc; e++ {
        }
        for j := 0; j < _SUBBUCKETS && (i*_SUBBUCKETS+j) < int(n); j++ {
            // store the start func of the subbucket as a delta from the bucket base
            fb._SUBBUCKETS[j] = byte(uint32(s) - fb.idx)
            // find the s-th end func of the subbucket
            pc = min + uint32(i*_BUCKETSIZE) + uint32((j+1)*_SUB_BUCKETSIZE)
            for ; s < len(ftab)-1 && ftab[s+1].entry <= pc; s++ {
            }
        }
        s = e
        tab = append(tab, fb)
    }
    // write findfuncbucket (raw memory copy of the slice contents)
    if len(tab) > 0 {
        size := int(unsafe.Sizeof(findfuncbucket{})) * len(tab)
        *out = append(*out, rt.BytesFrom(unsafe.Pointer(&tab[0]), size, size)...)
    }
    return
}
// makeModuledata builds a complete moduledata for the given machine code:
// it sorts funcs by entry (mutating *funcsp), builds the filename/funcname
// tables, mmaps and protects an executable copy of text, generates the
// pctab/pclntab/findfunctab structures, and fills in the pcHeader.
//
// The funcdata cache and findfunctab are kept alive via moduleCache so the
// raw uintptr references stored in mod do not get collected.
func makeModuledata(name string, filenames []string, funcsp *[]Func, text []byte) (mod *moduledata) {
    mod = new(moduledata)
    mod.modulename = name
    // sort funcs by entry
    funcs := *funcsp
    sort.Slice(funcs, func(i, j int) bool {
        return funcs[i].EntryOff < funcs[j].EntryOff
    })
    *funcsp = funcs
    // make filename table (a single compilation unit holding all files)
    cu := make([]string, 0, len(filenames))
    cu = append(cu, filenames...)
    cutab, filetab, cuOffs := makeFilenametab([]compilationUnit{{cu}})
    mod.cutab = cutab
    mod.filetab = filetab
    // make funcname table
    funcnametab, nameOffs := makeFuncnameTab(funcs)
    mod.funcnametab = funcnametab
    // mmap() text and funcdata segments; size is rounded up to page size
    p := os.Getpagesize()
    size := int(rnd(int64(len(text)), int64(p)))
    addr := mmap(size)
    // copy the machine code
    s := rt.BytesFrom(unsafe.Pointer(addr), len(text), size)
    copy(s, text)
    // make it executable
    mprotect(addr, size)
    // assign addresses; note maxpc covers only the real code, not the
    // page-rounded mapping
    mod.text = addr
    mod.etext = addr + uintptr(size)
    mod.minpc = addr
    mod.maxpc = addr + uintptr(len(text))
    // make pcdata table
    // NOTICE: _func only use offset to index pcdata, thus no need mmap() pcdata
    cuOff := cuOffs[0]
    pctab, pcdataOffs, _funcs := makePctab(funcs, cuOff, nameOffs)
    mod.pctab = pctab
    // write func data
    // NOTICE: _func use mod.gofunc+offset to directly point funcdata, thus need cache funcdata
    // TODO: estimate accurate capacity
    cache := make([]byte, 0, len(funcs)*int(_PtrSize))
    fstart, funcdataOffs := writeFuncdata(&cache, funcs)
    // make pc->func (binary search) func table
    ftab, pclntSize, startLocations := makeFtab(_funcs, uint32(len(text)))
    mod.ftab = ftab
    // write pc->func (modmap) findfunc table
    ffstart := writeFindfunctab(&cache, ftab)
    // cache funcdata and findfuncbucket so the uintptrs below stay valid
    moduleCache.Lock()
    moduleCache.m[mod] = cache
    moduleCache.Unlock()
    mod.gofunc = uintptr(unsafe.Pointer(&cache[fstart]))
    mod.findfunctab = uintptr(unsafe.Pointer(&cache[ffstart]))
    // make pclnt table
    pclntab := makePclntable(pclntSize, startLocations, _funcs, uint32(len(text)), pcdataOffs, funcdataOffs)
    mod.pclntable = pclntab
    if len(pclntab) > 0 {
        setEpclntab(mod, uintptr(unsafe.Pointer(&pclntab[0]))+uintptr(len(pclntab)))
    }
    // make pc header
    mod.pcHeader = &pcHeader{
        magic:          _Magic,
        minLC:          _MinLC,
        ptrSize:        _PtrSize,
        nfunc:          len(funcs),
        nfiles:         uint(len(cu)),
        textStart:      mod.text,
        funcnameOffset: getOffsetOf(moduledata{}, "funcnametab"),
        cuOffset:       getOffsetOf(moduledata{}, "cutab"),
        filetabOffset:  getOffsetOf(moduledata{}, "filetab"),
        pctabOffset:    getOffsetOf(moduledata{}, "pctab"),
        pclnOffset:     getOffsetOf(moduledata{}, "pclntable"),
    }
    // special case: gcdata and gcbss must by non-empty
    mod.gcdata = uintptr(unsafe.Pointer(&emptyByte))
    mod.gcbss = uintptr(unsafe.Pointer(&emptyByte))
    return
}
// makePctab generates pcdelta->valuedelta tables for functions,
// and returns the table and the entry offset of every kind pcdata in the table.
//
// The returned _funcs mirror funcs in runtime _func form; each _func's
// pcsp/pcfile/pcln fields are set to the table offset only when the
// corresponding Pcdata is present (0 means "absent" to the runtime).
func makePctab(funcs []Func, cuOffset uint32, nameOffset []int32) (pctab []byte, pcdataOffs [][]uint32, _funcs []_func) {
    _funcs = make([]_func, len(funcs))
    // Pctab offsets of 0 are considered invalid in the runtime. We respect
    // that by just padding a single byte at the beginning of runtime.pctab,
    // that way no real offsets can be zero.
    pctab = make([]byte, 1, 12*len(funcs)+1)
    pcdataOffs = make([][]uint32, len(funcs))
    for i, f := range funcs {
        _f := &_funcs[i]
        // writer appends one encoded pcdata table (or a placeholder for nil)
        // and records its offset; call order below fixes the pcdata indices.
        var writer = func(pc *Pcdata) {
            var ab []byte
            var err error
            if pc != nil {
                ab, err = pc.MarshalBinary()
                if err != nil {
                    panic(err)
                }
                pcdataOffs[i] = append(pcdataOffs[i], uint32(len(pctab)))
            } else {
                ab = []byte{0}
                pcdataOffs[i] = append(pcdataOffs[i], _PCDATA_INVALID_OFFSET)
            }
            pctab = append(pctab, ab...)
        }
        // offsets must be captured BEFORE writer appends the table
        if f.Pcsp != nil {
            _f.pcsp = uint32(len(pctab))
        }
        writer(f.Pcsp)
        if f.Pcfile != nil {
            _f.pcfile = uint32(len(pctab))
        }
        writer(f.Pcfile)
        if f.Pcline != nil {
            _f.pcln = uint32(len(pctab))
        }
        writer(f.Pcline)
        writer(f.PcUnsafePoint)
        writer(f.PcStackMapIndex)
        writer(f.PcInlTreeIndex)
        writer(f.PcArgLiveIndex)
        _f.entryOff = f.EntryOff
        _f.nameOff = nameOffset[i]
        _f.args = f.ArgsSize
        _f.deferreturn = f.DeferReturn
        // NOTICE: _func.pcdata is always as [PCDATA_UnsafePoint(0) : PCDATA_ArgLiveIndex(3)]
        _f.npcdata = uint32(_N_PCDATA)
        _f.cuOffset = cuOffset
        _f.funcID = f.ID
        _f.flag = f.Flag
        _f.nfuncdata = uint8(_N_FUNCDATA)
    }
    return
}
// registerFunction is a no-op in this build variant.
// NOTE(review): intent inferred from the empty body — presumably kept for
// signature compatibility with other platform/variant files of the loader;
// confirm against the sibling implementations.
func registerFunction(name string, pc uintptr, textSize uintptr, fp int, args int, size uintptr, argptrs uintptr, localptrs uintptr) {}

93
vendor/github.com/bytedance/sonic/loader/pcdata.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`encoding/binary`
)
const (
    // _N_PCDATA is the number of pcdata tables emitted per function
    // (the four indices below).
    _N_PCDATA = 4

    _PCDATA_UnsafePoint   = 0
    _PCDATA_StackMapIndex = 1
    _PCDATA_InlTreeIndex  = 2
    _PCDATA_ArgLiveIndex  = 3

    // _PCDATA_INVALID_OFFSET marks an absent pcdata table; offset 0 is
    // never a real table offset because pctab is padded with one byte.
    _PCDATA_INVALID_OFFSET = 0
)
// Special PCDATA values mirrored from the Go runtime (runtime/symtab.go).
const (
    // PCDATA_UnsafePoint values.
    PCDATA_UnsafePointSafe   = -1 // Safe for async preemption
    PCDATA_UnsafePointUnsafe = -2 // Unsafe for async preemption

    // PCDATA_Restart1(2) apply on a sequence of instructions, within
    // which if an async preemption happens, we should back off the PC
    // to the start of the sequence when resume.
    // We need two so we can distinguish the start/end of the sequence
    // in case that two sequences are next to each other.
    PCDATA_Restart1 = -3
    PCDATA_Restart2 = -4

    // Like PCDATA_RestartAtEntry, but back to function entry if async
    // preempted.
    PCDATA_RestartAtEntry = -5

    // _PCDATA_START_VAL is the implicit value at function entry that
    // delta encoding starts from.
    _PCDATA_START_VAL = -1
)
// emptyByte backs moduledata.gcdata/gcbss for generated modules: the
// runtime requires those pointers to be non-nil even when there is no
// GC data (see makeModuledata).
var emptyByte byte
// Pcvalue is the program count corresponding to the value Val
// WARN: we use relative value here (to function entry)
type Pcvalue struct {
    PC  uint32 // program count relative to function entry
    Val int32  // value relative to the value in function entry
}
// Pcdata represents pc->value mapping table.
// WARN: we use ** [Pcdata[i].PC, Pcdata[i+1].PC) **
// as the range where the Pcdata[i].Val is effective.
// Entries must be in ascending PC order (MarshalBinary panics otherwise).
type Pcdata []Pcvalue
// MarshalBinary encodes the table into the runtime's pcdata wire format:
// a sequence of (value-delta varint, pc-delta uvarint) pairs terminated by
// a zero byte. Deltas start from _PCDATA_START_VAL at pc 0.
// Panics if entries are not in ascending PC order.
// see https://docs.google.com/document/d/1lyPIbmsYbXnpNj57a261hgOYVpNRcgydurVQIyZOz_o/pub
func (pd Pcdata) MarshalBinary() (data []byte, err error) {
    // delta value always starts from -1
    var (
        prevVal = int32(_PCDATA_START_VAL)
        prevPC  = uint32(0)
        scratch = make([]byte, binary.MaxVarintLen32)
    )
    for _, entry := range pd {
        if entry.PC < prevPC {
            panic("PC must be in ascending order!")
        }
        deltaPC := uint64(entry.PC - prevPC)
        deltaVal := int64(entry.Val - prevVal)
        // skip entries where either the value or the pc did not advance
        if deltaVal == 0 || deltaPC == 0 {
            continue
        }
        n := binary.PutVarint(scratch, deltaVal)
        data = append(data, scratch[:n]...)
        n = binary.PutUvarint(scratch, deltaPC)
        data = append(data, scratch[:n]...)
        prevPC, prevVal = entry.PC, entry.Val
    }
    // put 0 to indicate ends
    data = append(data, 0)
    return data, nil
}

55
vendor/github.com/bytedance/sonic/loader/register.go generated vendored Normal file
View File

@@ -0,0 +1,55 @@
// +build !bytedance_tango
/**
* Copyright 2024 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
"sync/atomic"
"unsafe"
)
// registerModule links mod onto the runtime's moduledata list, whose tail
// pointer is lastmoduledatap (reached via go:linkname in stubs.go).
func registerModule(mod *moduledata) {
    registerModuleLockFree(&lastmoduledatap, mod)
}
// registerModuleLockFree appends mod to the tail of a moduledata list
// using a CAS loop on the tail pointer, so concurrent registrations do
// not require a lock.
// NOTE(review): the old tail's next pointer is published only after the
// CAS succeeds, so a reader walking the next-chain may briefly not reach
// mod even though *tail already points at it — presumably acceptable for
// module registration; confirm against runtime expectations.
func registerModuleLockFree(tail **moduledata, mod *moduledata) {
    for {
        oldTail := loadModule(tail)
        if casModule(tail, oldTail, mod) {
            storeModule(&oldTail.next, mod)
            break
        }
    }
}
func loadModule(p **moduledata) *moduledata {
return (*moduledata)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
// storeModule atomically stores value into the *moduledata slot at p.
func storeModule(p **moduledata, value *moduledata) {
    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(value))
}
// casModule atomically compare-and-swaps the *moduledata slot at p from
// oldValue to newValue, reporting whether the swap happened.
func casModule(p **moduledata, oldValue *moduledata, newValue *moduledata) bool {
    return atomic.CompareAndSwapPointer(
        (*unsafe.Pointer)(unsafe.Pointer(p)),
        unsafe.Pointer(oldValue),
        unsafe.Pointer(newValue),
    )
}

View File

@@ -0,0 +1,34 @@
// +build bytedance_tango
/**
* Copyright 2024 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
"sync"
_ "unsafe"
)
// pluginsMu is the plugin package's registration mutex, reached via
// go:linkname. NOTE(review): presumably held here to serialize updates to
// the runtime module list against plugin loading — confirm.
//go:linkname pluginsMu plugin.pluginsMu
var pluginsMu sync.Mutex

// registerModule appends mod to the tail of the runtime moduledata list
// under pluginsMu (bytedance_tango build variant).
func registerModule(mod *moduledata) {
    pluginsMu.Lock()
    defer pluginsMu.Unlock()
    lastmoduledatap.next = mod
    lastmoduledatap = mod
}

28
vendor/github.com/bytedance/sonic/loader/stubs.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
_ `unsafe`
)
// lastmoduledatap is the runtime's tail pointer of its moduledata list,
// reached via go:linkname so generated modules can be appended to it
// (see register.go).
//go:linkname lastmoduledatap runtime.lastmoduledatap
//goland:noinspection GoUnusedGlobalVariable
var lastmoduledatap *moduledata

// moduledataverify1 is the runtime's consistency check for a single
// moduledata, linked in so the loader can validate generated modules.
//go:linkname moduledataverify1 runtime.moduledataverify1
func moduledataverify1(_ *moduledata)

185
vendor/github.com/bytedance/sonic/loader/wrapper.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
/**
* Copyright 2023 ByteDance Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package loader
import (
`reflect`
`unsafe`
`github.com/bytedance/sonic/loader/internal/abi`
`github.com/bytedance/sonic/loader/internal/rt`
)
// _C_Redzone is the pointer bitmap handed to abi.NewFrame for the redzone
// slots of Go->C call frames: none of the four slots holds a pointer.
// NOTE(review): exact slot semantics are defined in internal/abi — confirm.
var _C_Redzone = []bool{false, false, false, false}
// CFunc is a function information for C func
type CFunc struct {
    // C function name
    Name string

    // entry pc relative to entire text segment
    EntryOff uint32

    // function text size in bytes
    TextSize uint32

    // maximum stack depth of the function
    MaxStack uintptr

    // PC->SP delta lists of the function; layout-compatible with []Pcvalue
    // (it is reinterpreted as *Pcdata in WrapGoC)
    Pcsp [][2]uint32
}
// GoC is the wrapper for Go calls to C
type GoC struct {
    // CName is the name of corresponding C function
    CName string

    // CEntry points out where to store the entry address of corresponding C function.
    // It won't be set if nil
    CEntry *uintptr

    // GoFunc is the POINTER of corresponding go stub function.
    // It is used to generate Go-C ABI conversion wrapper and receive the wrapper's address
    // eg. &func(a int, b int) int
    // FOR
    // int add(int a, int b)
    // It won't be set if nil
    GoFunc interface{}
}
// WrapGoC wraps C functions and loads them into Go stubs.
//
// It first loads text as a module of raw C (assembly-flagged) functions
// described by natives. Then, for every stub whose CName matches a native:
//   - if CEntry is non-nil, the absolute entry address of the C function
//     is stored into *CEntry;
//   - if GoFunc is non-nil, a Go-ABI -> C-ABI conversion wrapper is
//     assembled, loaded as a second module ("<modulename>/go"), and the
//     resulting Go function value is written through GoFunc.
//
// modulename names the registered modules; filename is recorded as the
// source file of the generated functions.
func WrapGoC(text []byte, natives []CFunc, stubs []GoC, modulename string, filename string) {
    funcs := make([]Func, len(natives))

    // register C funcs
    for i, f := range natives {
        fn := Func{
            Flag:     FuncFlag_ASM,
            EntryOff: f.EntryOff,
            TextSize: f.TextSize,
            Name:     f.Name,
        }
        if len(f.Pcsp) != 0 {
            // reinterpret [][2]uint32 as []Pcvalue (identical memory layout)
            fn.Pcsp = (*Pcdata)(unsafe.Pointer(&natives[i].Pcsp))
        }
        // NOTICE: always forbid async preempt
        fn.PcUnsafePoint = &Pcdata{
            {PC: f.TextSize, Val: PCDATA_UnsafePointUnsafe},
        }
        // NOTICE: always refer to first file
        fn.Pcfile = &Pcdata{
            {PC: f.TextSize, Val: 0},
        }
        // NOTICE: always refer to first line
        fn.Pcline = &Pcdata{
            {PC: f.TextSize, Val: 1},
        }
        // NOTICE: copystack need locals stackmap
        fn.PcStackMapIndex = &Pcdata{
            {PC: f.TextSize, Val: 0},
        }
        sm := rt.StackMapBuilder{}
        sm.AddField(false)
        fn.ArgsPointerMaps = sm.Build()
        fn.LocalsPointerMaps = sm.Build()
        funcs[i] = fn
    }
    // FIX: was `[]string(unknown)`, which is not valid Go — the module's
    // file table must carry the caller-supplied filename.
    rets := Load(text, funcs, modulename, []string{filename})
    // got absolute entry address
    native_entry := **(**uintptr)(unsafe.Pointer(&rets[0]))
    // println("native_entry: ", native_entry)

    wraps := make([]Func, 0, len(stubs))
    wrapIds := make([]int, 0, len(stubs))
    // FIX: capacity was len(wraps), which is always 0 here; size the code
    // buffer by the number of stubs instead.
    code := make([]byte, 0, len(stubs))
    entryOff := uint32(0)

    // register go wrappers
    for i := range stubs {
        for j := range natives {
            if stubs[i].CName != natives[j].Name {
                continue
            }
            // calculate corresponding C entry
            pc := native_entry + uintptr(natives[j].EntryOff)
            if stubs[i].CEntry != nil {
                *stubs[i].CEntry = pc
            }
            // no need to generate wrapper, continue next
            if stubs[i].GoFunc == nil {
                continue
            }
            // assemble wrapper codes
            layout := abi.NewFunctionLayout(reflect.TypeOf(stubs[i].GoFunc).Elem())
            frame := abi.NewFrame(&layout, _C_Redzone, true)
            tcode := abi.CallC(pc, frame, natives[j].MaxStack)
            code = append(code, tcode...)
            size := uint32(len(tcode))

            fn := Func{
                Flag:     FuncFlag_ASM,
                ArgsSize: int32(layout.ArgSize()),
                EntryOff: entryOff,
                TextSize: size,
                Name:     stubs[i].CName + "_go",
            }
            // add check-stack and grow-stack texts' pcsp
            fn.Pcsp = &Pcdata{
                {PC: uint32(frame.StackCheckTextSize()), Val: 0},
                {PC: size - uint32(frame.GrowStackTextSize()), Val: int32(frame.Size())},
                {PC: size, Val: 0},
            }
            // NOTICE: always refer to first file
            fn.Pcfile = &Pcdata{
                {PC: size, Val: 0},
            }
            // NOTICE: always refer to first line
            fn.Pcline = &Pcdata{
                {PC: size, Val: 1},
            }
            // NOTICE: always forbid async preempt
            fn.PcUnsafePoint = &Pcdata{
                {PC: size, Val: PCDATA_UnsafePointUnsafe},
            }
            // register pointer stackmaps
            fn.PcStackMapIndex = &Pcdata{
                {PC: size, Val: 0},
            }
            fn.ArgsPointerMaps = frame.ArgPtrs()
            fn.LocalsPointerMaps = frame.LocalPtrs()
            entryOff += size
            wraps = append(wraps, fn)
            wrapIds = append(wrapIds, i)
        }
    }
    gofuncs := Load(code, wraps, modulename+"/go", []string{filename + ".go"})

    // set go func value
    for i := range gofuncs {
        idx := wrapIds[i]
        w := rt.UnpackEface(stubs[idx].GoFunc)
        *(*Function)(w.Value) = gofuncs[i]
    }
}