chore: migrate to gitea
vendor/golang.org/x/arch/LICENSE (new file, 27 lines, generated, vendored)
@@ -0,0 +1,27 @@
Copyright 2015 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/golang.org/x/arch/PATENTS (new file, 22 lines, generated, vendored)
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
vendor/golang.org/x/arch/x86/x86asm/Makefile (new file, 3 lines, generated, vendored)
@@ -0,0 +1,3 @@
tables.go: ../x86map/map.go ../x86.csv
	go run ../x86map/map.go -fmt=decoder ../x86.csv >_tables.go && gofmt _tables.go >tables.go && rm _tables.go
vendor/golang.org/x/arch/x86/x86asm/decode.go (new file, 1724 lines, generated, vendored)
File diff suppressed because it is too large.
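Since the decode.go hunk is collapsed, here is a minimal sketch of how the vendored decoder is typically driven. It assumes the upstream package's Decode entry point, func Decode(src []byte, mode int) (Inst, error), which lives in the suppressed file and is not shown in this diff.

// Illustrative sketch only, not part of the vendored sources. Decode's
// signature is assumed from upstream golang.org/x/arch/x86/x86asm.
package main

import (
	"fmt"
	"log"

	"golang.org/x/arch/x86/x86asm"
)

func main() {
	code := []byte{0x48, 0x89, 0xd8} // mov rax, rbx (64-bit mode)

	inst, err := x86asm.Decode(code, 64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(inst)     // default rendering via Inst.String (Intel operand order)
	fmt.Println(inst.Len) // number of bytes consumed by this instruction
}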
vendor/golang.org/x/arch/x86/x86asm/gnu.go (new file, 958 lines, generated, vendored)
@@ -0,0 +1,958 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x86asm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils.
|
||||
// This general form is often called “AT&T syntax” as a reference to AT&T System V Unix.
|
||||
func GNUSyntax(inst Inst, pc uint64, symname SymLookup) string {
|
||||
// Rewrite instruction to mimic GNU peculiarities.
|
||||
// Note that inst has been passed by value and contains
|
||||
// no pointers, so any changes we make here are local
|
||||
// and will not propagate back out to the caller.
|
||||
|
||||
if symname == nil {
|
||||
symname = func(uint64) (string, uint64) { return "", 0 }
|
||||
}
|
||||
|
||||
// Adjust opcode [sic].
|
||||
switch inst.Op {
|
||||
case FDIV, FDIVR, FSUB, FSUBR, FDIVP, FDIVRP, FSUBP, FSUBRP:
|
||||
// DC E0, DC F0: libopcodes swaps FSUBR/FSUB and FDIVR/FDIV, at least
|
||||
// if you believe the Intel manual is correct (the encoding is irregular as given;
|
||||
// libopcodes uses the more regular expected encoding).
|
||||
// TODO(rsc): Test to ensure Intel manuals are correct and report to libopcodes maintainers?
|
||||
// NOTE: iant thinks this is deliberate, but we can't find the history.
|
||||
_, reg1 := inst.Args[0].(Reg)
|
||||
_, reg2 := inst.Args[1].(Reg)
|
||||
if reg1 && reg2 && (inst.Opcode>>24 == 0xDC || inst.Opcode>>24 == 0xDE) {
|
||||
switch inst.Op {
|
||||
case FDIV:
|
||||
inst.Op = FDIVR
|
||||
case FDIVR:
|
||||
inst.Op = FDIV
|
||||
case FSUB:
|
||||
inst.Op = FSUBR
|
||||
case FSUBR:
|
||||
inst.Op = FSUB
|
||||
case FDIVP:
|
||||
inst.Op = FDIVRP
|
||||
case FDIVRP:
|
||||
inst.Op = FDIVP
|
||||
case FSUBP:
|
||||
inst.Op = FSUBRP
|
||||
case FSUBRP:
|
||||
inst.Op = FSUBP
|
||||
}
|
||||
}
|
||||
|
||||
case MOVNTSD:
|
||||
// MOVNTSD is F2 0F 2B /r.
|
||||
// MOVNTSS is F3 0F 2B /r (supposedly; not in manuals).
|
||||
// Usually inner prefixes win for display,
|
||||
// so that F3 F2 0F 2B 11 is REP MOVNTSD
|
||||
// and F2 F3 0F 2B 11 is REPN MOVNTSS.
|
||||
// Libopcodes always prefers MOVNTSS regardless of prefix order.
|
||||
if countPrefix(&inst, 0xF3) > 0 {
|
||||
found := false
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
switch inst.Prefix[i] & 0xFF {
|
||||
case 0xF3:
|
||||
if !found {
|
||||
found = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case 0xF2:
|
||||
inst.Prefix[i] &^= PrefixImplicit
|
||||
}
|
||||
}
|
||||
inst.Op = MOVNTSS
|
||||
}
|
||||
}
|
||||
|
||||
// Add implicit arguments.
|
||||
switch inst.Op {
|
||||
case MONITOR:
|
||||
inst.Args[0] = EDX
|
||||
inst.Args[1] = ECX
|
||||
inst.Args[2] = EAX
|
||||
if inst.AddrSize == 16 {
|
||||
inst.Args[2] = AX
|
||||
}
|
||||
|
||||
case MWAIT:
|
||||
if inst.Mode == 64 {
|
||||
inst.Args[0] = RCX
|
||||
inst.Args[1] = RAX
|
||||
} else {
|
||||
inst.Args[0] = ECX
|
||||
inst.Args[1] = EAX
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust which prefixes will be displayed.
|
||||
// The rule is to display all the prefixes not implied by
|
||||
// the usual instruction display, that is, all the prefixes
|
||||
// except the ones with PrefixImplicit set.
|
||||
// However, of course, there are exceptions to the rule.
|
||||
switch inst.Op {
|
||||
case CRC32:
|
||||
// CRC32 has a mandatory F2 prefix.
|
||||
// If there are multiple F2s and no F3s, the extra F2s do not print.
|
||||
// (And Decode has already marked them implicit.)
|
||||
// However, if there is an F3 anywhere, then the extra F2s do print.
|
||||
// If there are multiple F2 prefixes *and* an (ignored) F3,
|
||||
// then libopcodes prints the extra F2s as REPNs.
|
||||
if countPrefix(&inst, 0xF2) > 1 {
|
||||
unmarkImplicit(&inst, 0xF2)
|
||||
markLastImplicit(&inst, 0xF2)
|
||||
}
|
||||
|
||||
// An unused data size override should probably be shown,
|
||||
// to distinguish DATA16 CRC32B from plain CRC32B,
|
||||
// but libopcodes always treats the final override as implicit
|
||||
// and the others as explicit.
|
||||
unmarkImplicit(&inst, PrefixDataSize)
|
||||
markLastImplicit(&inst, PrefixDataSize)
|
||||
|
||||
case CVTSI2SD, CVTSI2SS:
|
||||
if !isMem(inst.Args[1]) {
|
||||
markLastImplicit(&inst, PrefixDataSize)
|
||||
}
|
||||
|
||||
case CVTSD2SI, CVTSS2SI, CVTTSD2SI, CVTTSS2SI,
|
||||
ENTER, FLDENV, FNSAVE, FNSTENV, FRSTOR, LGDT, LIDT, LRET,
|
||||
POP, PUSH, RET, SGDT, SIDT, SYSRET, XBEGIN:
|
||||
markLastImplicit(&inst, PrefixDataSize)
|
||||
|
||||
case LOOP, LOOPE, LOOPNE, MONITOR:
|
||||
markLastImplicit(&inst, PrefixAddrSize)
|
||||
|
||||
case MOV:
|
||||
// The 16-bit and 32-bit forms of MOV Sreg, dst and MOV src, Sreg
|
||||
// cannot be distinguished when src or dst refers to memory, because
|
||||
// Sreg is always a 16-bit value, even when we're doing a 32-bit
|
||||
// instruction. Because the instruction tables distinguished these two,
|
||||
// any operand size prefix has been marked as used (to decide which
|
||||
// branch to take). Unmark it, so that it will show up in disassembly,
|
||||
// so that the reader can tell the size of memory operand.
|
||||
// up with the same arguments
|
||||
dst, _ := inst.Args[0].(Reg)
|
||||
src, _ := inst.Args[1].(Reg)
|
||||
if ES <= src && src <= GS && isMem(inst.Args[0]) || ES <= dst && dst <= GS && isMem(inst.Args[1]) {
|
||||
unmarkImplicit(&inst, PrefixDataSize)
|
||||
}
|
||||
|
||||
case MOVDQU:
|
||||
if countPrefix(&inst, 0xF3) > 1 {
|
||||
unmarkImplicit(&inst, 0xF3)
|
||||
markLastImplicit(&inst, 0xF3)
|
||||
}
|
||||
|
||||
case MOVQ2DQ:
|
||||
markLastImplicit(&inst, PrefixDataSize)
|
||||
|
||||
case SLDT, SMSW, STR, FXRSTOR, XRSTOR, XSAVE, XSAVEOPT, CMPXCHG8B:
|
||||
if isMem(inst.Args[0]) {
|
||||
unmarkImplicit(&inst, PrefixDataSize)
|
||||
}
|
||||
|
||||
case SYSEXIT:
|
||||
unmarkImplicit(&inst, PrefixDataSize)
|
||||
}
|
||||
|
||||
if isCondJmp[inst.Op] || isLoop[inst.Op] || inst.Op == JCXZ || inst.Op == JECXZ || inst.Op == JRCXZ {
|
||||
if countPrefix(&inst, PrefixCS) > 0 && countPrefix(&inst, PrefixDS) > 0 {
|
||||
for i, p := range inst.Prefix {
|
||||
switch p & 0xFFF {
|
||||
case PrefixPN, PrefixPT:
|
||||
inst.Prefix[i] &= 0xF0FF // cut interpretation bits, producing original segment prefix
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// XACQUIRE/XRELEASE adjustment.
|
||||
if inst.Op == MOV {
|
||||
// MOV into memory is a candidate for turning REP into XRELEASE.
|
||||
// However, if the REP is followed by a REPN, that REPN blocks the
|
||||
// conversion.
|
||||
haveREPN := false
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
switch inst.Prefix[i] &^ PrefixIgnored {
|
||||
case PrefixREPN:
|
||||
haveREPN = true
|
||||
case PrefixXRELEASE:
|
||||
if haveREPN {
|
||||
inst.Prefix[i] = PrefixREP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We only format the final F2/F3 as XRELEASE/XACQUIRE.
|
||||
haveXA := false
|
||||
haveXR := false
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
switch inst.Prefix[i] &^ PrefixIgnored {
|
||||
case PrefixXRELEASE:
|
||||
if !haveXR {
|
||||
haveXR = true
|
||||
} else {
|
||||
inst.Prefix[i] = PrefixREP
|
||||
}
|
||||
|
||||
case PrefixXACQUIRE:
|
||||
if !haveXA {
|
||||
haveXA = true
|
||||
} else {
|
||||
inst.Prefix[i] = PrefixREPN
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Determine opcode.
|
||||
op := strings.ToLower(inst.Op.String())
|
||||
if alt := gnuOp[inst.Op]; alt != "" {
|
||||
op = alt
|
||||
}
|
||||
|
||||
// Determine opcode suffix.
|
||||
// Libopcodes omits the suffix if the width of the operation
|
||||
// can be inferred from a register arguments. For example,
|
||||
// add $1, %ebx has no suffix because you can tell from the
|
||||
// 32-bit register destination that it is a 32-bit add,
|
||||
// but in addl $1, (%ebx), the destination is memory, so the
|
||||
// size is not evident without the l suffix.
|
||||
needSuffix := true
|
||||
SuffixLoop:
|
||||
for i, a := range inst.Args {
|
||||
if a == nil {
|
||||
break
|
||||
}
|
||||
switch a := a.(type) {
|
||||
case Reg:
|
||||
switch inst.Op {
|
||||
case MOVSX, MOVZX:
|
||||
continue
|
||||
|
||||
case SHL, SHR, RCL, RCR, ROL, ROR, SAR:
|
||||
if i == 1 {
|
||||
// shift count does not tell us operand size
|
||||
continue
|
||||
}
|
||||
|
||||
case CRC32:
|
||||
// The source argument does tell us operand size,
|
||||
// but libopcodes still always puts a suffix on crc32.
|
||||
continue
|
||||
|
||||
case PUSH, POP:
|
||||
// Even though segment registers are 16-bit, push and pop
|
||||
// can save/restore them from 32-bit slots, so they
|
||||
// do not imply operand size.
|
||||
if ES <= a && a <= GS {
|
||||
continue
|
||||
}
|
||||
|
||||
case CVTSI2SD, CVTSI2SS:
|
||||
// The integer register argument takes priority.
|
||||
if X0 <= a && a <= X15 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if AL <= a && a <= R15 || ES <= a && a <= GS || X0 <= a && a <= X15 || M0 <= a && a <= M7 {
|
||||
needSuffix = false
|
||||
break SuffixLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if needSuffix {
|
||||
switch inst.Op {
|
||||
case CMPXCHG8B, FLDCW, FNSTCW, FNSTSW, LDMXCSR, LLDT, LMSW, LTR, PCLMULQDQ,
|
||||
SETA, SETAE, SETB, SETBE, SETE, SETG, SETGE, SETL, SETLE, SETNE, SETNO, SETNP, SETNS, SETO, SETP, SETS,
|
||||
SLDT, SMSW, STMXCSR, STR, VERR, VERW:
|
||||
// For various reasons, libopcodes emits no suffix for these instructions.
|
||||
|
||||
case CRC32:
|
||||
op += byteSizeSuffix(argBytes(&inst, inst.Args[1]))
|
||||
|
||||
case LGDT, LIDT, SGDT, SIDT:
|
||||
op += byteSizeSuffix(inst.DataSize / 8)
|
||||
|
||||
case MOVZX, MOVSX:
|
||||
// Integer size conversions get two suffixes.
|
||||
op = op[:4] + byteSizeSuffix(argBytes(&inst, inst.Args[1])) + byteSizeSuffix(argBytes(&inst, inst.Args[0]))
|
||||
|
||||
case LOOP, LOOPE, LOOPNE:
|
||||
// Add w suffix to indicate use of CX register instead of ECX.
|
||||
if inst.AddrSize == 16 {
|
||||
op += "w"
|
||||
}
|
||||
|
||||
case CALL, ENTER, JMP, LCALL, LEAVE, LJMP, LRET, RET, SYSRET, XBEGIN:
|
||||
// Add w suffix to indicate use of 16-bit target.
|
||||
// Exclude JMP rel8.
|
||||
if inst.Opcode>>24 == 0xEB {
|
||||
break
|
||||
}
|
||||
if inst.DataSize == 16 && inst.Mode != 16 {
|
||||
markLastImplicit(&inst, PrefixDataSize)
|
||||
op += "w"
|
||||
} else if inst.Mode == 64 {
|
||||
op += "q"
|
||||
}
|
||||
|
||||
case FRSTOR, FNSAVE, FNSTENV, FLDENV:
|
||||
// Add s suffix to indicate shortened FPU state (I guess).
|
||||
if inst.DataSize == 16 {
|
||||
op += "s"
|
||||
}
|
||||
|
||||
case PUSH, POP:
|
||||
if markLastImplicit(&inst, PrefixDataSize) {
|
||||
op += byteSizeSuffix(inst.DataSize / 8)
|
||||
} else if inst.Mode == 64 {
|
||||
op += "q"
|
||||
} else {
|
||||
op += byteSizeSuffix(inst.MemBytes)
|
||||
}
|
||||
|
||||
default:
|
||||
if isFloat(inst.Op) {
|
||||
// I can't explain any of this, but it's what libopcodes does.
|
||||
switch inst.MemBytes {
|
||||
default:
|
||||
if (inst.Op == FLD || inst.Op == FSTP) && isMem(inst.Args[0]) {
|
||||
op += "t"
|
||||
}
|
||||
case 4:
|
||||
if isFloatInt(inst.Op) {
|
||||
op += "l"
|
||||
} else {
|
||||
op += "s"
|
||||
}
|
||||
case 8:
|
||||
if isFloatInt(inst.Op) {
|
||||
op += "ll"
|
||||
} else {
|
||||
op += "l"
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
op += byteSizeSuffix(inst.MemBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust special case opcodes.
|
||||
switch inst.Op {
|
||||
case 0:
|
||||
if inst.Prefix[0] != 0 {
|
||||
return strings.ToLower(inst.Prefix[0].String())
|
||||
}
|
||||
|
||||
case INT:
|
||||
if inst.Opcode>>24 == 0xCC {
|
||||
inst.Args[0] = nil
|
||||
op = "int3"
|
||||
}
|
||||
|
||||
case CMPPS, CMPPD, CMPSD_XMM, CMPSS:
|
||||
imm, ok := inst.Args[2].(Imm)
|
||||
if ok && 0 <= imm && imm < 8 {
|
||||
inst.Args[2] = nil
|
||||
op = cmppsOps[imm] + op[3:]
|
||||
}
|
||||
|
||||
case PCLMULQDQ:
|
||||
imm, ok := inst.Args[2].(Imm)
|
||||
if ok && imm&^0x11 == 0 {
|
||||
inst.Args[2] = nil
|
||||
op = pclmulqOps[(imm&0x10)>>3|(imm&1)]
|
||||
}
|
||||
|
||||
case XLATB:
|
||||
if markLastImplicit(&inst, PrefixAddrSize) {
|
||||
op = "xlat" // not xlatb
|
||||
}
|
||||
}
|
||||
|
||||
// Build list of argument strings.
|
||||
var (
|
||||
usedPrefixes bool // segment prefixes consumed by Mem formatting
|
||||
args []string // formatted arguments
|
||||
)
|
||||
for i, a := range inst.Args {
|
||||
if a == nil {
|
||||
break
|
||||
}
|
||||
switch inst.Op {
|
||||
case MOVSB, MOVSW, MOVSD, MOVSQ, OUTSB, OUTSW, OUTSD:
|
||||
if i == 0 {
|
||||
usedPrefixes = true // disable use of prefixes for first argument
|
||||
} else {
|
||||
usedPrefixes = false
|
||||
}
|
||||
}
|
||||
if a == Imm(1) && (inst.Opcode>>24)&^1 == 0xD0 {
|
||||
continue
|
||||
}
|
||||
args = append(args, gnuArg(&inst, pc, symname, a, &usedPrefixes))
|
||||
}
|
||||
|
||||
// The default is to print the arguments in reverse Intel order.
|
||||
// A few instructions inhibit this behavior.
|
||||
switch inst.Op {
|
||||
case BOUND, LCALL, ENTER, LJMP:
|
||||
// no reverse
|
||||
default:
|
||||
// reverse args
|
||||
for i, j := 0, len(args)-1; i < j; i, j = i+1, j-1 {
|
||||
args[i], args[j] = args[j], args[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Build prefix string.
|
||||
// Must be after argument formatting, which can turn off segment prefixes.
|
||||
var (
|
||||
prefix = "" // output string
|
||||
numAddr = 0
|
||||
numData = 0
|
||||
implicitData = false
|
||||
)
|
||||
for _, p := range inst.Prefix {
|
||||
if p&0xFF == PrefixDataSize && p&PrefixImplicit != 0 {
|
||||
implicitData = true
|
||||
}
|
||||
}
|
||||
for _, p := range inst.Prefix {
|
||||
if p == 0 || p.IsVEX() {
|
||||
break
|
||||
}
|
||||
if p&PrefixImplicit != 0 {
|
||||
continue
|
||||
}
|
||||
switch p &^ (PrefixIgnored | PrefixInvalid) {
|
||||
default:
|
||||
if p.IsREX() {
|
||||
if p&0xFF == PrefixREX {
|
||||
prefix += "rex "
|
||||
} else {
|
||||
prefix += "rex." + p.String()[4:] + " "
|
||||
}
|
||||
break
|
||||
}
|
||||
prefix += strings.ToLower(p.String()) + " "
|
||||
|
||||
case PrefixPN:
|
||||
op += ",pn"
|
||||
continue
|
||||
|
||||
case PrefixPT:
|
||||
op += ",pt"
|
||||
continue
|
||||
|
||||
case PrefixAddrSize, PrefixAddr16, PrefixAddr32:
|
||||
// For unknown reasons, if the addr16 prefix is repeated,
|
||||
// libopcodes displays all but the last as addr32, even though
|
||||
// the addressing form used in a memory reference is clearly
|
||||
// still 16-bit.
|
||||
n := 32
|
||||
if inst.Mode == 32 {
|
||||
n = 16
|
||||
}
|
||||
numAddr++
|
||||
if countPrefix(&inst, PrefixAddrSize) > numAddr {
|
||||
n = inst.Mode
|
||||
}
|
||||
prefix += fmt.Sprintf("addr%d ", n)
|
||||
continue
|
||||
|
||||
case PrefixData16, PrefixData32:
|
||||
if implicitData && countPrefix(&inst, PrefixDataSize) > 1 {
|
||||
// Similar to the addr32 logic above, but it only kicks in
|
||||
// when something used the data size prefix (one is implicit).
|
||||
n := 16
|
||||
if inst.Mode == 16 {
|
||||
n = 32
|
||||
}
|
||||
numData++
|
||||
if countPrefix(&inst, PrefixDataSize) > numData {
|
||||
if inst.Mode == 16 {
|
||||
n = 16
|
||||
} else {
|
||||
n = 32
|
||||
}
|
||||
}
|
||||
prefix += fmt.Sprintf("data%d ", n)
|
||||
continue
|
||||
}
|
||||
prefix += strings.ToLower(p.String()) + " "
|
||||
}
|
||||
}
|
||||
|
||||
// Finally! Put it all together.
|
||||
text := prefix + op
|
||||
if args != nil {
|
||||
text += " "
|
||||
// Indirect call/jmp gets a star to distinguish from direct jump address.
|
||||
if (inst.Op == CALL || inst.Op == JMP || inst.Op == LJMP || inst.Op == LCALL) && (isMem(inst.Args[0]) || isReg(inst.Args[0])) {
|
||||
text += "*"
|
||||
}
|
||||
text += strings.Join(args, ",")
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
// gnuArg returns the GNU syntax for the argument x from the instruction inst.
|
||||
// If *usedPrefixes is false and x is a Mem, then the formatting
|
||||
// includes any segment prefixes and sets *usedPrefixes to true.
|
||||
func gnuArg(inst *Inst, pc uint64, symname SymLookup, x Arg, usedPrefixes *bool) string {
|
||||
if x == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
switch x := x.(type) {
|
||||
case Reg:
|
||||
switch inst.Op {
|
||||
case CVTSI2SS, CVTSI2SD, CVTSS2SI, CVTSD2SI, CVTTSD2SI, CVTTSS2SI:
|
||||
if inst.DataSize == 16 && EAX <= x && x <= R15L {
|
||||
x -= EAX - AX
|
||||
}
|
||||
|
||||
case IN, INSB, INSW, INSD, OUT, OUTSB, OUTSW, OUTSD:
|
||||
// DX is the port, but libopcodes prints it as if it were a memory reference.
|
||||
if x == DX {
|
||||
return "(%dx)"
|
||||
}
|
||||
case VMOVDQA, VMOVDQU, VMOVNTDQA, VMOVNTDQ:
|
||||
return strings.Replace(gccRegName[x], "xmm", "ymm", -1)
|
||||
}
|
||||
return gccRegName[x]
|
||||
case Mem:
|
||||
if s, disp := memArgToSymbol(x, pc, inst.Len, symname); s != "" {
|
||||
suffix := ""
|
||||
if disp != 0 {
|
||||
suffix = fmt.Sprintf("%+d", disp)
|
||||
}
|
||||
return fmt.Sprintf("%s%s", s, suffix)
|
||||
}
|
||||
seg := ""
|
||||
var haveCS, haveDS, haveES, haveFS, haveGS, haveSS bool
|
||||
switch x.Segment {
|
||||
case CS:
|
||||
haveCS = true
|
||||
case DS:
|
||||
haveDS = true
|
||||
case ES:
|
||||
haveES = true
|
||||
case FS:
|
||||
haveFS = true
|
||||
case GS:
|
||||
haveGS = true
|
||||
case SS:
|
||||
haveSS = true
|
||||
}
|
||||
switch inst.Op {
|
||||
case INSB, INSW, INSD, STOSB, STOSW, STOSD, STOSQ, SCASB, SCASW, SCASD, SCASQ:
|
||||
// These do not accept segment prefixes, at least in the GNU rendering.
|
||||
default:
|
||||
if *usedPrefixes {
|
||||
break
|
||||
}
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
p := inst.Prefix[i] &^ PrefixIgnored
|
||||
if p == 0 {
|
||||
continue
|
||||
}
|
||||
switch p {
|
||||
case PrefixCS:
|
||||
if !haveCS {
|
||||
haveCS = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case PrefixDS:
|
||||
if !haveDS {
|
||||
haveDS = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case PrefixES:
|
||||
if !haveES {
|
||||
haveES = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case PrefixFS:
|
||||
if !haveFS {
|
||||
haveFS = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case PrefixGS:
|
||||
if !haveGS {
|
||||
haveGS = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
case PrefixSS:
|
||||
if !haveSS {
|
||||
haveSS = true
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
*usedPrefixes = true
|
||||
}
|
||||
if haveCS {
|
||||
seg += "%cs:"
|
||||
}
|
||||
if haveDS {
|
||||
seg += "%ds:"
|
||||
}
|
||||
if haveSS {
|
||||
seg += "%ss:"
|
||||
}
|
||||
if haveES {
|
||||
seg += "%es:"
|
||||
}
|
||||
if haveFS {
|
||||
seg += "%fs:"
|
||||
}
|
||||
if haveGS {
|
||||
seg += "%gs:"
|
||||
}
|
||||
disp := ""
|
||||
if x.Disp != 0 {
|
||||
disp = fmt.Sprintf("%#x", x.Disp)
|
||||
}
|
||||
if x.Scale == 0 || x.Index == 0 && x.Scale == 1 && (x.Base == ESP || x.Base == RSP || x.Base == 0 && inst.Mode == 64) {
|
||||
if x.Base == 0 {
|
||||
return seg + disp
|
||||
}
|
||||
return fmt.Sprintf("%s%s(%s)", seg, disp, gccRegName[x.Base])
|
||||
}
|
||||
base := gccRegName[x.Base]
|
||||
if x.Base == 0 {
|
||||
base = ""
|
||||
}
|
||||
index := gccRegName[x.Index]
|
||||
if x.Index == 0 {
|
||||
if inst.AddrSize == 64 {
|
||||
index = "%riz"
|
||||
} else {
|
||||
index = "%eiz"
|
||||
}
|
||||
}
|
||||
if AX <= x.Base && x.Base <= DI {
|
||||
// 16-bit addressing - no scale
|
||||
return fmt.Sprintf("%s%s(%s,%s)", seg, disp, base, index)
|
||||
}
|
||||
return fmt.Sprintf("%s%s(%s,%s,%d)", seg, disp, base, index, x.Scale)
|
||||
case Rel:
|
||||
if pc == 0 {
|
||||
return fmt.Sprintf(".%+#x", int64(x))
|
||||
} else {
|
||||
addr := pc + uint64(inst.Len) + uint64(x)
|
||||
if s, base := symname(addr); s != "" && addr == base {
|
||||
return fmt.Sprintf("%s", s)
|
||||
} else {
|
||||
addr := pc + uint64(inst.Len) + uint64(x)
|
||||
return fmt.Sprintf("%#x", addr)
|
||||
}
|
||||
}
|
||||
case Imm:
|
||||
if (inst.Op == MOV || inst.Op == PUSH) && inst.DataSize == 32 { // See comment in plan9x.go.
|
||||
if s, base := symname(uint64(x)); s != "" {
|
||||
suffix := ""
|
||||
if uint64(x) != base {
|
||||
suffix = fmt.Sprintf("%+d", uint64(x)-base)
|
||||
}
|
||||
return fmt.Sprintf("$%s%s", s, suffix)
|
||||
}
|
||||
}
|
||||
if inst.Mode == 32 {
|
||||
return fmt.Sprintf("$%#x", uint32(x))
|
||||
}
|
||||
return fmt.Sprintf("$%#x", int64(x))
|
||||
}
|
||||
return x.String()
|
||||
}
|
||||
|
||||
var gccRegName = [...]string{
|
||||
0: "REG0",
|
||||
AL: "%al",
|
||||
CL: "%cl",
|
||||
BL: "%bl",
|
||||
DL: "%dl",
|
||||
AH: "%ah",
|
||||
CH: "%ch",
|
||||
BH: "%bh",
|
||||
DH: "%dh",
|
||||
SPB: "%spl",
|
||||
BPB: "%bpl",
|
||||
SIB: "%sil",
|
||||
DIB: "%dil",
|
||||
R8B: "%r8b",
|
||||
R9B: "%r9b",
|
||||
R10B: "%r10b",
|
||||
R11B: "%r11b",
|
||||
R12B: "%r12b",
|
||||
R13B: "%r13b",
|
||||
R14B: "%r14b",
|
||||
R15B: "%r15b",
|
||||
AX: "%ax",
|
||||
CX: "%cx",
|
||||
BX: "%bx",
|
||||
DX: "%dx",
|
||||
SP: "%sp",
|
||||
BP: "%bp",
|
||||
SI: "%si",
|
||||
DI: "%di",
|
||||
R8W: "%r8w",
|
||||
R9W: "%r9w",
|
||||
R10W: "%r10w",
|
||||
R11W: "%r11w",
|
||||
R12W: "%r12w",
|
||||
R13W: "%r13w",
|
||||
R14W: "%r14w",
|
||||
R15W: "%r15w",
|
||||
EAX: "%eax",
|
||||
ECX: "%ecx",
|
||||
EDX: "%edx",
|
||||
EBX: "%ebx",
|
||||
ESP: "%esp",
|
||||
EBP: "%ebp",
|
||||
ESI: "%esi",
|
||||
EDI: "%edi",
|
||||
R8L: "%r8d",
|
||||
R9L: "%r9d",
|
||||
R10L: "%r10d",
|
||||
R11L: "%r11d",
|
||||
R12L: "%r12d",
|
||||
R13L: "%r13d",
|
||||
R14L: "%r14d",
|
||||
R15L: "%r15d",
|
||||
RAX: "%rax",
|
||||
RCX: "%rcx",
|
||||
RDX: "%rdx",
|
||||
RBX: "%rbx",
|
||||
RSP: "%rsp",
|
||||
RBP: "%rbp",
|
||||
RSI: "%rsi",
|
||||
RDI: "%rdi",
|
||||
R8: "%r8",
|
||||
R9: "%r9",
|
||||
R10: "%r10",
|
||||
R11: "%r11",
|
||||
R12: "%r12",
|
||||
R13: "%r13",
|
||||
R14: "%r14",
|
||||
R15: "%r15",
|
||||
IP: "%ip",
|
||||
EIP: "%eip",
|
||||
RIP: "%rip",
|
||||
F0: "%st",
|
||||
F1: "%st(1)",
|
||||
F2: "%st(2)",
|
||||
F3: "%st(3)",
|
||||
F4: "%st(4)",
|
||||
F5: "%st(5)",
|
||||
F6: "%st(6)",
|
||||
F7: "%st(7)",
|
||||
M0: "%mm0",
|
||||
M1: "%mm1",
|
||||
M2: "%mm2",
|
||||
M3: "%mm3",
|
||||
M4: "%mm4",
|
||||
M5: "%mm5",
|
||||
M6: "%mm6",
|
||||
M7: "%mm7",
|
||||
X0: "%xmm0",
|
||||
X1: "%xmm1",
|
||||
X2: "%xmm2",
|
||||
X3: "%xmm3",
|
||||
X4: "%xmm4",
|
||||
X5: "%xmm5",
|
||||
X6: "%xmm6",
|
||||
X7: "%xmm7",
|
||||
X8: "%xmm8",
|
||||
X9: "%xmm9",
|
||||
X10: "%xmm10",
|
||||
X11: "%xmm11",
|
||||
X12: "%xmm12",
|
||||
X13: "%xmm13",
|
||||
X14: "%xmm14",
|
||||
X15: "%xmm15",
|
||||
CS: "%cs",
|
||||
SS: "%ss",
|
||||
DS: "%ds",
|
||||
ES: "%es",
|
||||
FS: "%fs",
|
||||
GS: "%gs",
|
||||
GDTR: "%gdtr",
|
||||
IDTR: "%idtr",
|
||||
LDTR: "%ldtr",
|
||||
MSW: "%msw",
|
||||
TASK: "%task",
|
||||
CR0: "%cr0",
|
||||
CR1: "%cr1",
|
||||
CR2: "%cr2",
|
||||
CR3: "%cr3",
|
||||
CR4: "%cr4",
|
||||
CR5: "%cr5",
|
||||
CR6: "%cr6",
|
||||
CR7: "%cr7",
|
||||
CR8: "%cr8",
|
||||
CR9: "%cr9",
|
||||
CR10: "%cr10",
|
||||
CR11: "%cr11",
|
||||
CR12: "%cr12",
|
||||
CR13: "%cr13",
|
||||
CR14: "%cr14",
|
||||
CR15: "%cr15",
|
||||
DR0: "%db0",
|
||||
DR1: "%db1",
|
||||
DR2: "%db2",
|
||||
DR3: "%db3",
|
||||
DR4: "%db4",
|
||||
DR5: "%db5",
|
||||
DR6: "%db6",
|
||||
DR7: "%db7",
|
||||
TR0: "%tr0",
|
||||
TR1: "%tr1",
|
||||
TR2: "%tr2",
|
||||
TR3: "%tr3",
|
||||
TR4: "%tr4",
|
||||
TR5: "%tr5",
|
||||
TR6: "%tr6",
|
||||
TR7: "%tr7",
|
||||
}
|
||||
|
||||
var gnuOp = map[Op]string{
|
||||
CBW: "cbtw",
|
||||
CDQ: "cltd",
|
||||
CMPSD: "cmpsl",
|
||||
CMPSD_XMM: "cmpsd",
|
||||
CWD: "cwtd",
|
||||
CWDE: "cwtl",
|
||||
CQO: "cqto",
|
||||
INSD: "insl",
|
||||
IRET: "iretw",
|
||||
IRETD: "iret",
|
||||
IRETQ: "iretq",
|
||||
LODSB: "lods",
|
||||
LODSD: "lods",
|
||||
LODSQ: "lods",
|
||||
LODSW: "lods",
|
||||
MOVSD: "movsl",
|
||||
MOVSD_XMM: "movsd",
|
||||
OUTSD: "outsl",
|
||||
POPA: "popaw",
|
||||
POPAD: "popa",
|
||||
POPF: "popfw",
|
||||
POPFD: "popf",
|
||||
PUSHA: "pushaw",
|
||||
PUSHAD: "pusha",
|
||||
PUSHF: "pushfw",
|
||||
PUSHFD: "pushf",
|
||||
SCASB: "scas",
|
||||
SCASD: "scas",
|
||||
SCASQ: "scas",
|
||||
SCASW: "scas",
|
||||
STOSB: "stos",
|
||||
STOSD: "stos",
|
||||
STOSQ: "stos",
|
||||
STOSW: "stos",
|
||||
XLATB: "xlat",
|
||||
}
|
||||
|
||||
var cmppsOps = []string{
|
||||
"cmpeq",
|
||||
"cmplt",
|
||||
"cmple",
|
||||
"cmpunord",
|
||||
"cmpneq",
|
||||
"cmpnlt",
|
||||
"cmpnle",
|
||||
"cmpord",
|
||||
}
|
||||
|
||||
var pclmulqOps = []string{
|
||||
"pclmullqlqdq",
|
||||
"pclmulhqlqdq",
|
||||
"pclmullqhqdq",
|
||||
"pclmulhqhqdq",
|
||||
}
|
||||
|
||||
func countPrefix(inst *Inst, target Prefix) int {
|
||||
n := 0
|
||||
for _, p := range inst.Prefix {
|
||||
if p&0xFF == target&0xFF {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func markLastImplicit(inst *Inst, prefix Prefix) bool {
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
p := inst.Prefix[i]
|
||||
if p&0xFF == prefix {
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unmarkImplicit(inst *Inst, prefix Prefix) {
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
p := inst.Prefix[i]
|
||||
if p&0xFF == prefix {
|
||||
inst.Prefix[i] &^= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func byteSizeSuffix(b int) string {
|
||||
switch b {
|
||||
case 1:
|
||||
return "b"
|
||||
case 2:
|
||||
return "w"
|
||||
case 4:
|
||||
return "l"
|
||||
case 8:
|
||||
return "q"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func argBytes(inst *Inst, arg Arg) int {
|
||||
if isMem(arg) {
|
||||
return inst.MemBytes
|
||||
}
|
||||
return regBytes(arg)
|
||||
}
|
||||
|
||||
func isFloat(op Op) bool {
|
||||
switch op {
|
||||
case FADD, FCOM, FCOMP, FDIV, FDIVR, FIADD, FICOM, FICOMP, FIDIV, FIDIVR, FILD, FIMUL, FIST, FISTP, FISTTP, FISUB, FISUBR, FLD, FMUL, FST, FSTP, FSUB, FSUBR:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isFloatInt(op Op) bool {
|
||||
switch op {
|
||||
case FIADD, FICOM, FICOMP, FIDIV, FIDIVR, FILD, FIMUL, FIST, FISTP, FISTTP, FISUB, FISUBR:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
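As a usage note for the GNUSyntax function defined in this file: it takes a decoded Inst, the instruction's address, and an optional symbol lookup. A minimal sketch follows; Decode's signature is assumed from the upstream package, and the symbol table below is hypothetical.

// Illustrative sketch, not part of this vendored file. Decode is assumed from
// upstream golang.org/x/arch/x86/x86asm; "main.helper" is a hypothetical symbol.
package main

import (
	"fmt"

	"golang.org/x/arch/x86/x86asm"
)

func main() {
	// call rel32: e8 2b 00 00 00 at pc 0x401000 targets 0x401000+5+0x2b = 0x401030.
	code := []byte{0xe8, 0x2b, 0x00, 0x00, 0x00}
	pc := uint64(0x401000)

	// SymLookup shape: given an address, return the containing symbol's
	// name and base address, or "" if none is known.
	symname := func(addr uint64) (string, uint64) {
		if addr == 0x401030 {
			return "main.helper", 0x401030
		}
		return "", 0
	}

	inst, err := x86asm.Decode(code, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(x86asm.GNUSyntax(inst, pc, symname)) // e.g. "callq main.helper"
}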
vendor/golang.org/x/arch/x86/x86asm/inst.go (new file, 649 lines, generated, vendored)
@@ -0,0 +1,649 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package x86asm implements decoding of x86 machine code.
|
||||
package x86asm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// An Inst is a single instruction.
|
||||
type Inst struct {
|
||||
Prefix Prefixes // Prefixes applied to the instruction.
|
||||
Op Op // Opcode mnemonic
|
||||
Opcode uint32 // Encoded opcode bits, left aligned (first byte is Opcode>>24, etc)
|
||||
Args Args // Instruction arguments, in Intel order
|
||||
Mode int // processor mode in bits: 16, 32, or 64
|
||||
AddrSize int // address size in bits: 16, 32, or 64
|
||||
DataSize int // operand size in bits: 16, 32, or 64
|
||||
MemBytes int // size of memory argument in bytes: 1, 2, 4, 8, 16, and so on.
|
||||
Len int // length of encoded instruction in bytes
|
||||
PCRel int // length of PC-relative address in instruction encoding
|
||||
PCRelOff int // index of start of PC-relative address in instruction encoding
|
||||
}
|
||||
|
||||
// Prefixes is an array of prefixes associated with a single instruction.
|
||||
// The prefixes are listed in the same order as found in the instruction:
|
||||
// each prefix byte corresponds to one slot in the array. The first zero
|
||||
// in the array marks the end of the prefixes.
|
||||
type Prefixes [14]Prefix
|
||||
|
||||
// A Prefix represents an Intel instruction prefix.
|
||||
// The low 8 bits are the actual prefix byte encoding,
|
||||
// and the top 8 bits contain distinguishing bits and metadata.
|
||||
type Prefix uint16
|
||||
|
||||
const (
|
||||
// Metadata about the role of a prefix in an instruction.
|
||||
PrefixImplicit Prefix = 0x8000 // prefix is implied by instruction text
|
||||
PrefixIgnored Prefix = 0x4000 // prefix is ignored: either irrelevant or overridden by a later prefix
|
||||
PrefixInvalid Prefix = 0x2000 // prefix makes entire instruction invalid (bad LOCK)
|
||||
|
||||
// Memory segment overrides.
|
||||
PrefixES Prefix = 0x26 // ES segment override
|
||||
PrefixCS Prefix = 0x2E // CS segment override
|
||||
PrefixSS Prefix = 0x36 // SS segment override
|
||||
PrefixDS Prefix = 0x3E // DS segment override
|
||||
PrefixFS Prefix = 0x64 // FS segment override
|
||||
PrefixGS Prefix = 0x65 // GS segment override
|
||||
|
||||
// Branch prediction.
|
||||
PrefixPN Prefix = 0x12E // predict not taken (conditional branch only)
|
||||
PrefixPT Prefix = 0x13E // predict taken (conditional branch only)
|
||||
|
||||
// Size attributes.
|
||||
PrefixDataSize Prefix = 0x66 // operand size override
|
||||
PrefixData16 Prefix = 0x166
|
||||
PrefixData32 Prefix = 0x266
|
||||
PrefixAddrSize Prefix = 0x67 // address size override
|
||||
PrefixAddr16 Prefix = 0x167
|
||||
PrefixAddr32 Prefix = 0x267
|
||||
|
||||
// One of a kind.
|
||||
PrefixLOCK Prefix = 0xF0 // lock
|
||||
PrefixREPN Prefix = 0xF2 // repeat not zero
|
||||
PrefixXACQUIRE Prefix = 0x1F2
|
||||
PrefixBND Prefix = 0x2F2
|
||||
PrefixREP Prefix = 0xF3 // repeat
|
||||
PrefixXRELEASE Prefix = 0x1F3
|
||||
|
||||
// The REX prefixes must be in the range [PrefixREX, PrefixREX+0x10).
|
||||
// the other bits are set or not according to the intended use.
|
||||
PrefixREX Prefix = 0x40 // REX 64-bit extension prefix
|
||||
PrefixREXW Prefix = 0x08 // extension bit W (64-bit instruction width)
|
||||
PrefixREXR Prefix = 0x04 // extension bit R (r field in modrm)
|
||||
PrefixREXX Prefix = 0x02 // extension bit X (index field in sib)
|
||||
PrefixREXB Prefix = 0x01 // extension bit B (r/m field in modrm or base field in sib)
|
||||
PrefixVEX2Bytes Prefix = 0xC5 // Short form of vex prefix
|
||||
PrefixVEX3Bytes Prefix = 0xC4 // Long form of vex prefix
|
||||
)
|
||||
|
||||
// IsREX reports whether p is a REX prefix byte.
|
||||
func (p Prefix) IsREX() bool {
|
||||
return p&0xF0 == PrefixREX
|
||||
}
|
||||
|
||||
func (p Prefix) IsVEX() bool {
|
||||
return p&0xFF == PrefixVEX2Bytes || p&0xFF == PrefixVEX3Bytes
|
||||
}
|
||||
|
||||
func (p Prefix) String() string {
|
||||
p &^= PrefixImplicit | PrefixIgnored | PrefixInvalid
|
||||
if s := prefixNames[p]; s != "" {
|
||||
return s
|
||||
}
|
||||
|
||||
if p.IsREX() {
|
||||
s := "REX."
|
||||
if p&PrefixREXW != 0 {
|
||||
s += "W"
|
||||
}
|
||||
if p&PrefixREXR != 0 {
|
||||
s += "R"
|
||||
}
|
||||
if p&PrefixREXX != 0 {
|
||||
s += "X"
|
||||
}
|
||||
if p&PrefixREXB != 0 {
|
||||
s += "B"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Prefix(%#x)", int(p))
|
||||
}
|
||||
|
||||
// An Op is an x86 opcode.
|
||||
type Op uint32
|
||||
|
||||
func (op Op) String() string {
|
||||
i := int(op)
|
||||
if i < 0 || i >= len(opNames) || opNames[i] == "" {
|
||||
return fmt.Sprintf("Op(%d)", i)
|
||||
}
|
||||
return opNames[i]
|
||||
}
|
||||
|
||||
// An Args holds the instruction arguments.
|
||||
// If an instruction has fewer than 4 arguments,
|
||||
// the final elements in the array are nil.
|
||||
type Args [4]Arg
|
||||
|
||||
// An Arg is a single instruction argument,
|
||||
// one of these types: Reg, Mem, Imm, Rel.
|
||||
type Arg interface {
|
||||
String() string
|
||||
isArg()
|
||||
}
|
||||
|
||||
// Note that the implements of Arg that follow are all sized
|
||||
// so that on a 64-bit machine the data can be inlined in
|
||||
// the interface value instead of requiring an allocation.
|
||||
|
||||
// A Reg is a single register.
|
||||
// The zero Reg value has no name but indicates “no register.”
|
||||
type Reg uint8
|
||||
|
||||
const (
|
||||
_ Reg = iota
|
||||
|
||||
// 8-bit
|
||||
AL
|
||||
CL
|
||||
DL
|
||||
BL
|
||||
AH
|
||||
CH
|
||||
DH
|
||||
BH
|
||||
SPB
|
||||
BPB
|
||||
SIB
|
||||
DIB
|
||||
R8B
|
||||
R9B
|
||||
R10B
|
||||
R11B
|
||||
R12B
|
||||
R13B
|
||||
R14B
|
||||
R15B
|
||||
|
||||
// 16-bit
|
||||
AX
|
||||
CX
|
||||
DX
|
||||
BX
|
||||
SP
|
||||
BP
|
||||
SI
|
||||
DI
|
||||
R8W
|
||||
R9W
|
||||
R10W
|
||||
R11W
|
||||
R12W
|
||||
R13W
|
||||
R14W
|
||||
R15W
|
||||
|
||||
// 32-bit
|
||||
EAX
|
||||
ECX
|
||||
EDX
|
||||
EBX
|
||||
ESP
|
||||
EBP
|
||||
ESI
|
||||
EDI
|
||||
R8L
|
||||
R9L
|
||||
R10L
|
||||
R11L
|
||||
R12L
|
||||
R13L
|
||||
R14L
|
||||
R15L
|
||||
|
||||
// 64-bit
|
||||
RAX
|
||||
RCX
|
||||
RDX
|
||||
RBX
|
||||
RSP
|
||||
RBP
|
||||
RSI
|
||||
RDI
|
||||
R8
|
||||
R9
|
||||
R10
|
||||
R11
|
||||
R12
|
||||
R13
|
||||
R14
|
||||
R15
|
||||
|
||||
// Instruction pointer.
|
||||
IP // 16-bit
|
||||
EIP // 32-bit
|
||||
RIP // 64-bit
|
||||
|
||||
// 387 floating point registers.
|
||||
F0
|
||||
F1
|
||||
F2
|
||||
F3
|
||||
F4
|
||||
F5
|
||||
F6
|
||||
F7
|
||||
|
||||
// MMX registers.
|
||||
M0
|
||||
M1
|
||||
M2
|
||||
M3
|
||||
M4
|
||||
M5
|
||||
M6
|
||||
M7
|
||||
|
||||
// XMM registers.
|
||||
X0
|
||||
X1
|
||||
X2
|
||||
X3
|
||||
X4
|
||||
X5
|
||||
X6
|
||||
X7
|
||||
X8
|
||||
X9
|
||||
X10
|
||||
X11
|
||||
X12
|
||||
X13
|
||||
X14
|
||||
X15
|
||||
|
||||
// Segment registers.
|
||||
ES
|
||||
CS
|
||||
SS
|
||||
DS
|
||||
FS
|
||||
GS
|
||||
|
||||
// System registers.
|
||||
GDTR
|
||||
IDTR
|
||||
LDTR
|
||||
MSW
|
||||
TASK
|
||||
|
||||
// Control registers.
|
||||
CR0
|
||||
CR1
|
||||
CR2
|
||||
CR3
|
||||
CR4
|
||||
CR5
|
||||
CR6
|
||||
CR7
|
||||
CR8
|
||||
CR9
|
||||
CR10
|
||||
CR11
|
||||
CR12
|
||||
CR13
|
||||
CR14
|
||||
CR15
|
||||
|
||||
// Debug registers.
|
||||
DR0
|
||||
DR1
|
||||
DR2
|
||||
DR3
|
||||
DR4
|
||||
DR5
|
||||
DR6
|
||||
DR7
|
||||
DR8
|
||||
DR9
|
||||
DR10
|
||||
DR11
|
||||
DR12
|
||||
DR13
|
||||
DR14
|
||||
DR15
|
||||
|
||||
// Task registers.
|
||||
TR0
|
||||
TR1
|
||||
TR2
|
||||
TR3
|
||||
TR4
|
||||
TR5
|
||||
TR6
|
||||
TR7
|
||||
)
|
||||
|
||||
const regMax = TR7
|
||||
|
||||
func (Reg) isArg() {}
|
||||
|
||||
func (r Reg) String() string {
|
||||
i := int(r)
|
||||
if i < 0 || i >= len(regNames) || regNames[i] == "" {
|
||||
return fmt.Sprintf("Reg(%d)", i)
|
||||
}
|
||||
return regNames[i]
|
||||
}
|
||||
|
||||
// A Mem is a memory reference.
|
||||
// The general form is Segment:[Base+Scale*Index+Disp].
|
||||
type Mem struct {
|
||||
Segment Reg
|
||||
Base Reg
|
||||
Scale uint8
|
||||
Index Reg
|
||||
Disp int64
|
||||
}
|
||||
|
||||
func (Mem) isArg() {}
|
||||
|
||||
func (m Mem) String() string {
|
||||
var base, plus, scale, index, disp string
|
||||
|
||||
if m.Base != 0 {
|
||||
base = m.Base.String()
|
||||
}
|
||||
if m.Scale != 0 {
|
||||
if m.Base != 0 {
|
||||
plus = "+"
|
||||
}
|
||||
if m.Scale > 1 {
|
||||
scale = fmt.Sprintf("%d*", m.Scale)
|
||||
}
|
||||
index = m.Index.String()
|
||||
}
|
||||
if m.Disp != 0 || m.Base == 0 && m.Scale == 0 {
|
||||
disp = fmt.Sprintf("%+#x", m.Disp)
|
||||
}
|
||||
return "[" + base + plus + scale + index + disp + "]"
|
||||
}
|
||||
|
||||
// A Rel is an offset relative to the current instruction pointer.
|
||||
type Rel int32
|
||||
|
||||
func (Rel) isArg() {}
|
||||
|
||||
func (r Rel) String() string {
|
||||
return fmt.Sprintf(".%+d", r)
|
||||
}
|
||||
|
||||
// An Imm is an integer constant.
|
||||
type Imm int64
|
||||
|
||||
func (Imm) isArg() {}
|
||||
|
||||
func (i Imm) String() string {
|
||||
return fmt.Sprintf("%#x", int64(i))
|
||||
}
|
||||
|
||||
func (i Inst) String() string {
|
||||
var buf bytes.Buffer
|
||||
for _, p := range i.Prefix {
|
||||
if p == 0 {
|
||||
break
|
||||
}
|
||||
if p&PrefixImplicit != 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&buf, "%v ", p)
|
||||
}
|
||||
fmt.Fprintf(&buf, "%v", i.Op)
|
||||
sep := " "
|
||||
for _, v := range i.Args {
|
||||
if v == nil {
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(&buf, "%s%v", sep, v)
|
||||
sep = ", "
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func isReg(a Arg) bool {
|
||||
_, ok := a.(Reg)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isSegReg(a Arg) bool {
|
||||
r, ok := a.(Reg)
|
||||
return ok && ES <= r && r <= GS
|
||||
}
|
||||
|
||||
func isMem(a Arg) bool {
|
||||
_, ok := a.(Mem)
|
||||
return ok
|
||||
}
|
||||
|
||||
func isImm(a Arg) bool {
|
||||
_, ok := a.(Imm)
|
||||
return ok
|
||||
}
|
||||
|
||||
func regBytes(a Arg) int {
|
||||
r, ok := a.(Reg)
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
if AL <= r && r <= R15B {
|
||||
return 1
|
||||
}
|
||||
if AX <= r && r <= R15W {
|
||||
return 2
|
||||
}
|
||||
if EAX <= r && r <= R15L {
|
||||
return 4
|
||||
}
|
||||
if RAX <= r && r <= R15 {
|
||||
return 8
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func isSegment(p Prefix) bool {
|
||||
switch p {
|
||||
case PrefixCS, PrefixDS, PrefixES, PrefixFS, PrefixGS, PrefixSS:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// The Op definitions and string list are in tables.go.
|
||||
|
||||
var prefixNames = map[Prefix]string{
|
||||
PrefixCS: "CS",
|
||||
PrefixDS: "DS",
|
||||
PrefixES: "ES",
|
||||
PrefixFS: "FS",
|
||||
PrefixGS: "GS",
|
||||
PrefixSS: "SS",
|
||||
PrefixLOCK: "LOCK",
|
||||
PrefixREP: "REP",
|
||||
PrefixREPN: "REPN",
|
||||
PrefixAddrSize: "ADDRSIZE",
|
||||
PrefixDataSize: "DATASIZE",
|
||||
PrefixAddr16: "ADDR16",
|
||||
PrefixData16: "DATA16",
|
||||
PrefixAddr32: "ADDR32",
|
||||
PrefixData32: "DATA32",
|
||||
PrefixBND: "BND",
|
||||
PrefixXACQUIRE: "XACQUIRE",
|
||||
PrefixXRELEASE: "XRELEASE",
|
||||
PrefixREX: "REX",
|
||||
PrefixPT: "PT",
|
||||
PrefixPN: "PN",
|
||||
}
|
||||
|
||||
var regNames = [...]string{
|
||||
AL: "AL",
|
||||
CL: "CL",
|
||||
BL: "BL",
|
||||
DL: "DL",
|
||||
AH: "AH",
|
||||
CH: "CH",
|
||||
BH: "BH",
|
||||
DH: "DH",
|
||||
SPB: "SPB",
|
||||
BPB: "BPB",
|
||||
SIB: "SIB",
|
||||
DIB: "DIB",
|
||||
R8B: "R8B",
|
||||
R9B: "R9B",
|
||||
R10B: "R10B",
|
||||
R11B: "R11B",
|
||||
R12B: "R12B",
|
||||
R13B: "R13B",
|
||||
R14B: "R14B",
|
||||
R15B: "R15B",
|
||||
AX: "AX",
|
||||
CX: "CX",
|
||||
BX: "BX",
|
||||
DX: "DX",
|
||||
SP: "SP",
|
||||
BP: "BP",
|
||||
SI: "SI",
|
||||
DI: "DI",
|
||||
R8W: "R8W",
|
||||
R9W: "R9W",
|
||||
R10W: "R10W",
|
||||
R11W: "R11W",
|
||||
R12W: "R12W",
|
||||
R13W: "R13W",
|
||||
R14W: "R14W",
|
||||
R15W: "R15W",
|
||||
EAX: "EAX",
|
||||
ECX: "ECX",
|
||||
EDX: "EDX",
|
||||
EBX: "EBX",
|
||||
ESP: "ESP",
|
||||
EBP: "EBP",
|
||||
ESI: "ESI",
|
||||
EDI: "EDI",
|
||||
R8L: "R8L",
|
||||
R9L: "R9L",
|
||||
R10L: "R10L",
|
||||
R11L: "R11L",
|
||||
R12L: "R12L",
|
||||
R13L: "R13L",
|
||||
R14L: "R14L",
|
||||
R15L: "R15L",
|
||||
RAX: "RAX",
|
||||
RCX: "RCX",
|
||||
RDX: "RDX",
|
||||
RBX: "RBX",
|
||||
RSP: "RSP",
|
||||
RBP: "RBP",
|
||||
RSI: "RSI",
|
||||
RDI: "RDI",
|
||||
R8: "R8",
|
||||
R9: "R9",
|
||||
R10: "R10",
|
||||
R11: "R11",
|
||||
R12: "R12",
|
||||
R13: "R13",
|
||||
R14: "R14",
|
||||
R15: "R15",
|
||||
IP: "IP",
|
||||
EIP: "EIP",
|
||||
RIP: "RIP",
|
||||
F0: "F0",
|
||||
F1: "F1",
|
||||
F2: "F2",
|
||||
F3: "F3",
|
||||
F4: "F4",
|
||||
F5: "F5",
|
||||
F6: "F6",
|
||||
F7: "F7",
|
||||
M0: "M0",
|
||||
M1: "M1",
|
||||
M2: "M2",
|
||||
M3: "M3",
|
||||
M4: "M4",
|
||||
M5: "M5",
|
||||
M6: "M6",
|
||||
M7: "M7",
|
||||
X0: "X0",
|
||||
X1: "X1",
|
||||
X2: "X2",
|
||||
X3: "X3",
|
||||
X4: "X4",
|
||||
X5: "X5",
|
||||
X6: "X6",
|
||||
X7: "X7",
|
||||
X8: "X8",
|
||||
X9: "X9",
|
||||
X10: "X10",
|
||||
X11: "X11",
|
||||
X12: "X12",
|
||||
X13: "X13",
|
||||
X14: "X14",
|
||||
X15: "X15",
|
||||
CS: "CS",
|
||||
SS: "SS",
|
||||
DS: "DS",
|
||||
ES: "ES",
|
||||
FS: "FS",
|
||||
GS: "GS",
|
||||
GDTR: "GDTR",
|
||||
IDTR: "IDTR",
|
||||
LDTR: "LDTR",
|
||||
MSW: "MSW",
|
||||
TASK: "TASK",
|
||||
CR0: "CR0",
|
||||
CR1: "CR1",
|
||||
CR2: "CR2",
|
||||
CR3: "CR3",
|
||||
CR4: "CR4",
|
||||
CR5: "CR5",
|
||||
CR6: "CR6",
|
||||
CR7: "CR7",
|
||||
CR8: "CR8",
|
||||
CR9: "CR9",
|
||||
CR10: "CR10",
|
||||
CR11: "CR11",
|
||||
CR12: "CR12",
|
||||
CR13: "CR13",
|
||||
CR14: "CR14",
|
||||
CR15: "CR15",
|
||||
DR0: "DR0",
|
||||
DR1: "DR1",
|
||||
DR2: "DR2",
|
||||
DR3: "DR3",
|
||||
DR4: "DR4",
|
||||
DR5: "DR5",
|
||||
DR6: "DR6",
|
||||
DR7: "DR7",
|
||||
DR8: "DR8",
|
||||
DR9: "DR9",
|
||||
DR10: "DR10",
|
||||
DR11: "DR11",
|
||||
DR12: "DR12",
|
||||
DR13: "DR13",
|
||||
DR14: "DR14",
|
||||
DR15: "DR15",
|
||||
TR0: "TR0",
|
||||
TR1: "TR1",
|
||||
TR2: "TR2",
|
||||
TR3: "TR3",
|
||||
TR4: "TR4",
|
||||
TR5: "TR5",
|
||||
TR6: "TR6",
|
||||
TR7: "TR7",
|
||||
}
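A small sketch exercising the exported types defined in this file (Mem, Prefix, Reg) and the string forms their methods produce; the concrete values are chosen purely for illustration.

// Illustrative sketch, not part of this vendored file; it only uses the
// exported names declared above.
package main

import (
	"fmt"

	"golang.org/x/arch/x86/x86asm"
)

func main() {
	// Mem follows the general form Segment:[Base+Scale*Index+Disp].
	m := x86asm.Mem{Base: x86asm.RAX, Scale: 4, Index: x86asm.RCX, Disp: 16}
	fmt.Println(m) // [RAX+4*RCX+0x10]

	// A Prefix keeps the raw prefix byte in its low 8 bits; REX prefixes are
	// decomposed into W/R/X/B flag letters by Prefix.String.
	p := x86asm.PrefixREX | x86asm.PrefixREXW
	fmt.Println(p.IsREX(), p) // true REX.W
}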
vendor/golang.org/x/arch/x86/x86asm/intel.go (new file, 562 lines, generated, vendored)
@@ -0,0 +1,562 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x86asm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IntelSyntax returns the Intel assembler syntax for the instruction, as defined by Intel's XED tool.
|
||||
func IntelSyntax(inst Inst, pc uint64, symname SymLookup) string {
|
||||
if symname == nil {
|
||||
symname = func(uint64) (string, uint64) { return "", 0 }
|
||||
}
|
||||
|
||||
var iargs []Arg
|
||||
for _, a := range inst.Args {
|
||||
if a == nil {
|
||||
break
|
||||
}
|
||||
iargs = append(iargs, a)
|
||||
}
|
||||
|
||||
switch inst.Op {
|
||||
case INSB, INSD, INSW, OUTSB, OUTSD, OUTSW, LOOPNE, JCXZ, JECXZ, JRCXZ, LOOP, LOOPE, MOV, XLATB:
|
||||
if inst.Op == MOV && (inst.Opcode>>16)&0xFFFC != 0x0F20 {
|
||||
break
|
||||
}
|
||||
for i, p := range inst.Prefix {
|
||||
if p&0xFF == PrefixAddrSize {
|
||||
inst.Prefix[i] &^= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch inst.Op {
|
||||
case MOV:
|
||||
dst, _ := inst.Args[0].(Reg)
|
||||
src, _ := inst.Args[1].(Reg)
|
||||
if ES <= dst && dst <= GS && EAX <= src && src <= R15L {
|
||||
src -= EAX - AX
|
||||
iargs[1] = src
|
||||
}
|
||||
if ES <= dst && dst <= GS && RAX <= src && src <= R15 {
|
||||
src -= RAX - AX
|
||||
iargs[1] = src
|
||||
}
|
||||
|
||||
if inst.Opcode>>24&^3 == 0xA0 {
|
||||
for i, p := range inst.Prefix {
|
||||
if p&0xFF == PrefixAddrSize {
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch inst.Op {
|
||||
case AAM, AAD:
|
||||
if imm, ok := iargs[0].(Imm); ok {
|
||||
if inst.DataSize == 32 {
|
||||
iargs[0] = Imm(uint32(int8(imm)))
|
||||
} else if inst.DataSize == 16 {
|
||||
iargs[0] = Imm(uint16(int8(imm)))
|
||||
}
|
||||
}
|
||||
|
||||
case PUSH:
|
||||
if imm, ok := iargs[0].(Imm); ok {
|
||||
iargs[0] = Imm(uint32(imm))
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range inst.Prefix {
|
||||
if p&PrefixImplicit != 0 {
|
||||
for j, pj := range inst.Prefix {
|
||||
if pj&0xFF == p&0xFF {
|
||||
inst.Prefix[j] |= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inst.Op != 0 {
|
||||
for i, p := range inst.Prefix {
|
||||
switch p &^ PrefixIgnored {
|
||||
case PrefixData16, PrefixData32, PrefixCS, PrefixDS, PrefixES, PrefixSS:
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
if p.IsREX() {
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
if p.IsVEX() {
|
||||
if p == PrefixVEX3Bytes {
|
||||
inst.Prefix[i+2] |= PrefixImplicit
|
||||
}
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
inst.Prefix[i+1] |= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if isLoop[inst.Op] || inst.Op == JCXZ || inst.Op == JECXZ || inst.Op == JRCXZ {
|
||||
for i, p := range inst.Prefix {
|
||||
if p == PrefixPT || p == PrefixPN {
|
||||
inst.Prefix[i] |= PrefixImplicit
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch inst.Op {
|
||||
case AAA, AAS, CBW, CDQE, CLC, CLD, CLI, CLTS, CMC, CPUID, CQO, CWD, DAA, DAS,
|
||||
FDECSTP, FINCSTP, FNCLEX, FNINIT, FNOP, FWAIT, HLT,
|
||||
ICEBP, INSB, INSD, INSW, INT, INTO, INVD, IRET, IRETQ,
|
||||
LAHF, LEAVE, LRET, MONITOR, MWAIT, NOP, OUTSB, OUTSD, OUTSW,
|
||||
PAUSE, POPA, POPF, POPFQ, PUSHA, PUSHF, PUSHFQ,
|
||||
RDMSR, RDPMC, RDTSC, RDTSCP, RET, RSM,
|
||||
SAHF, STC, STD, STI, SYSENTER, SYSEXIT, SYSRET,
|
||||
UD2, WBINVD, WRMSR, XEND, XLATB, XTEST:
|
||||
|
||||
if inst.Op == NOP && inst.Opcode>>24 != 0x90 {
|
||||
break
|
||||
}
|
||||
if inst.Op == RET && inst.Opcode>>24 != 0xC3 {
|
||||
break
|
||||
}
|
||||
if inst.Op == INT && inst.Opcode>>24 != 0xCC {
|
||||
break
|
||||
}
|
||||
if inst.Op == LRET && inst.Opcode>>24 != 0xcb {
|
||||
break
|
||||
}
|
||||
for i, p := range inst.Prefix {
|
||||
if p&0xFF == PrefixDataSize {
|
||||
inst.Prefix[i] &^= PrefixImplicit | PrefixIgnored
|
||||
}
|
||||
}
|
||||
|
||||
case 0:
|
||||
// ok
|
||||
}
|
||||
|
||||
switch inst.Op {
|
||||
case INSB, INSD, INSW, OUTSB, OUTSD, OUTSW, MONITOR, MWAIT, XLATB:
|
||||
iargs = nil
|
||||
|
||||
case STOSB, STOSW, STOSD, STOSQ:
|
||||
iargs = iargs[:1]
|
||||
|
||||
case LODSB, LODSW, LODSD, LODSQ, SCASB, SCASW, SCASD, SCASQ:
|
||||
iargs = iargs[1:]
|
||||
}
|
||||
|
||||
const (
|
||||
haveData16 = 1 << iota
|
||||
haveData32
|
||||
haveAddr16
|
||||
haveAddr32
|
||||
haveXacquire
|
||||
haveXrelease
|
||||
haveLock
|
||||
haveHintTaken
|
||||
haveHintNotTaken
|
||||
haveBnd
|
||||
)
|
||||
var prefixBits uint32
|
||||
prefix := ""
|
||||
for _, p := range inst.Prefix {
|
||||
if p == 0 {
|
||||
break
|
||||
}
|
||||
if p&0xFF == 0xF3 {
|
||||
prefixBits &^= haveBnd
|
||||
}
|
||||
if p&(PrefixImplicit|PrefixIgnored) != 0 {
|
||||
continue
|
||||
}
|
||||
switch p {
|
||||
default:
|
||||
prefix += strings.ToLower(p.String()) + " "
|
||||
case PrefixCS, PrefixDS, PrefixES, PrefixFS, PrefixGS, PrefixSS:
|
||||
if inst.Op == 0 {
|
||||
prefix += strings.ToLower(p.String()) + " "
|
||||
}
|
||||
case PrefixREPN:
|
||||
prefix += "repne "
|
||||
case PrefixLOCK:
|
||||
prefixBits |= haveLock
|
||||
case PrefixData16, PrefixDataSize:
|
||||
prefixBits |= haveData16
|
||||
case PrefixData32:
|
||||
prefixBits |= haveData32
|
||||
case PrefixAddrSize, PrefixAddr16:
|
||||
prefixBits |= haveAddr16
|
||||
case PrefixAddr32:
|
||||
prefixBits |= haveAddr32
|
||||
case PrefixXACQUIRE:
|
||||
prefixBits |= haveXacquire
|
||||
case PrefixXRELEASE:
|
||||
prefixBits |= haveXrelease
|
||||
case PrefixPT:
|
||||
prefixBits |= haveHintTaken
|
||||
case PrefixPN:
|
||||
prefixBits |= haveHintNotTaken
|
||||
case PrefixBND:
|
||||
prefixBits |= haveBnd
|
||||
}
|
||||
}
|
||||
switch inst.Op {
|
||||
case JMP:
|
||||
if inst.Opcode>>24 == 0xEB {
|
||||
prefixBits &^= haveBnd
|
||||
}
|
||||
case RET, LRET:
|
||||
prefixBits &^= haveData16 | haveData32
|
||||
}
|
||||
|
||||
if prefixBits&haveXacquire != 0 {
|
||||
prefix += "xacquire "
|
||||
}
|
||||
if prefixBits&haveXrelease != 0 {
|
||||
prefix += "xrelease "
|
||||
}
|
||||
if prefixBits&haveLock != 0 {
|
||||
prefix += "lock "
|
||||
}
|
||||
if prefixBits&haveBnd != 0 {
|
||||
prefix += "bnd "
|
||||
}
|
||||
if prefixBits&haveHintTaken != 0 {
|
||||
prefix += "hint-taken "
|
||||
}
|
||||
if prefixBits&haveHintNotTaken != 0 {
|
||||
prefix += "hint-not-taken "
|
||||
}
|
||||
if prefixBits&haveAddr16 != 0 {
|
||||
prefix += "addr16 "
|
||||
}
|
||||
if prefixBits&haveAddr32 != 0 {
|
||||
prefix += "addr32 "
|
||||
}
|
||||
if prefixBits&haveData16 != 0 {
|
||||
prefix += "data16 "
|
||||
}
|
||||
if prefixBits&haveData32 != 0 {
|
||||
prefix += "data32 "
|
||||
}
|
||||
|
||||
if inst.Op == 0 {
|
||||
if prefix == "" {
|
||||
return "<no instruction>"
|
||||
}
|
||||
return prefix[:len(prefix)-1]
|
||||
}
|
||||
|
||||
var args []string
|
||||
for _, a := range iargs {
|
||||
if a == nil {
|
||||
break
|
||||
}
|
||||
args = append(args, intelArg(&inst, pc, symname, a))
|
||||
}
|
||||
|
||||
var op string
|
||||
switch inst.Op {
|
||||
case NOP:
|
||||
if inst.Opcode>>24 == 0x0F {
|
||||
if inst.DataSize == 16 {
|
||||
args = append(args, "ax")
|
||||
} else {
|
||||
args = append(args, "eax")
|
||||
}
|
||||
}
|
||||
|
||||
case BLENDVPD, BLENDVPS, PBLENDVB:
|
||||
args = args[:2]
|
||||
|
||||
case INT:
|
||||
if inst.Opcode>>24 == 0xCC {
|
||||
args = nil
|
||||
op = "int3"
|
||||
}
|
||||
|
||||
case LCALL, LJMP:
|
||||
if len(args) == 2 {
|
||||
args[0], args[1] = args[1], args[0]
|
||||
}
|
||||
|
||||
case FCHS, FABS, FTST, FLDPI, FLDL2E, FLDLG2, F2XM1, FXAM, FLD1, FLDL2T, FSQRT, FRNDINT, FCOS, FSIN:
|
||||
if len(args) == 0 {
|
||||
args = append(args, "st0")
|
||||
}
|
||||
|
||||
case FPTAN, FSINCOS, FUCOMPP, FCOMPP, FYL2X, FPATAN, FXTRACT, FPREM1, FPREM, FYL2XP1, FSCALE:
|
||||
if len(args) == 0 {
|
||||
args = []string{"st0", "st1"}
|
||||
}
|
||||
|
||||
case FST, FSTP, FISTTP, FIST, FISTP, FBSTP:
|
||||
if len(args) == 1 {
|
||||
args = append(args, "st0")
|
||||
}
|
||||
|
||||
case FLD, FXCH, FCOM, FCOMP, FIADD, FIMUL, FICOM, FICOMP, FISUBR, FIDIV, FUCOM, FUCOMP, FILD, FBLD, FADD, FMUL, FSUB, FSUBR, FISUB, FDIV, FDIVR, FIDIVR:
|
||||
if len(args) == 1 {
|
||||
args = []string{"st0", args[0]}
|
||||
}
|
||||
|
||||
case MASKMOVDQU, MASKMOVQ, XLATB, OUTSB, OUTSW, OUTSD:
|
||||
FixSegment:
|
||||
for i := len(inst.Prefix) - 1; i >= 0; i-- {
|
||||
p := inst.Prefix[i] & 0xFF
|
||||
switch p {
|
||||
case PrefixCS, PrefixES, PrefixFS, PrefixGS, PrefixSS:
|
||||
if inst.Mode != 64 || p == PrefixFS || p == PrefixGS {
|
||||
args = append(args, strings.ToLower((inst.Prefix[i] & 0xFF).String()))
|
||||
break FixSegment
|
||||
}
|
||||
case PrefixDS:
|
||||
if inst.Mode != 64 {
|
||||
break FixSegment
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if op == "" {
|
||||
op = intelOp[inst.Op]
|
||||
}
|
||||
if op == "" {
|
||||
op = strings.ToLower(inst.Op.String())
|
||||
}
|
||||
if args != nil {
|
||||
op += " " + strings.Join(args, ", ")
|
||||
}
|
||||
return prefix + op
|
||||
}
|
||||
|
||||
func intelArg(inst *Inst, pc uint64, symname SymLookup, arg Arg) string {
|
||||
switch a := arg.(type) {
|
||||
case Imm:
|
||||
if (inst.Op == MOV || inst.Op == PUSH) && inst.DataSize == 32 { // See comment in plan9x.go.
|
||||
if s, base := symname(uint64(a)); s != "" {
|
||||
suffix := ""
|
||||
if uint64(a) != base {
|
||||
suffix = fmt.Sprintf("%+d", uint64(a)-base)
|
||||
}
|
||||
return fmt.Sprintf("$%s%s", s, suffix)
|
||||
}
|
||||
}
|
||||
if inst.Mode == 32 {
|
||||
return fmt.Sprintf("%#x", uint32(a))
|
||||
}
|
||||
if Imm(int32(a)) == a {
|
||||
return fmt.Sprintf("%#x", int64(a))
|
||||
}
|
||||
return fmt.Sprintf("%#x", uint64(a))
|
||||
case Mem:
|
||||
if a.Base == EIP {
|
||||
a.Base = RIP
|
||||
}
|
||||
prefix := ""
|
||||
switch inst.MemBytes {
|
||||
case 1:
|
||||
prefix = "byte "
|
||||
case 2:
|
||||
prefix = "word "
|
||||
case 4:
|
||||
prefix = "dword "
|
||||
case 8:
|
||||
prefix = "qword "
|
||||
case 16:
|
||||
prefix = "xmmword "
|
||||
case 32:
|
||||
prefix = "ymmword "
|
||||
}
|
||||
switch inst.Op {
|
||||
case INVLPG:
|
||||
prefix = "byte "
|
||||
case STOSB, MOVSB, CMPSB, LODSB, SCASB:
|
||||
prefix = "byte "
|
||||
case STOSW, MOVSW, CMPSW, LODSW, SCASW:
|
||||
prefix = "word "
|
||||
case STOSD, MOVSD, CMPSD, LODSD, SCASD:
|
||||
prefix = "dword "
|
||||
case STOSQ, MOVSQ, CMPSQ, LODSQ, SCASQ:
|
||||
prefix = "qword "
|
||||
case LAR:
|
||||
prefix = "word "
|
||||
case BOUND:
|
||||
if inst.Mode == 32 {
|
||||
prefix = "qword "
|
||||
} else {
|
||||
prefix = "dword "
|
||||
}
|
||||
case PREFETCHW, PREFETCHNTA, PREFETCHT0, PREFETCHT1, PREFETCHT2, CLFLUSH:
|
||||
prefix = "zmmword "
|
||||
}
|
||||
switch inst.Op {
|
||||
case MOVSB, MOVSW, MOVSD, MOVSQ, CMPSB, CMPSW, CMPSD, CMPSQ, STOSB, STOSW, STOSD, STOSQ, SCASB, SCASW, SCASD, SCASQ, LODSB, LODSW, LODSD, LODSQ:
|
||||
switch a.Base {
|
||||
case DI, EDI, RDI:
|
||||
if a.Segment == ES {
|
||||
a.Segment = 0
|
||||
}
|
||||
case SI, ESI, RSI:
|
||||
if a.Segment == DS {
|
||||
a.Segment = 0
|
||||
}
|
||||
}
|
||||
case LEA:
|
||||
a.Segment = 0
|
||||
default:
|
||||
switch a.Base {
|
||||
case SP, ESP, RSP, BP, EBP, RBP:
|
||||
if a.Segment == SS {
|
||||
a.Segment = 0
|
||||
}
|
||||
default:
|
||||
if a.Segment == DS {
|
||||
a.Segment = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inst.Mode == 64 && a.Segment != FS && a.Segment != GS {
|
||||
a.Segment = 0
|
||||
}
|
||||
|
||||
prefix += "ptr "
|
||||
if s, disp := memArgToSymbol(a, pc, inst.Len, symname); s != "" {
|
||||
suffix := ""
|
||||
if disp != 0 {
|
||||
suffix = fmt.Sprintf("%+d", disp)
|
||||
}
|
||||
return prefix + fmt.Sprintf("[%s%s]", s, suffix)
|
||||
}
|
||||
if a.Segment != 0 {
|
||||
prefix += strings.ToLower(a.Segment.String()) + ":"
|
||||
}
|
||||
prefix += "["
|
||||
if a.Base != 0 {
|
||||
prefix += intelArg(inst, pc, symname, a.Base)
|
||||
}
|
||||
if a.Scale != 0 && a.Index != 0 {
|
||||
if a.Base != 0 {
|
||||
prefix += "+"
|
||||
}
|
||||
prefix += fmt.Sprintf("%s*%d", intelArg(inst, pc, symname, a.Index), a.Scale)
|
||||
}
|
||||
if a.Disp != 0 {
|
||||
if prefix[len(prefix)-1] == '[' && (a.Disp >= 0 || int64(int32(a.Disp)) != a.Disp) {
|
||||
prefix += fmt.Sprintf("%#x", uint64(a.Disp))
|
||||
} else {
|
||||
prefix += fmt.Sprintf("%+#x", a.Disp)
|
||||
}
|
||||
}
|
||||
prefix += "]"
|
||||
return prefix
|
||||
case Rel:
|
||||
if pc == 0 {
|
||||
return fmt.Sprintf(".%+#x", int64(a))
|
||||
} else {
|
||||
addr := pc + uint64(inst.Len) + uint64(a)
|
||||
if s, base := symname(addr); s != "" && addr == base {
|
||||
return fmt.Sprintf("%s", s)
|
||||
} else {
|
||||
addr := pc + uint64(inst.Len) + uint64(a)
|
||||
return fmt.Sprintf("%#x", addr)
|
||||
}
|
||||
}
|
||||
case Reg:
|
||||
if int(a) < len(intelReg) && intelReg[a] != "" {
|
||||
switch inst.Op {
|
||||
case VMOVDQA, VMOVDQU, VMOVNTDQA, VMOVNTDQ:
|
||||
return strings.Replace(intelReg[a], "xmm", "ymm", -1)
|
||||
default:
|
||||
return intelReg[a]
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.ToLower(arg.String())
|
||||
}
|
||||
|
||||
var intelOp = map[Op]string{
|
||||
JAE: "jnb",
|
||||
JA: "jnbe",
|
||||
JGE: "jnl",
|
||||
JNE: "jnz",
|
||||
JG: "jnle",
|
||||
JE: "jz",
|
||||
SETAE: "setnb",
|
||||
SETA: "setnbe",
|
||||
SETGE: "setnl",
|
||||
SETNE: "setnz",
|
||||
SETG: "setnle",
|
||||
SETE: "setz",
|
||||
CMOVAE: "cmovnb",
|
||||
CMOVA: "cmovnbe",
|
||||
CMOVGE: "cmovnl",
|
||||
CMOVNE: "cmovnz",
|
||||
CMOVG: "cmovnle",
|
||||
CMOVE: "cmovz",
|
||||
LCALL: "call far",
|
||||
LJMP: "jmp far",
|
||||
LRET: "ret far",
|
||||
ICEBP: "int1",
|
||||
MOVSD_XMM: "movsd",
|
||||
XLATB: "xlat",
|
||||
}
|
||||
|
||||
var intelReg = [...]string{
|
||||
F0: "st0",
|
||||
F1: "st1",
|
||||
F2: "st2",
|
||||
F3: "st3",
|
||||
F4: "st4",
|
||||
F5: "st5",
|
||||
F6: "st6",
|
||||
F7: "st7",
|
||||
M0: "mmx0",
|
||||
M1: "mmx1",
|
||||
M2: "mmx2",
|
||||
M3: "mmx3",
|
||||
M4: "mmx4",
|
||||
M5: "mmx5",
|
||||
M6: "mmx6",
|
||||
M7: "mmx7",
|
||||
X0: "xmm0",
|
||||
X1: "xmm1",
|
||||
X2: "xmm2",
|
||||
X3: "xmm3",
|
||||
X4: "xmm4",
|
||||
X5: "xmm5",
|
||||
X6: "xmm6",
|
||||
X7: "xmm7",
|
||||
X8: "xmm8",
|
||||
X9: "xmm9",
|
||||
X10: "xmm10",
|
||||
X11: "xmm11",
|
||||
X12: "xmm12",
|
||||
X13: "xmm13",
|
||||
X14: "xmm14",
|
||||
X15: "xmm15",
|
||||
|
||||
// TODO: Maybe the constants are named wrong.
|
||||
SPB: "spl",
|
||||
BPB: "bpl",
|
||||
SIB: "sil",
|
||||
DIB: "dil",
|
||||
|
||||
R8L: "r8d",
|
||||
R9L: "r9d",
|
||||
R10L: "r10d",
|
||||
R11L: "r11d",
|
||||
R12L: "r12d",
|
||||
R13L: "r13d",
|
||||
R14L: "r14d",
|
||||
R15L: "r15d",
|
||||
}
|
||||
403
vendor/golang.org/x/arch/x86/x86asm/plan9x.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x86asm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type SymLookup func(uint64) (string, uint64)
|
||||
|
||||
// GoSyntax returns the Go assembler syntax for the instruction.
|
||||
// The syntax was originally defined by Plan 9.
|
||||
// The pc is the program counter of the instruction, used for expanding
|
||||
// PC-relative addresses into absolute ones.
|
||||
// The symname function queries the symbol table for the program
|
||||
// being disassembled. Given a target address it returns the name and base
|
||||
// address of the symbol containing the target, if any; otherwise it returns "", 0.
|
||||
func GoSyntax(inst Inst, pc uint64, symname SymLookup) string {
|
||||
if symname == nil {
|
||||
symname = func(uint64) (string, uint64) { return "", 0 }
|
||||
}
|
||||
var args []string
|
||||
for i := len(inst.Args) - 1; i >= 0; i-- {
|
||||
a := inst.Args[i]
|
||||
if a == nil {
|
||||
continue
|
||||
}
|
||||
args = append(args, plan9Arg(&inst, pc, symname, a))
|
||||
}
|
||||
|
||||
var rep string
|
||||
var last Prefix
|
||||
for _, p := range inst.Prefix {
|
||||
if p == 0 || p.IsREX() || p.IsVEX() {
|
||||
break
|
||||
}
|
||||
|
||||
switch {
|
||||
// Don't show prefixes implied by the instruction text.
|
||||
case p&0xFF00 == PrefixImplicit:
|
||||
continue
|
||||
// Only REP and REPN are recognized repeaters. Plan 9 syntax
|
||||
// treats them as separate opcodes.
|
||||
case p&0xFF == PrefixREP:
|
||||
rep = "REP; "
|
||||
case p&0xFF == PrefixREPN:
|
||||
rep = "REPNE; "
|
||||
default:
|
||||
last = p
|
||||
}
|
||||
}
|
||||
|
||||
prefix := ""
|
||||
switch last & 0xFF {
|
||||
case 0, 0x66, 0x67:
|
||||
// ignore
|
||||
default:
|
||||
prefix += last.String() + " "
|
||||
}
|
||||
|
||||
op := inst.Op.String()
|
||||
if plan9Suffix[inst.Op] {
|
||||
s := inst.DataSize
|
||||
if inst.MemBytes != 0 {
|
||||
s = inst.MemBytes * 8
|
||||
} else if inst.Args[1] == nil { // look for register-only 64-bit instruction, like PUSHQ AX
|
||||
if r, ok := inst.Args[0].(Reg); ok && RAX <= r && r <= R15 {
|
||||
s = 64
|
||||
}
|
||||
}
|
||||
switch s {
|
||||
case 8:
|
||||
op += "B"
|
||||
case 16:
|
||||
op += "W"
|
||||
case 32:
|
||||
op += "L"
|
||||
case 64:
|
||||
op += "Q"
|
||||
}
|
||||
}
|
||||
|
||||
if inst.Op == CMP {
|
||||
// Use reads-left-to-right ordering for comparisons.
|
||||
// See issue 60920.
|
||||
args[0], args[1] = args[1], args[0]
|
||||
}
|
||||
|
||||
if args != nil {
|
||||
op += " " + strings.Join(args, ", ")
|
||||
}
|
||||
|
||||
return rep + prefix + op
|
||||
}
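// Editor's note: a minimal usage sketch, not part of the upstream file.
// The instruction bytes and PC below are arbitrary assumptions; a real
// caller would usually pass a symbol lookup built from its binary's symbol
// table instead of nil.
//
//	inst, err := Decode([]byte{0x48, 0x89, 0xd8}, 64) // mov rax, rbx
//	if err == nil {
//		fmt.Println(GoSyntax(inst, 0x1000, nil)) // MOVQ BX, AX
//	}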
|
||||
|
||||
func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg Arg) string {
|
||||
switch a := arg.(type) {
|
||||
case Reg:
|
||||
return plan9Reg[a]
|
||||
case Rel:
|
||||
if pc == 0 {
|
||||
break
|
||||
}
|
||||
// If the absolute address is the start of a symbol, use the name.
|
||||
// Otherwise use the raw address, so that things like relative
|
||||
// jumps show up as JMP 0x123 instead of JMP f+10(SB).
|
||||
// It is usually easier to search for 0x123 than to do the mental
|
||||
// arithmetic to find f+10.
|
||||
addr := pc + uint64(inst.Len) + uint64(a)
|
||||
if s, base := symname(addr); s != "" && addr == base {
|
||||
return fmt.Sprintf("%s(SB)", s)
|
||||
}
|
||||
return fmt.Sprintf("%#x", addr)
|
||||
|
||||
case Imm:
|
||||
if (inst.Op == MOV || inst.Op == PUSH) && inst.DataSize == 32 {
|
||||
// Only try to convert an immediate to a symbol in certain
|
||||
// special circumstances. See issue 72942.
|
||||
//
|
||||
// On 64-bit, symbol addresses always hit the Mem case below.
|
||||
// Particularly, we use LEAQ to materialize the address of
|
||||
// a global or function.
|
||||
//
|
||||
// On 32-bit, we sometimes use MOVL. Still try to symbolize
|
||||
// those immediates.
|
||||
if s, base := symname(uint64(a)); s != "" {
|
||||
suffix := ""
|
||||
if uint64(a) != base {
|
||||
suffix = fmt.Sprintf("%+d", uint64(a)-base)
|
||||
}
|
||||
return fmt.Sprintf("$%s%s(SB)", s, suffix)
|
||||
}
|
||||
}
|
||||
if inst.Mode == 32 {
|
||||
return fmt.Sprintf("$%#x", uint32(a))
|
||||
}
|
||||
if Imm(int32(a)) == a {
|
||||
return fmt.Sprintf("$%#x", int64(a))
|
||||
}
|
||||
return fmt.Sprintf("$%#x", uint64(a))
|
||||
case Mem:
|
||||
if s, disp := memArgToSymbol(a, pc, inst.Len, symname); s != "" {
|
||||
suffix := ""
|
||||
if disp != 0 {
|
||||
suffix = fmt.Sprintf("%+d", disp)
|
||||
}
|
||||
return fmt.Sprintf("%s%s(SB)", s, suffix)
|
||||
}
|
||||
s := ""
|
||||
if a.Segment != 0 {
|
||||
s += fmt.Sprintf("%s:", plan9Reg[a.Segment])
|
||||
}
|
||||
if a.Disp != 0 {
|
||||
s += fmt.Sprintf("%#x", a.Disp)
|
||||
} else {
|
||||
s += "0"
|
||||
}
|
||||
if a.Base != 0 {
|
||||
s += fmt.Sprintf("(%s)", plan9Reg[a.Base])
|
||||
}
|
||||
if a.Index != 0 && a.Scale != 0 {
|
||||
s += fmt.Sprintf("(%s*%d)", plan9Reg[a.Index], a.Scale)
|
||||
}
|
||||
return s
|
||||
}
|
||||
return arg.String()
|
||||
}
|
||||
|
||||
func memArgToSymbol(a Mem, pc uint64, instrLen int, symname SymLookup) (string, int64) {
|
||||
if a.Segment != 0 || a.Disp == 0 || a.Index != 0 || a.Scale != 0 {
|
||||
return "", 0
|
||||
}
|
||||
|
||||
var disp uint64
|
||||
switch a.Base {
|
||||
case IP, EIP, RIP:
|
||||
disp = uint64(a.Disp + int64(pc) + int64(instrLen))
|
||||
case 0:
|
||||
disp = uint64(a.Disp)
|
||||
default:
|
||||
return "", 0
|
||||
}
|
||||
|
||||
s, base := symname(disp)
|
||||
return s, int64(disp) - int64(base)
|
||||
}
|
||||
|
||||
var plan9Suffix = [maxOp + 1]bool{
|
||||
ADC: true,
|
||||
ADD: true,
|
||||
AND: true,
|
||||
BSF: true,
|
||||
BSR: true,
|
||||
BT: true,
|
||||
BTC: true,
|
||||
BTR: true,
|
||||
BTS: true,
|
||||
CMP: true,
|
||||
CMPXCHG: true,
|
||||
CVTSI2SD: true,
|
||||
CVTSI2SS: true,
|
||||
CVTSD2SI: true,
|
||||
CVTSS2SI: true,
|
||||
CVTTSD2SI: true,
|
||||
CVTTSS2SI: true,
|
||||
DEC: true,
|
||||
DIV: true,
|
||||
FLDENV: true,
|
||||
FRSTOR: true,
|
||||
IDIV: true,
|
||||
IMUL: true,
|
||||
IN: true,
|
||||
INC: true,
|
||||
LEA: true,
|
||||
MOV: true,
|
||||
MOVNTI: true,
|
||||
MUL: true,
|
||||
NEG: true,
|
||||
NOP: true,
|
||||
NOT: true,
|
||||
OR: true,
|
||||
OUT: true,
|
||||
POP: true,
|
||||
POPA: true,
|
||||
POPCNT: true,
|
||||
PUSH: true,
|
||||
PUSHA: true,
|
||||
RCL: true,
|
||||
RCR: true,
|
||||
ROL: true,
|
||||
ROR: true,
|
||||
SAR: true,
|
||||
SBB: true,
|
||||
SHL: true,
|
||||
SHLD: true,
|
||||
SHR: true,
|
||||
SHRD: true,
|
||||
SUB: true,
|
||||
TEST: true,
|
||||
XADD: true,
|
||||
XCHG: true,
|
||||
XOR: true,
|
||||
}
|
||||
|
||||
var plan9Reg = [...]string{
|
||||
AL: "AL",
|
||||
CL: "CL",
|
||||
BL: "BL",
|
||||
DL: "DL",
|
||||
AH: "AH",
|
||||
CH: "CH",
|
||||
BH: "BH",
|
||||
DH: "DH",
|
||||
SPB: "SP",
|
||||
BPB: "BP",
|
||||
SIB: "SI",
|
||||
DIB: "DI",
|
||||
R8B: "R8",
|
||||
R9B: "R9",
|
||||
R10B: "R10",
|
||||
R11B: "R11",
|
||||
R12B: "R12",
|
||||
R13B: "R13",
|
||||
R14B: "R14",
|
||||
R15B: "R15",
|
||||
AX: "AX",
|
||||
CX: "CX",
|
||||
BX: "BX",
|
||||
DX: "DX",
|
||||
SP: "SP",
|
||||
BP: "BP",
|
||||
SI: "SI",
|
||||
DI: "DI",
|
||||
R8W: "R8",
|
||||
R9W: "R9",
|
||||
R10W: "R10",
|
||||
R11W: "R11",
|
||||
R12W: "R12",
|
||||
R13W: "R13",
|
||||
R14W: "R14",
|
||||
R15W: "R15",
|
||||
EAX: "AX",
|
||||
ECX: "CX",
|
||||
EDX: "DX",
|
||||
EBX: "BX",
|
||||
ESP: "SP",
|
||||
EBP: "BP",
|
||||
ESI: "SI",
|
||||
EDI: "DI",
|
||||
R8L: "R8",
|
||||
R9L: "R9",
|
||||
R10L: "R10",
|
||||
R11L: "R11",
|
||||
R12L: "R12",
|
||||
R13L: "R13",
|
||||
R14L: "R14",
|
||||
R15L: "R15",
|
||||
RAX: "AX",
|
||||
RCX: "CX",
|
||||
RDX: "DX",
|
||||
RBX: "BX",
|
||||
RSP: "SP",
|
||||
RBP: "BP",
|
||||
RSI: "SI",
|
||||
RDI: "DI",
|
||||
R8: "R8",
|
||||
R9: "R9",
|
||||
R10: "R10",
|
||||
R11: "R11",
|
||||
R12: "R12",
|
||||
R13: "R13",
|
||||
R14: "R14",
|
||||
R15: "R15",
|
||||
IP: "IP",
|
||||
EIP: "IP",
|
||||
RIP: "IP",
|
||||
F0: "F0",
|
||||
F1: "F1",
|
||||
F2: "F2",
|
||||
F3: "F3",
|
||||
F4: "F4",
|
||||
F5: "F5",
|
||||
F6: "F6",
|
||||
F7: "F7",
|
||||
M0: "M0",
|
||||
M1: "M1",
|
||||
M2: "M2",
|
||||
M3: "M3",
|
||||
M4: "M4",
|
||||
M5: "M5",
|
||||
M6: "M6",
|
||||
M7: "M7",
|
||||
X0: "X0",
|
||||
X1: "X1",
|
||||
X2: "X2",
|
||||
X3: "X3",
|
||||
X4: "X4",
|
||||
X5: "X5",
|
||||
X6: "X6",
|
||||
X7: "X7",
|
||||
X8: "X8",
|
||||
X9: "X9",
|
||||
X10: "X10",
|
||||
X11: "X11",
|
||||
X12: "X12",
|
||||
X13: "X13",
|
||||
X14: "X14",
|
||||
X15: "X15",
|
||||
CS: "CS",
|
||||
SS: "SS",
|
||||
DS: "DS",
|
||||
ES: "ES",
|
||||
FS: "FS",
|
||||
GS: "GS",
|
||||
GDTR: "GDTR",
|
||||
IDTR: "IDTR",
|
||||
LDTR: "LDTR",
|
||||
MSW: "MSW",
|
||||
TASK: "TASK",
|
||||
CR0: "CR0",
|
||||
CR1: "CR1",
|
||||
CR2: "CR2",
|
||||
CR3: "CR3",
|
||||
CR4: "CR4",
|
||||
CR5: "CR5",
|
||||
CR6: "CR6",
|
||||
CR7: "CR7",
|
||||
CR8: "CR8",
|
||||
CR9: "CR9",
|
||||
CR10: "CR10",
|
||||
CR11: "CR11",
|
||||
CR12: "CR12",
|
||||
CR13: "CR13",
|
||||
CR14: "CR14",
|
||||
CR15: "CR15",
|
||||
DR0: "DR0",
|
||||
DR1: "DR1",
|
||||
DR2: "DR2",
|
||||
DR3: "DR3",
|
||||
DR4: "DR4",
|
||||
DR5: "DR5",
|
||||
DR6: "DR6",
|
||||
DR7: "DR7",
|
||||
DR8: "DR8",
|
||||
DR9: "DR9",
|
||||
DR10: "DR10",
|
||||
DR11: "DR11",
|
||||
DR12: "DR12",
|
||||
DR13: "DR13",
|
||||
DR14: "DR14",
|
||||
DR15: "DR15",
|
||||
TR0: "TR0",
|
||||
TR1: "TR1",
|
||||
TR2: "TR2",
|
||||
TR3: "TR3",
|
||||
TR4: "TR4",
|
||||
TR5: "TR5",
|
||||
TR6: "TR6",
|
||||
TR7: "TR7",
|
||||
}
|
||||
9924
vendor/golang.org/x/arch/x86/x86asm/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
16
vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gc && !purego

package chacha20

const bufSize = 256

//go:noescape
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
}
307
vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
generated
vendored
Normal file
@@ -0,0 +1,307 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define NUM_ROUNDS 10
|
||||
|
||||
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
|
||||
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
|
||||
MOVD dst+0(FP), R1
|
||||
MOVD src+24(FP), R2
|
||||
MOVD src_len+32(FP), R3
|
||||
MOVD key+48(FP), R4
|
||||
MOVD nonce+56(FP), R6
|
||||
MOVD counter+64(FP), R7
|
||||
|
||||
MOVD $·constants(SB), R10
|
||||
MOVD $·incRotMatrix(SB), R11
|
||||
|
||||
MOVW (R7), R20
|
||||
|
||||
AND $~255, R3, R13
|
||||
ADD R2, R13, R12 // R12 for block end
|
||||
AND $255, R3, R13
|
||||
loop:
|
||||
MOVD $NUM_ROUNDS, R21
|
||||
VLD1 (R11), [V30.S4, V31.S4]
|
||||
|
||||
// load constants
|
||||
// VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
|
||||
WORD $0x4D60E940
|
||||
|
||||
// load keys
|
||||
// VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
|
||||
WORD $0x4DFFE884
|
||||
// VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
|
||||
WORD $0x4DFFE888
|
||||
SUB $32, R4
|
||||
|
||||
// load counter + nonce
|
||||
// VLD1R (R7), [V12.S4]
|
||||
WORD $0x4D40C8EC
|
||||
|
||||
// VLD3R (R6), [V13.S4, V14.S4, V15.S4]
|
||||
WORD $0x4D40E8CD
|
||||
|
||||
// update counter
|
||||
VADD V30.S4, V12.S4, V12.S4
|
||||
|
||||
chacha:
|
||||
// V0..V3 += V4..V7
|
||||
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
|
||||
VADD V0.S4, V4.S4, V0.S4
|
||||
VADD V1.S4, V5.S4, V1.S4
|
||||
VADD V2.S4, V6.S4, V2.S4
|
||||
VADD V3.S4, V7.S4, V3.S4
|
||||
VEOR V12.B16, V0.B16, V12.B16
|
||||
VEOR V13.B16, V1.B16, V13.B16
|
||||
VEOR V14.B16, V2.B16, V14.B16
|
||||
VEOR V15.B16, V3.B16, V15.B16
|
||||
VREV32 V12.H8, V12.H8
|
||||
VREV32 V13.H8, V13.H8
|
||||
VREV32 V14.H8, V14.H8
|
||||
VREV32 V15.H8, V15.H8
|
||||
// V8..V11 += V12..V15
|
||||
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
|
||||
VADD V8.S4, V12.S4, V8.S4
|
||||
VADD V9.S4, V13.S4, V9.S4
|
||||
VADD V10.S4, V14.S4, V10.S4
|
||||
VADD V11.S4, V15.S4, V11.S4
|
||||
VEOR V8.B16, V4.B16, V16.B16
|
||||
VEOR V9.B16, V5.B16, V17.B16
|
||||
VEOR V10.B16, V6.B16, V18.B16
|
||||
VEOR V11.B16, V7.B16, V19.B16
|
||||
VSHL $12, V16.S4, V4.S4
|
||||
VSHL $12, V17.S4, V5.S4
|
||||
VSHL $12, V18.S4, V6.S4
|
||||
VSHL $12, V19.S4, V7.S4
|
||||
VSRI $20, V16.S4, V4.S4
|
||||
VSRI $20, V17.S4, V5.S4
|
||||
VSRI $20, V18.S4, V6.S4
|
||||
VSRI $20, V19.S4, V7.S4
|
||||
|
||||
// V0..V3 += V4..V7
|
||||
// V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
|
||||
VADD V0.S4, V4.S4, V0.S4
|
||||
VADD V1.S4, V5.S4, V1.S4
|
||||
VADD V2.S4, V6.S4, V2.S4
|
||||
VADD V3.S4, V7.S4, V3.S4
|
||||
VEOR V12.B16, V0.B16, V12.B16
|
||||
VEOR V13.B16, V1.B16, V13.B16
|
||||
VEOR V14.B16, V2.B16, V14.B16
|
||||
VEOR V15.B16, V3.B16, V15.B16
|
||||
VTBL V31.B16, [V12.B16], V12.B16
|
||||
VTBL V31.B16, [V13.B16], V13.B16
|
||||
VTBL V31.B16, [V14.B16], V14.B16
|
||||
VTBL V31.B16, [V15.B16], V15.B16
|
||||
|
||||
// V8..V11 += V12..V15
|
||||
// V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
|
||||
VADD V12.S4, V8.S4, V8.S4
|
||||
VADD V13.S4, V9.S4, V9.S4
|
||||
VADD V14.S4, V10.S4, V10.S4
|
||||
VADD V15.S4, V11.S4, V11.S4
|
||||
VEOR V8.B16, V4.B16, V16.B16
|
||||
VEOR V9.B16, V5.B16, V17.B16
|
||||
VEOR V10.B16, V6.B16, V18.B16
|
||||
VEOR V11.B16, V7.B16, V19.B16
|
||||
VSHL $7, V16.S4, V4.S4
|
||||
VSHL $7, V17.S4, V5.S4
|
||||
VSHL $7, V18.S4, V6.S4
|
||||
VSHL $7, V19.S4, V7.S4
|
||||
VSRI $25, V16.S4, V4.S4
|
||||
VSRI $25, V17.S4, V5.S4
|
||||
VSRI $25, V18.S4, V6.S4
|
||||
VSRI $25, V19.S4, V7.S4
|
||||
|
||||
// V0..V3 += V5..V7, V4
|
||||
// V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
|
||||
VADD V0.S4, V5.S4, V0.S4
|
||||
VADD V1.S4, V6.S4, V1.S4
|
||||
VADD V2.S4, V7.S4, V2.S4
|
||||
VADD V3.S4, V4.S4, V3.S4
|
||||
VEOR V15.B16, V0.B16, V15.B16
|
||||
VEOR V12.B16, V1.B16, V12.B16
|
||||
VEOR V13.B16, V2.B16, V13.B16
|
||||
VEOR V14.B16, V3.B16, V14.B16
|
||||
VREV32 V12.H8, V12.H8
|
||||
VREV32 V13.H8, V13.H8
|
||||
VREV32 V14.H8, V14.H8
|
||||
VREV32 V15.H8, V15.H8
|
||||
|
||||
// V10 += V15; V5 <<<= ((V10 XOR V5), 12)
|
||||
// ...
|
||||
VADD V15.S4, V10.S4, V10.S4
|
||||
VADD V12.S4, V11.S4, V11.S4
|
||||
VADD V13.S4, V8.S4, V8.S4
|
||||
VADD V14.S4, V9.S4, V9.S4
|
||||
VEOR V10.B16, V5.B16, V16.B16
|
||||
VEOR V11.B16, V6.B16, V17.B16
|
||||
VEOR V8.B16, V7.B16, V18.B16
|
||||
VEOR V9.B16, V4.B16, V19.B16
|
||||
VSHL $12, V16.S4, V5.S4
|
||||
VSHL $12, V17.S4, V6.S4
|
||||
VSHL $12, V18.S4, V7.S4
|
||||
VSHL $12, V19.S4, V4.S4
|
||||
VSRI $20, V16.S4, V5.S4
|
||||
VSRI $20, V17.S4, V6.S4
|
||||
VSRI $20, V18.S4, V7.S4
|
||||
VSRI $20, V19.S4, V4.S4
|
||||
|
||||
// V0 += V5; V15 <<<= ((V0 XOR V15), 8)
|
||||
// ...
|
||||
VADD V5.S4, V0.S4, V0.S4
|
||||
VADD V6.S4, V1.S4, V1.S4
|
||||
VADD V7.S4, V2.S4, V2.S4
|
||||
VADD V4.S4, V3.S4, V3.S4
|
||||
VEOR V0.B16, V15.B16, V15.B16
|
||||
VEOR V1.B16, V12.B16, V12.B16
|
||||
VEOR V2.B16, V13.B16, V13.B16
|
||||
VEOR V3.B16, V14.B16, V14.B16
|
||||
VTBL V31.B16, [V12.B16], V12.B16
|
||||
VTBL V31.B16, [V13.B16], V13.B16
|
||||
VTBL V31.B16, [V14.B16], V14.B16
|
||||
VTBL V31.B16, [V15.B16], V15.B16
|
||||
|
||||
// V10 += V15; V5 <<<= ((V10 XOR V5), 7)
|
||||
// ...
|
||||
VADD V15.S4, V10.S4, V10.S4
|
||||
VADD V12.S4, V11.S4, V11.S4
|
||||
VADD V13.S4, V8.S4, V8.S4
|
||||
VADD V14.S4, V9.S4, V9.S4
|
||||
VEOR V10.B16, V5.B16, V16.B16
|
||||
VEOR V11.B16, V6.B16, V17.B16
|
||||
VEOR V8.B16, V7.B16, V18.B16
|
||||
VEOR V9.B16, V4.B16, V19.B16
|
||||
VSHL $7, V16.S4, V5.S4
|
||||
VSHL $7, V17.S4, V6.S4
|
||||
VSHL $7, V18.S4, V7.S4
|
||||
VSHL $7, V19.S4, V4.S4
|
||||
VSRI $25, V16.S4, V5.S4
|
||||
VSRI $25, V17.S4, V6.S4
|
||||
VSRI $25, V18.S4, V7.S4
|
||||
VSRI $25, V19.S4, V4.S4
|
||||
|
||||
SUB $1, R21
|
||||
CBNZ R21, chacha
|
||||
|
||||
// VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
|
||||
WORD $0x4D60E950
|
||||
|
||||
// VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
|
||||
WORD $0x4DFFE894
|
||||
VADD V30.S4, V12.S4, V12.S4
|
||||
VADD V16.S4, V0.S4, V0.S4
|
||||
VADD V17.S4, V1.S4, V1.S4
|
||||
VADD V18.S4, V2.S4, V2.S4
|
||||
VADD V19.S4, V3.S4, V3.S4
|
||||
// VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
|
||||
WORD $0x4DFFE898
|
||||
// restore R4
|
||||
SUB $32, R4
|
||||
|
||||
// load counter + nonce
|
||||
// VLD1R (R7), [V28.S4]
|
||||
WORD $0x4D40C8FC
|
||||
// VLD3R (R6), [V29.S4, V30.S4, V31.S4]
|
||||
WORD $0x4D40E8DD
|
||||
|
||||
VADD V20.S4, V4.S4, V4.S4
|
||||
VADD V21.S4, V5.S4, V5.S4
|
||||
VADD V22.S4, V6.S4, V6.S4
|
||||
VADD V23.S4, V7.S4, V7.S4
|
||||
VADD V24.S4, V8.S4, V8.S4
|
||||
VADD V25.S4, V9.S4, V9.S4
|
||||
VADD V26.S4, V10.S4, V10.S4
|
||||
VADD V27.S4, V11.S4, V11.S4
|
||||
VADD V28.S4, V12.S4, V12.S4
|
||||
VADD V29.S4, V13.S4, V13.S4
|
||||
VADD V30.S4, V14.S4, V14.S4
|
||||
VADD V31.S4, V15.S4, V15.S4
|
||||
|
||||
VZIP1 V1.S4, V0.S4, V16.S4
|
||||
VZIP2 V1.S4, V0.S4, V17.S4
|
||||
VZIP1 V3.S4, V2.S4, V18.S4
|
||||
VZIP2 V3.S4, V2.S4, V19.S4
|
||||
VZIP1 V5.S4, V4.S4, V20.S4
|
||||
VZIP2 V5.S4, V4.S4, V21.S4
|
||||
VZIP1 V7.S4, V6.S4, V22.S4
|
||||
VZIP2 V7.S4, V6.S4, V23.S4
|
||||
VZIP1 V9.S4, V8.S4, V24.S4
|
||||
VZIP2 V9.S4, V8.S4, V25.S4
|
||||
VZIP1 V11.S4, V10.S4, V26.S4
|
||||
VZIP2 V11.S4, V10.S4, V27.S4
|
||||
VZIP1 V13.S4, V12.S4, V28.S4
|
||||
VZIP2 V13.S4, V12.S4, V29.S4
|
||||
VZIP1 V15.S4, V14.S4, V30.S4
|
||||
VZIP2 V15.S4, V14.S4, V31.S4
|
||||
VZIP1 V18.D2, V16.D2, V0.D2
|
||||
VZIP2 V18.D2, V16.D2, V4.D2
|
||||
VZIP1 V19.D2, V17.D2, V8.D2
|
||||
VZIP2 V19.D2, V17.D2, V12.D2
|
||||
VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]
|
||||
|
||||
VZIP1 V22.D2, V20.D2, V1.D2
|
||||
VZIP2 V22.D2, V20.D2, V5.D2
|
||||
VZIP1 V23.D2, V21.D2, V9.D2
|
||||
VZIP2 V23.D2, V21.D2, V13.D2
|
||||
VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
|
||||
VZIP1 V26.D2, V24.D2, V2.D2
|
||||
VZIP2 V26.D2, V24.D2, V6.D2
|
||||
VZIP1 V27.D2, V25.D2, V10.D2
|
||||
VZIP2 V27.D2, V25.D2, V14.D2
|
||||
VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
|
||||
VZIP1 V30.D2, V28.D2, V3.D2
|
||||
VZIP2 V30.D2, V28.D2, V7.D2
|
||||
VZIP1 V31.D2, V29.D2, V11.D2
|
||||
VZIP2 V31.D2, V29.D2, V15.D2
|
||||
VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
|
||||
VEOR V0.B16, V16.B16, V16.B16
|
||||
VEOR V1.B16, V17.B16, V17.B16
|
||||
VEOR V2.B16, V18.B16, V18.B16
|
||||
VEOR V3.B16, V19.B16, V19.B16
|
||||
VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
|
||||
VEOR V4.B16, V20.B16, V20.B16
|
||||
VEOR V5.B16, V21.B16, V21.B16
|
||||
VEOR V6.B16, V22.B16, V22.B16
|
||||
VEOR V7.B16, V23.B16, V23.B16
|
||||
VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
|
||||
VEOR V8.B16, V24.B16, V24.B16
|
||||
VEOR V9.B16, V25.B16, V25.B16
|
||||
VEOR V10.B16, V26.B16, V26.B16
|
||||
VEOR V11.B16, V27.B16, V27.B16
|
||||
VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
|
||||
VEOR V12.B16, V28.B16, V28.B16
|
||||
VEOR V13.B16, V29.B16, V29.B16
|
||||
VEOR V14.B16, V30.B16, V30.B16
|
||||
VEOR V15.B16, V31.B16, V31.B16
|
||||
VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)
|
||||
|
||||
ADD $4, R20
|
||||
MOVW R20, (R7) // update counter
|
||||
|
||||
CMP R2, R12
|
||||
BGT loop
|
||||
|
||||
RET
|
||||
|
||||
|
||||
DATA ·constants+0x00(SB)/4, $0x61707865
|
||||
DATA ·constants+0x04(SB)/4, $0x3320646e
|
||||
DATA ·constants+0x08(SB)/4, $0x79622d32
|
||||
DATA ·constants+0x0c(SB)/4, $0x6b206574
|
||||
GLOBL ·constants(SB), NOPTR|RODATA, $32
|
||||
|
||||
DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
|
||||
DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
|
||||
DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
|
||||
DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
|
||||
DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
|
||||
DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
|
||||
DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
|
||||
DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
|
||||
GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
|
||||
398
vendor/golang.org/x/crypto/chacha20/chacha_generic.go
generated
vendored
Normal file
@@ -0,0 +1,398 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms
|
||||
// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01.
|
||||
package chacha20
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math/bits"
|
||||
|
||||
"golang.org/x/crypto/internal/alias"
|
||||
)
|
||||
|
||||
const (
|
||||
// KeySize is the size of the key used by this cipher, in bytes.
|
||||
KeySize = 32
|
||||
|
||||
// NonceSize is the size of the nonce used with the standard variant of this
|
||||
// cipher, in bytes.
|
||||
//
|
||||
// Note that this is too short to be safely generated at random if the same
|
||||
// key is reused more than 2³² times.
|
||||
NonceSize = 12
|
||||
|
||||
// NonceSizeX is the size of the nonce used with the XChaCha20 variant of
|
||||
// this cipher, in bytes.
|
||||
NonceSizeX = 24
|
||||
)
|
||||
|
||||
// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key
|
||||
// and nonce. A *Cipher implements the cipher.Stream interface.
|
||||
type Cipher struct {
|
||||
// The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter
|
||||
// (incremented after each block), and 3 of nonce.
|
||||
key [8]uint32
|
||||
counter uint32
|
||||
nonce [3]uint32
|
||||
|
||||
// The last len bytes of buf are leftover key stream bytes from the previous
|
||||
// XORKeyStream invocation. The size of buf depends on how many blocks are
|
||||
// computed at a time by xorKeyStreamBlocks.
|
||||
buf [bufSize]byte
|
||||
len int
|
||||
|
||||
// overflow is set when the counter overflowed, no more blocks can be
|
||||
// generated, and the next XORKeyStream call should panic.
|
||||
overflow bool
|
||||
|
||||
// The counter-independent results of the first round are cached after they
|
||||
// are computed the first time.
|
||||
precompDone bool
|
||||
p1, p5, p9, p13 uint32
|
||||
p2, p6, p10, p14 uint32
|
||||
p3, p7, p11, p15 uint32
|
||||
}
|
||||
|
||||
var _ cipher.Stream = (*Cipher)(nil)
|
||||
|
||||
// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given
|
||||
// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided,
|
||||
// the XChaCha20 construction will be used. It returns an error if key or nonce
|
||||
// have any other length.
|
||||
//
|
||||
// Note that ChaCha20, like all stream ciphers, is not authenticated and allows
|
||||
// attackers to silently tamper with the plaintext. For this reason, it is more
|
||||
// appropriate as a building block than as a standalone encryption mechanism.
|
||||
// Instead, consider using package golang.org/x/crypto/chacha20poly1305.
|
||||
func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) {
|
||||
// This function is split into a wrapper so that the Cipher allocation will
|
||||
// be inlined, and depending on how the caller uses the return value, won't
|
||||
// escape to the heap.
|
||||
c := &Cipher{}
|
||||
return newUnauthenticatedCipher(c, key, nonce)
|
||||
}
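// Editor's note: a minimal usage sketch, not part of the upstream file.
// The all-zero key and nonce are placeholder assumptions for illustration
// only; real callers must use a secret key and a nonce that is unique per key.
//
//	key := make([]byte, KeySize)     // 32 bytes
//	nonce := make([]byte, NonceSize) // 12 bytes
//	c, err := NewUnauthenticatedCipher(key, nonce)
//	if err == nil {
//		buf := []byte("hello")
//		c.XORKeyStream(buf, buf) // encrypts in place
//	}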
|
||||
|
||||
func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20: wrong key size")
|
||||
}
|
||||
if len(nonce) == NonceSizeX {
|
||||
// XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a
|
||||
// derived key, allowing it to operate on a nonce of 24 bytes. See
|
||||
// draft-irtf-cfrg-xchacha-01, Section 2.3.
|
||||
key, _ = HChaCha20(key, nonce[0:16])
|
||||
cNonce := make([]byte, NonceSize)
|
||||
copy(cNonce[4:12], nonce[16:24])
|
||||
nonce = cNonce
|
||||
} else if len(nonce) != NonceSize {
|
||||
return nil, errors.New("chacha20: wrong nonce size")
|
||||
}
|
||||
|
||||
key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint
|
||||
c.key = [8]uint32{
|
||||
binary.LittleEndian.Uint32(key[0:4]),
|
||||
binary.LittleEndian.Uint32(key[4:8]),
|
||||
binary.LittleEndian.Uint32(key[8:12]),
|
||||
binary.LittleEndian.Uint32(key[12:16]),
|
||||
binary.LittleEndian.Uint32(key[16:20]),
|
||||
binary.LittleEndian.Uint32(key[20:24]),
|
||||
binary.LittleEndian.Uint32(key[24:28]),
|
||||
binary.LittleEndian.Uint32(key[28:32]),
|
||||
}
|
||||
c.nonce = [3]uint32{
|
||||
binary.LittleEndian.Uint32(nonce[0:4]),
|
||||
binary.LittleEndian.Uint32(nonce[4:8]),
|
||||
binary.LittleEndian.Uint32(nonce[8:12]),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// The constant first 4 words of the ChaCha20 state.
|
||||
const (
|
||||
j0 uint32 = 0x61707865 // expa
|
||||
j1 uint32 = 0x3320646e // nd 3
|
||||
j2 uint32 = 0x79622d32 // 2-by
|
||||
j3 uint32 = 0x6b206574 // te k
|
||||
)
|
||||
|
||||
const blockSize = 64
|
||||
|
||||
// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words.
|
||||
// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16
|
||||
// words each round, in columnar or diagonal groups of 4 at a time.
|
||||
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
|
||||
a += b
|
||||
d ^= a
|
||||
d = bits.RotateLeft32(d, 16)
|
||||
c += d
|
||||
b ^= c
|
||||
b = bits.RotateLeft32(b, 12)
|
||||
a += b
|
||||
d ^= a
|
||||
d = bits.RotateLeft32(d, 8)
|
||||
c += d
|
||||
b ^= c
|
||||
b = bits.RotateLeft32(b, 7)
|
||||
return a, b, c, d
|
||||
}
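// Editor's note: an illustrative sketch, not part of the upstream file.
// It shows how one ChaCha20 double round is built from quarterRound: four
// column calls followed by four diagonal calls over a 16-word state x
// (x being a hypothetical [16]uint32 holding the state described above).
//
//	x[0], x[4], x[8], x[12] = quarterRound(x[0], x[4], x[8], x[12])
//	x[1], x[5], x[9], x[13] = quarterRound(x[1], x[5], x[9], x[13])
//	x[2], x[6], x[10], x[14] = quarterRound(x[2], x[6], x[10], x[14])
//	x[3], x[7], x[11], x[15] = quarterRound(x[3], x[7], x[11], x[15])
//	x[0], x[5], x[10], x[15] = quarterRound(x[0], x[5], x[10], x[15])
//	x[1], x[6], x[11], x[12] = quarterRound(x[1], x[6], x[11], x[12])
//	x[2], x[7], x[8], x[13] = quarterRound(x[2], x[7], x[8], x[13])
//	x[3], x[4], x[9], x[14] = quarterRound(x[3], x[4], x[9], x[14])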
|
||||
|
||||
// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
|
||||
// behave as if (64 * counter) bytes had been encrypted so far.
|
||||
//
|
||||
// To prevent accidental counter reuse, SetCounter panics if counter is less
|
||||
// than the current value.
|
||||
//
|
||||
// Note that the execution time of XORKeyStream is not independent of the
|
||||
// counter value.
|
||||
func (s *Cipher) SetCounter(counter uint32) {
|
||||
// Internally, s may buffer multiple blocks, which complicates this
|
||||
// implementation slightly. When checking whether the counter has rolled
|
||||
// back, we must use both s.counter and s.len to determine how many blocks
|
||||
// we have already output.
|
||||
outputCounter := s.counter - uint32(s.len)/blockSize
|
||||
if s.overflow || counter < outputCounter {
|
||||
panic("chacha20: SetCounter attempted to rollback counter")
|
||||
}
|
||||
|
||||
// In the general case, we set the new counter value and reset s.len to 0,
|
||||
// causing the next call to XORKeyStream to refill the buffer. However, if
|
||||
// we're advancing within the existing buffer, we can save work by simply
|
||||
// setting s.len.
|
||||
if counter < s.counter {
|
||||
s.len = int(s.counter-counter) * blockSize
|
||||
} else {
|
||||
s.counter = counter
|
||||
s.len = 0
|
||||
}
|
||||
}
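// Editor's note: an illustrative sketch, not part of the upstream file.
// SetCounter can be used to seek forward in the key stream in 64-byte block
// units; the cipher c and the block index below are arbitrary assumptions.
//
//	// Behave as if the first 10 blocks (640 bytes) of key stream had
//	// already been consumed, then continue from there.
//	c.SetCounter(10)
//	c.XORKeyStream(dst, src)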
|
||||
|
||||
// XORKeyStream XORs each byte in the given slice with a byte from the
|
||||
// cipher's key stream. Dst and src must overlap entirely or not at all.
|
||||
//
|
||||
// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
|
||||
// to pass a dst bigger than src, and in that case, XORKeyStream will
|
||||
// only update dst[:len(src)] and will not touch the rest of dst.
|
||||
//
|
||||
// Multiple calls to XORKeyStream behave as if the concatenation of
|
||||
// the src buffers was passed in a single run. That is, Cipher
|
||||
// maintains state and does not reset at each XORKeyStream call.
|
||||
func (s *Cipher) XORKeyStream(dst, src []byte) {
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
if len(dst) < len(src) {
|
||||
panic("chacha20: output smaller than input")
|
||||
}
|
||||
dst = dst[:len(src)]
|
||||
if alias.InexactOverlap(dst, src) {
|
||||
panic("chacha20: invalid buffer overlap")
|
||||
}
|
||||
|
||||
// First, drain any remaining key stream from a previous XORKeyStream.
|
||||
if s.len != 0 {
|
||||
keyStream := s.buf[bufSize-s.len:]
|
||||
if len(src) < len(keyStream) {
|
||||
keyStream = keyStream[:len(src)]
|
||||
}
|
||||
_ = src[len(keyStream)-1] // bounds check elimination hint
|
||||
for i, b := range keyStream {
|
||||
dst[i] = src[i] ^ b
|
||||
}
|
||||
s.len -= len(keyStream)
|
||||
dst, src = dst[len(keyStream):], src[len(keyStream):]
|
||||
}
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// If we'd need to let the counter overflow and keep generating output,
|
||||
// panic immediately. If instead we'd only reach the last block, remember
|
||||
// not to generate any more output after the buffer is drained.
|
||||
numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize
|
||||
if s.overflow || uint64(s.counter)+numBlocks > 1<<32 {
|
||||
panic("chacha20: counter overflow")
|
||||
} else if uint64(s.counter)+numBlocks == 1<<32 {
|
||||
s.overflow = true
|
||||
}
|
||||
|
||||
// xorKeyStreamBlocks implementations expect input lengths that are a
|
||||
// multiple of bufSize. Platform-specific ones process multiple blocks at a
|
||||
// time, so have bufSizes that are a multiple of blockSize.
|
||||
|
||||
full := len(src) - len(src)%bufSize
|
||||
if full > 0 {
|
||||
s.xorKeyStreamBlocks(dst[:full], src[:full])
|
||||
}
|
||||
dst, src = dst[full:], src[full:]
|
||||
|
||||
// If using a multi-block xorKeyStreamBlocks would overflow, use the generic
|
||||
// one that does one block at a time.
|
||||
const blocksPerBuf = bufSize / blockSize
|
||||
if uint64(s.counter)+blocksPerBuf > 1<<32 {
|
||||
s.buf = [bufSize]byte{}
|
||||
numBlocks := (len(src) + blockSize - 1) / blockSize
|
||||
buf := s.buf[bufSize-numBlocks*blockSize:]
|
||||
copy(buf, src)
|
||||
s.xorKeyStreamBlocksGeneric(buf, buf)
|
||||
s.len = len(buf) - copy(dst, buf)
|
||||
return
|
||||
}
|
||||
|
||||
// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
|
||||
// keep the leftover keystream for the next XORKeyStream invocation.
|
||||
if len(src) > 0 {
|
||||
s.buf = [bufSize]byte{}
|
||||
copy(s.buf[:], src)
|
||||
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
|
||||
s.len = bufSize - copy(dst, s.buf[:])
|
||||
}
|
||||
}
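// Editor's note: an illustrative sketch, not part of the upstream file.
// Successive XORKeyStream calls continue the same key stream, so a message
// can be processed in arbitrary chunks; the chunk boundary below is an
// arbitrary assumption.
//
//	c.XORKeyStream(out[:100], msg[:100])
//	c.XORKeyStream(out[100:], msg[100:]) // same result as one call over msg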
|
||||
|
||||
func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
|
||||
if len(dst) != len(src) || len(dst)%blockSize != 0 {
|
||||
panic("chacha20: internal error: wrong dst and/or src length")
|
||||
}
|
||||
|
||||
// To generate each block of key stream, the initial cipher state
|
||||
// (represented below) is passed through 20 rounds of shuffling,
|
||||
// alternatively applying quarterRounds by columns (like 1, 5, 9, 13)
|
||||
// or by diagonals (like 1, 6, 11, 12).
|
||||
//
|
||||
// 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc
|
||||
// 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk
|
||||
// 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk
|
||||
// 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn
|
||||
//
|
||||
// c=constant k=key b=blockcount n=nonce
|
||||
var (
|
||||
c0, c1, c2, c3 = j0, j1, j2, j3
|
||||
c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3]
|
||||
c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7]
|
||||
_, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2]
|
||||
)
|
||||
|
||||
// Three quarters of the first round don't depend on the counter, so we can
|
||||
// calculate them here, and reuse them for multiple blocks in the loop, and
|
||||
// for future XORKeyStream invocations.
|
||||
if !s.precompDone {
|
||||
s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13)
|
||||
s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14)
|
||||
s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15)
|
||||
s.precompDone = true
|
||||
}
|
||||
|
||||
// A condition of len(src) > 0 would be sufficient, but this also
|
||||
// acts as a bounds check elimination hint.
|
||||
for len(src) >= 64 && len(dst) >= 64 {
|
||||
// The remainder of the first column round.
|
||||
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
|
||||
|
||||
// The second diagonal round.
|
||||
x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15)
|
||||
x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12)
|
||||
x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13)
|
||||
x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14)
|
||||
|
||||
// The remaining 18 rounds.
|
||||
for i := 0; i < 9; i++ {
|
||||
// Column round.
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
// Diagonal round.
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
// Add back the initial state to generate the key stream, then
|
||||
// XOR the key stream with the source and write out the result.
|
||||
addXor(dst[0:4], src[0:4], x0, c0)
|
||||
addXor(dst[4:8], src[4:8], x1, c1)
|
||||
addXor(dst[8:12], src[8:12], x2, c2)
|
||||
addXor(dst[12:16], src[12:16], x3, c3)
|
||||
addXor(dst[16:20], src[16:20], x4, c4)
|
||||
addXor(dst[20:24], src[20:24], x5, c5)
|
||||
addXor(dst[24:28], src[24:28], x6, c6)
|
||||
addXor(dst[28:32], src[28:32], x7, c7)
|
||||
addXor(dst[32:36], src[32:36], x8, c8)
|
||||
addXor(dst[36:40], src[36:40], x9, c9)
|
||||
addXor(dst[40:44], src[40:44], x10, c10)
|
||||
addXor(dst[44:48], src[44:48], x11, c11)
|
||||
addXor(dst[48:52], src[48:52], x12, s.counter)
|
||||
addXor(dst[52:56], src[52:56], x13, c13)
|
||||
addXor(dst[56:60], src[56:60], x14, c14)
|
||||
addXor(dst[60:64], src[60:64], x15, c15)
|
||||
|
||||
s.counter += 1
|
||||
|
||||
src, dst = src[blockSize:], dst[blockSize:]
|
||||
}
|
||||
}
|
||||
|
||||
// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes
|
||||
// key and a 16 bytes nonce. It returns an error if key or nonce have any other
|
||||
// length. It is used as part of the XChaCha20 construction.
|
||||
func HChaCha20(key, nonce []byte) ([]byte, error) {
|
||||
// This function is split into a wrapper so that the slice allocation will
|
||||
// be inlined, and depending on how the caller uses the return value, won't
|
||||
// escape to the heap.
|
||||
out := make([]byte, 32)
|
||||
return hChaCha20(out, key, nonce)
|
||||
}
|
||||
|
||||
func hChaCha20(out, key, nonce []byte) ([]byte, error) {
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20: wrong HChaCha20 key size")
|
||||
}
|
||||
if len(nonce) != 16 {
|
||||
return nil, errors.New("chacha20: wrong HChaCha20 nonce size")
|
||||
}
|
||||
|
||||
x0, x1, x2, x3 := j0, j1, j2, j3
|
||||
x4 := binary.LittleEndian.Uint32(key[0:4])
|
||||
x5 := binary.LittleEndian.Uint32(key[4:8])
|
||||
x6 := binary.LittleEndian.Uint32(key[8:12])
|
||||
x7 := binary.LittleEndian.Uint32(key[12:16])
|
||||
x8 := binary.LittleEndian.Uint32(key[16:20])
|
||||
x9 := binary.LittleEndian.Uint32(key[20:24])
|
||||
x10 := binary.LittleEndian.Uint32(key[24:28])
|
||||
x11 := binary.LittleEndian.Uint32(key[28:32])
|
||||
x12 := binary.LittleEndian.Uint32(nonce[0:4])
|
||||
x13 := binary.LittleEndian.Uint32(nonce[4:8])
|
||||
x14 := binary.LittleEndian.Uint32(nonce[8:12])
|
||||
x15 := binary.LittleEndian.Uint32(nonce[12:16])
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
// Diagonal round.
|
||||
x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
|
||||
x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
|
||||
x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
|
||||
x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
|
||||
|
||||
// Column round.
|
||||
x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
|
||||
x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
|
||||
x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
|
||||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
_ = out[31] // bounds check elimination hint
|
||||
binary.LittleEndian.PutUint32(out[0:4], x0)
|
||||
binary.LittleEndian.PutUint32(out[4:8], x1)
|
||||
binary.LittleEndian.PutUint32(out[8:12], x2)
|
||||
binary.LittleEndian.PutUint32(out[12:16], x3)
|
||||
binary.LittleEndian.PutUint32(out[16:20], x12)
|
||||
binary.LittleEndian.PutUint32(out[20:24], x13)
|
||||
binary.LittleEndian.PutUint32(out[24:28], x14)
|
||||
binary.LittleEndian.PutUint32(out[28:32], x15)
|
||||
return out, nil
|
||||
}
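// Editor's note: a minimal usage sketch, not part of the upstream file.
// HChaCha20 derives the XChaCha20 subkey from the first 16 nonce bytes; the
// zero-valued key and nonce are placeholder assumptions for illustration.
//
//	key := make([]byte, KeySize)
//	nonce := make([]byte, NonceSizeX) // 24 bytes
//	subKey, err := HChaCha20(key, nonce[:16])
//	if err == nil {
//		_ = subKey // 32-byte derived key, as used internally by NewUnauthenticatedCipher
//	}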
|
||||
13
vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build (!arm64 && !s390x && !ppc64 && !ppc64le) || !gc || purego

package chacha20

const bufSize = blockSize

func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	s.xorKeyStreamBlocksGeneric(dst, src)
}
16
vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gc && !purego && (ppc64 || ppc64le)

package chacha20

const bufSize = 256

//go:noescape
func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)

func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
	chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter)
}
501
vendor/golang.org/x/crypto/chacha20/chacha_ppc64x.s
generated
vendored
Normal file
@@ -0,0 +1,501 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on CRYPTOGAMS code with the following comment:
|
||||
// # ====================================================================
|
||||
// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
// # project. The module is, however, dual licensed under OpenSSL and
|
||||
// # CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
// # details see http://www.openssl.org/~appro/cryptogams/.
|
||||
// # ====================================================================
|
||||
|
||||
// Code for the perl script that generates the ppc64 assembler
|
||||
// can be found in the cryptogams repository at the link below. It is based on
|
||||
// the original from openssl.
|
||||
|
||||
// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91
|
||||
|
||||
// The differences in this and the original implementation are
|
||||
// due to the calling conventions and initialization of constants.
|
||||
|
||||
//go:build gc && !purego && (ppc64 || ppc64le)
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
#define OUT R3
|
||||
#define INP R4
|
||||
#define LEN R5
|
||||
#define KEY R6
|
||||
#define CNT R7
|
||||
#define TMP R15
|
||||
|
||||
#define CONSTBASE R16
|
||||
#define BLOCKS R17
|
||||
|
||||
// for VPERMXOR
|
||||
#define MASK R18
|
||||
|
||||
DATA consts<>+0x00(SB)/4, $0x61707865
|
||||
DATA consts<>+0x04(SB)/4, $0x3320646e
|
||||
DATA consts<>+0x08(SB)/4, $0x79622d32
|
||||
DATA consts<>+0x0c(SB)/4, $0x6b206574
|
||||
DATA consts<>+0x10(SB)/4, $0x00000001
|
||||
DATA consts<>+0x14(SB)/4, $0x00000000
|
||||
DATA consts<>+0x18(SB)/4, $0x00000000
|
||||
DATA consts<>+0x1c(SB)/4, $0x00000000
|
||||
DATA consts<>+0x20(SB)/4, $0x00000004
|
||||
DATA consts<>+0x24(SB)/4, $0x00000000
|
||||
DATA consts<>+0x28(SB)/4, $0x00000000
|
||||
DATA consts<>+0x2c(SB)/4, $0x00000000
|
||||
DATA consts<>+0x30(SB)/4, $0x0e0f0c0d
|
||||
DATA consts<>+0x34(SB)/4, $0x0a0b0809
|
||||
DATA consts<>+0x38(SB)/4, $0x06070405
|
||||
DATA consts<>+0x3c(SB)/4, $0x02030001
|
||||
DATA consts<>+0x40(SB)/4, $0x0d0e0f0c
|
||||
DATA consts<>+0x44(SB)/4, $0x090a0b08
|
||||
DATA consts<>+0x48(SB)/4, $0x05060704
|
||||
DATA consts<>+0x4c(SB)/4, $0x01020300
|
||||
DATA consts<>+0x50(SB)/4, $0x61707865
|
||||
DATA consts<>+0x54(SB)/4, $0x61707865
|
||||
DATA consts<>+0x58(SB)/4, $0x61707865
|
||||
DATA consts<>+0x5c(SB)/4, $0x61707865
|
||||
DATA consts<>+0x60(SB)/4, $0x3320646e
|
||||
DATA consts<>+0x64(SB)/4, $0x3320646e
|
||||
DATA consts<>+0x68(SB)/4, $0x3320646e
|
||||
DATA consts<>+0x6c(SB)/4, $0x3320646e
|
||||
DATA consts<>+0x70(SB)/4, $0x79622d32
|
||||
DATA consts<>+0x74(SB)/4, $0x79622d32
|
||||
DATA consts<>+0x78(SB)/4, $0x79622d32
|
||||
DATA consts<>+0x7c(SB)/4, $0x79622d32
|
||||
DATA consts<>+0x80(SB)/4, $0x6b206574
|
||||
DATA consts<>+0x84(SB)/4, $0x6b206574
|
||||
DATA consts<>+0x88(SB)/4, $0x6b206574
|
||||
DATA consts<>+0x8c(SB)/4, $0x6b206574
|
||||
DATA consts<>+0x90(SB)/4, $0x00000000
|
||||
DATA consts<>+0x94(SB)/4, $0x00000001
|
||||
DATA consts<>+0x98(SB)/4, $0x00000002
|
||||
DATA consts<>+0x9c(SB)/4, $0x00000003
|
||||
DATA consts<>+0xa0(SB)/4, $0x11223300
|
||||
DATA consts<>+0xa4(SB)/4, $0x55667744
|
||||
DATA consts<>+0xa8(SB)/4, $0x99aabb88
|
||||
DATA consts<>+0xac(SB)/4, $0xddeeffcc
|
||||
DATA consts<>+0xb0(SB)/4, $0x22330011
|
||||
DATA consts<>+0xb4(SB)/4, $0x66774455
|
||||
DATA consts<>+0xb8(SB)/4, $0xaabb8899
|
||||
DATA consts<>+0xbc(SB)/4, $0xeeffccdd
|
||||
GLOBL consts<>(SB), RODATA, $0xc0
|
||||
|
||||
#ifdef GOARCH_ppc64
|
||||
#define BE_XXBRW_INIT() \
|
||||
LVSL (R0)(R0), V24 \
|
||||
VSPLTISB $3, V25 \
|
||||
VXOR V24, V25, V24 \
|
||||
|
||||
#define BE_XXBRW(vr) VPERM vr, vr, V24, vr
|
||||
#else
|
||||
#define BE_XXBRW_INIT()
|
||||
#define BE_XXBRW(vr)
|
||||
#endif
|
||||
|
||||
//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
|
||||
TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40
|
||||
MOVD out+0(FP), OUT
|
||||
MOVD inp+8(FP), INP
|
||||
MOVD len+16(FP), LEN
|
||||
MOVD key+24(FP), KEY
|
||||
MOVD counter+32(FP), CNT
|
||||
|
||||
// Addressing for constants
|
||||
MOVD $consts<>+0x00(SB), CONSTBASE
|
||||
MOVD $16, R8
|
||||
MOVD $32, R9
|
||||
MOVD $48, R10
|
||||
MOVD $64, R11
|
||||
SRD $6, LEN, BLOCKS
|
||||
// for VPERMXOR
|
||||
MOVD $consts<>+0xa0(SB), MASK
|
||||
MOVD $16, R20
|
||||
// V16
|
||||
LXVW4X (CONSTBASE)(R0), VS48
|
||||
ADD $80,CONSTBASE
|
||||
|
||||
// Load key into V17,V18
|
||||
LXVW4X (KEY)(R0), VS49
|
||||
LXVW4X (KEY)(R8), VS50
|
||||
|
||||
// Load CNT, NONCE into V19
|
||||
LXVW4X (CNT)(R0), VS51
|
||||
|
||||
// Clear V27
|
||||
VXOR V27, V27, V27
|
||||
|
||||
BE_XXBRW_INIT()
|
||||
|
||||
// V28
|
||||
LXVW4X (CONSTBASE)(R11), VS60
|
||||
|
||||
// Load mask constants for VPERMXOR
|
||||
LXVW4X (MASK)(R0), V20
|
||||
LXVW4X (MASK)(R20), V21
|
||||
|
||||
// splat slot from V19 -> V26
|
||||
VSPLTW $0, V19, V26
|
||||
|
||||
VSLDOI $4, V19, V27, V19
|
||||
VSLDOI $12, V27, V19, V19
|
||||
|
||||
VADDUWM V26, V28, V26
|
||||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
PCALIGN $16
|
||||
loop_outer_vsx:
|
||||
// V0, V1, V2, V3
|
||||
LXVW4X (R0)(CONSTBASE), VS32
|
||||
LXVW4X (R8)(CONSTBASE), VS33
|
||||
LXVW4X (R9)(CONSTBASE), VS34
|
||||
LXVW4X (R10)(CONSTBASE), VS35
|
||||
|
||||
// splat values from V17, V18 into V4-V11
|
||||
VSPLTW $0, V17, V4
|
||||
VSPLTW $1, V17, V5
|
||||
VSPLTW $2, V17, V6
|
||||
VSPLTW $3, V17, V7
|
||||
VSPLTW $0, V18, V8
|
||||
VSPLTW $1, V18, V9
|
||||
VSPLTW $2, V18, V10
|
||||
VSPLTW $3, V18, V11
|
||||
|
||||
// VOR
|
||||
VOR V26, V26, V12
|
||||
|
||||
// splat values from V19 -> V13, V14, V15
|
||||
VSPLTW $1, V19, V13
|
||||
VSPLTW $2, V19, V14
|
||||
VSPLTW $3, V19, V15
|
||||
|
||||
// splat const values
|
||||
VSPLTISW $-16, V27
|
||||
VSPLTISW $12, V28
|
||||
VSPLTISW $8, V29
|
||||
VSPLTISW $7, V30
|
||||
PCALIGN $16
|
||||
loop_vsx:
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VPERMXOR V12, V0, V21, V12
|
||||
VPERMXOR V13, V1, V21, V13
|
||||
VPERMXOR V14, V2, V21, V14
|
||||
VPERMXOR V15, V3, V21, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
VADDUWM V10, V14, V10
|
||||
VADDUWM V11, V15, V11
|
||||
|
||||
VXOR V4, V8, V4
|
||||
VXOR V5, V9, V5
|
||||
VXOR V6, V10, V6
|
||||
VXOR V7, V11, V7
|
||||
|
||||
VRLW V4, V28, V4
|
||||
VRLW V5, V28, V5
|
||||
VRLW V6, V28, V6
|
||||
VRLW V7, V28, V7
|
||||
|
||||
VADDUWM V0, V4, V0
|
||||
VADDUWM V1, V5, V1
|
||||
VADDUWM V2, V6, V2
|
||||
VADDUWM V3, V7, V3
|
||||
|
||||
VPERMXOR V12, V0, V20, V12
|
||||
VPERMXOR V13, V1, V20, V13
|
||||
VPERMXOR V14, V2, V20, V14
|
||||
VPERMXOR V15, V3, V20, V15
|
||||
|
||||
VADDUWM V8, V12, V8
|
||||
VADDUWM V9, V13, V9
|
||||
VADDUWM V10, V14, V10
|
||||
VADDUWM V11, V15, V11
|
||||
|
||||
VXOR V4, V8, V4
|
||||
VXOR V5, V9, V5
|
||||
VXOR V6, V10, V6
|
||||
VXOR V7, V11, V7
|
||||
|
||||
VRLW V4, V30, V4
|
||||
VRLW V5, V30, V5
|
||||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
|
||||
VADDUWM V0, V5, V0
|
||||
VADDUWM V1, V6, V1
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VPERMXOR V15, V0, V21, V15
|
||||
VPERMXOR V12, V1, V21, V12
|
||||
VPERMXOR V13, V2, V21, V13
|
||||
VPERMXOR V14, V3, V21, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
VADDUWM V8, V13, V8
|
||||
VADDUWM V9, V14, V9
|
||||
|
||||
VXOR V5, V10, V5
|
||||
VXOR V6, V11, V6
|
||||
VXOR V7, V8, V7
|
||||
VXOR V4, V9, V4
|
||||
|
||||
VRLW V5, V28, V5
|
||||
VRLW V6, V28, V6
|
||||
VRLW V7, V28, V7
|
||||
VRLW V4, V28, V4
|
||||
|
||||
VADDUWM V0, V5, V0
|
||||
VADDUWM V1, V6, V1
|
||||
VADDUWM V2, V7, V2
|
||||
VADDUWM V3, V4, V3
|
||||
|
||||
VPERMXOR V15, V0, V20, V15
|
||||
VPERMXOR V12, V1, V20, V12
|
||||
VPERMXOR V13, V2, V20, V13
|
||||
VPERMXOR V14, V3, V20, V14
|
||||
|
||||
VADDUWM V10, V15, V10
|
||||
VADDUWM V11, V12, V11
|
||||
VADDUWM V8, V13, V8
|
||||
VADDUWM V9, V14, V9
|
||||
|
||||
VXOR V5, V10, V5
|
||||
VXOR V6, V11, V6
|
||||
VXOR V7, V8, V7
|
||||
VXOR V4, V9, V4
|
||||
|
||||
VRLW V5, V30, V5
|
||||
VRLW V6, V30, V6
|
||||
VRLW V7, V30, V7
|
||||
VRLW V4, V30, V4
|
||||
BDNZ loop_vsx
|
||||
|
||||
VADDUWM V12, V26, V12
|
||||
|
||||
VMRGEW V0, V1, V27
|
||||
VMRGEW V2, V3, V28
|
||||
|
||||
VMRGOW V0, V1, V0
|
||||
VMRGOW V2, V3, V2
|
||||
|
||||
VMRGEW V4, V5, V29
|
||||
VMRGEW V6, V7, V30
|
||||
|
||||
XXPERMDI VS32, VS34, $0, VS33
|
||||
XXPERMDI VS32, VS34, $3, VS35
|
||||
XXPERMDI VS59, VS60, $0, VS32
|
||||
XXPERMDI VS59, VS60, $3, VS34
|
||||
|
||||
VMRGOW V4, V5, V4
|
||||
VMRGOW V6, V7, V6
|
||||
|
||||
VMRGEW V8, V9, V27
|
||||
VMRGEW V10, V11, V28
|
||||
|
||||
XXPERMDI VS36, VS38, $0, VS37
|
||||
XXPERMDI VS36, VS38, $3, VS39
|
||||
XXPERMDI VS61, VS62, $0, VS36
|
||||
XXPERMDI VS61, VS62, $3, VS38
|
||||
|
||||
VMRGOW V8, V9, V8
|
||||
VMRGOW V10, V11, V10
|
||||
|
||||
VMRGEW V12, V13, V29
|
||||
VMRGEW V14, V15, V30
|
||||
|
||||
XXPERMDI VS40, VS42, $0, VS41
|
||||
XXPERMDI VS40, VS42, $3, VS43
|
||||
XXPERMDI VS59, VS60, $0, VS40
|
||||
XXPERMDI VS59, VS60, $3, VS42
|
||||
|
||||
VMRGOW V12, V13, V12
|
||||
VMRGOW V14, V15, V14
|
||||
|
||||
VSPLTISW $4, V27
|
||||
VADDUWM V26, V27, V26
|
||||
|
||||
XXPERMDI VS44, VS46, $0, VS45
|
||||
XXPERMDI VS44, VS46, $3, VS47
|
||||
XXPERMDI VS61, VS62, $0, VS44
|
||||
XXPERMDI VS61, VS62, $3, VS46
|
||||
|
||||
VADDUWM V0, V16, V0
|
||||
VADDUWM V4, V17, V4
|
||||
VADDUWM V8, V18, V8
|
||||
VADDUWM V12, V19, V12
|
||||
|
||||
BE_XXBRW(V0)
|
||||
BE_XXBRW(V4)
|
||||
BE_XXBRW(V8)
|
||||
BE_XXBRW(V12)
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
// Bottom of loop
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V1, V16, V0
|
||||
VADDUWM V5, V17, V4
|
||||
VADDUWM V9, V18, V8
|
||||
VADDUWM V13, V19, V12
|
||||
|
||||
BE_XXBRW(V0)
|
||||
BE_XXBRW(V4)
|
||||
BE_XXBRW(V8)
|
||||
BE_XXBRW(V12)
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V2, V16, V0
|
||||
VADDUWM V6, V17, V4
|
||||
VADDUWM V10, V18, V8
|
||||
VADDUWM V14, V19, V12
|
||||
|
||||
BE_XXBRW(V0)
|
||||
BE_XXBRW(V4)
|
||||
BE_XXBRW(V8)
|
||||
BE_XXBRW(V12)
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
BEQ done_vsx
|
||||
|
||||
VADDUWM V3, V16, V0
|
||||
VADDUWM V7, V17, V4
|
||||
VADDUWM V11, V18, V8
|
||||
VADDUWM V15, V19, V12
|
||||
|
||||
BE_XXBRW(V0)
|
||||
BE_XXBRW(V4)
|
||||
BE_XXBRW(V8)
|
||||
BE_XXBRW(V12)
|
||||
|
||||
CMPU LEN, $64
|
||||
BLT tail_vsx
|
||||
|
||||
LXVW4X (INP)(R0), VS59
|
||||
LXVW4X (INP)(R8), VS60
|
||||
LXVW4X (INP)(R9), VS61
|
||||
LXVW4X (INP)(R10), VS62
|
||||
|
||||
VXOR V27, V0, V27
|
||||
VXOR V28, V4, V28
|
||||
VXOR V29, V8, V29
|
||||
VXOR V30, V12, V30
|
||||
|
||||
STXVW4X VS59, (OUT)(R0)
|
||||
STXVW4X VS60, (OUT)(R8)
|
||||
ADD $64, INP
|
||||
STXVW4X VS61, (OUT)(R9)
|
||||
ADD $-64, LEN
|
||||
STXVW4X VS62, (OUT)(R10)
|
||||
ADD $64, OUT
|
||||
|
||||
MOVD $10, R14
|
||||
MOVD R14, CTR
|
||||
BNE loop_outer_vsx
|
||||
|
||||
done_vsx:
|
||||
// Increment counter by number of 64 byte blocks
|
||||
MOVWZ (CNT), R14
|
||||
ADD BLOCKS, R14
|
||||
MOVWZ R14, (CNT)
|
||||
RET
|
||||
|
||||
tail_vsx:
|
||||
ADD $32, R1, R11
|
||||
MOVD LEN, CTR
|
||||
|
||||
// Save values on stack to copy from
|
||||
STXVW4X VS32, (R11)(R0)
|
||||
STXVW4X VS36, (R11)(R8)
|
||||
STXVW4X VS40, (R11)(R9)
|
||||
STXVW4X VS44, (R11)(R10)
|
||||
ADD $-1, R11, R12
|
||||
ADD $-1, INP
|
||||
ADD $-1, OUT
|
||||
PCALIGN $16
|
||||
looptail_vsx:
|
||||
// Copy the result to OUT, one byte at a time.
|
||||
MOVBZU 1(R12), KEY
|
||||
MOVBZU 1(INP), TMP
|
||||
XOR KEY, TMP, KEY
|
||||
MOVBU KEY, 1(OUT)
|
||||
BDNZ looptail_vsx
|
||||
|
||||
// Clear the stack values
|
||||
STXVW4X VS48, (R11)(R0)
|
||||
STXVW4X VS48, (R11)(R8)
|
||||
STXVW4X VS48, (R11)(R9)
|
||||
STXVW4X VS48, (R11)(R10)
|
||||
BR done_vsx
|
||||
27
vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
package chacha20
|
||||
|
||||
import "golang.org/x/sys/cpu"
|
||||
|
||||
var haveAsm = cpu.S390X.HasVX
|
||||
|
||||
const bufSize = 256
|
||||
|
||||
// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
|
||||
// be called when the vector facility is available. Implementation in chacha_s390x.s.
|
||||
//
|
||||
//go:noescape
|
||||
func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
|
||||
|
||||
func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) {
|
||||
if cpu.S390X.HasVX {
|
||||
xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
|
||||
} else {
|
||||
c.xorKeyStreamBlocksGeneric(dst, src)
|
||||
}
|
||||
}
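// Illustrative sketch (not part of the vendored file): the ROUND4 macro in
// chacha_s390x.s below runs the standard ChaCha20 quarter round from RFC 7539
// over four keystream blocks at once. For reference, a scalar Go version of a
// single quarter round looks like this:

package chachasketch // hypothetical package, for illustration only

import "math/bits"

// quarterRound mixes four words of the 16-word ChaCha20 state in place:
// add, xor, then rotate left by 16, 12, 8 and 7 bits (RFC 7539, section 2.1).
func quarterRound(state *[16]uint32, a, b, c, d int) {
	state[a] += state[b]
	state[d] = bits.RotateLeft32(state[d]^state[a], 16)
	state[c] += state[d]
	state[b] = bits.RotateLeft32(state[b]^state[c], 12)
	state[a] += state[b]
	state[d] = bits.RotateLeft32(state[d]^state[a], 8)
	state[c] += state[d]
	state[b] = bits.RotateLeft32(state[b]^state[c], 7)
}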
|
||||
224
vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
#include "go_asm.h"
|
||||
#include "textflag.h"
|
||||
|
||||
// This is an implementation of the ChaCha20 encryption algorithm as
|
||||
// specified in RFC 7539. It uses vector instructions to compute
|
||||
// 4 keystream blocks in parallel (256 bytes) which are then XORed
|
||||
// with the bytes in the input slice.
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA|NOPTR, $32
|
||||
// BSWAP: swap bytes in each 4-byte element
|
||||
DATA ·constants<>+0x00(SB)/4, $0x03020100
|
||||
DATA ·constants<>+0x04(SB)/4, $0x07060504
|
||||
DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
|
||||
DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
|
||||
// J0: [j0, j1, j2, j3]
|
||||
DATA ·constants<>+0x10(SB)/4, $0x61707865
|
||||
DATA ·constants<>+0x14(SB)/4, $0x3320646e
|
||||
DATA ·constants<>+0x18(SB)/4, $0x79622d32
|
||||
DATA ·constants<>+0x1c(SB)/4, $0x6b206574
|
||||
|
||||
#define BSWAP V5
|
||||
#define J0 V6
|
||||
#define KEY0 V7
|
||||
#define KEY1 V8
|
||||
#define NONCE V9
|
||||
#define CTR V10
|
||||
#define M0 V11
|
||||
#define M1 V12
|
||||
#define M2 V13
|
||||
#define M3 V14
|
||||
#define INC V15
|
||||
#define X0 V16
|
||||
#define X1 V17
|
||||
#define X2 V18
|
||||
#define X3 V19
|
||||
#define X4 V20
|
||||
#define X5 V21
|
||||
#define X6 V22
|
||||
#define X7 V23
|
||||
#define X8 V24
|
||||
#define X9 V25
|
||||
#define X10 V26
|
||||
#define X11 V27
|
||||
#define X12 V28
|
||||
#define X13 V29
|
||||
#define X14 V30
|
||||
#define X15 V31
|
||||
|
||||
#define NUM_ROUNDS 20
|
||||
|
||||
#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
|
||||
VAF a1, a0, a0 \
|
||||
VAF b1, b0, b0 \
|
||||
VAF c1, c0, c0 \
|
||||
VAF d1, d0, d0 \
|
||||
VX a0, a2, a2 \
|
||||
VX b0, b2, b2 \
|
||||
VX c0, c2, c2 \
|
||||
VX d0, d2, d2 \
|
||||
VERLLF $16, a2, a2 \
|
||||
VERLLF $16, b2, b2 \
|
||||
VERLLF $16, c2, c2 \
|
||||
VERLLF $16, d2, d2 \
|
||||
VAF a2, a3, a3 \
|
||||
VAF b2, b3, b3 \
|
||||
VAF c2, c3, c3 \
|
||||
VAF d2, d3, d3 \
|
||||
VX a3, a1, a1 \
|
||||
VX b3, b1, b1 \
|
||||
VX c3, c1, c1 \
|
||||
VX d3, d1, d1 \
|
||||
VERLLF $12, a1, a1 \
|
||||
VERLLF $12, b1, b1 \
|
||||
VERLLF $12, c1, c1 \
|
||||
VERLLF $12, d1, d1 \
|
||||
VAF a1, a0, a0 \
|
||||
VAF b1, b0, b0 \
|
||||
VAF c1, c0, c0 \
|
||||
VAF d1, d0, d0 \
|
||||
VX a0, a2, a2 \
|
||||
VX b0, b2, b2 \
|
||||
VX c0, c2, c2 \
|
||||
VX d0, d2, d2 \
|
||||
VERLLF $8, a2, a2 \
|
||||
VERLLF $8, b2, b2 \
|
||||
VERLLF $8, c2, c2 \
|
||||
VERLLF $8, d2, d2 \
|
||||
VAF a2, a3, a3 \
|
||||
VAF b2, b3, b3 \
|
||||
VAF c2, c3, c3 \
|
||||
VAF d2, d3, d3 \
|
||||
VX a3, a1, a1 \
|
||||
VX b3, b1, b1 \
|
||||
VX c3, c1, c1 \
|
||||
VX d3, d1, d1 \
|
||||
VERLLF $7, a1, a1 \
|
||||
VERLLF $7, b1, b1 \
|
||||
VERLLF $7, c1, c1 \
|
||||
VERLLF $7, d1, d1
|
||||
|
||||
#define PERMUTE(mask, v0, v1, v2, v3) \
|
||||
VPERM v0, v0, mask, v0 \
|
||||
VPERM v1, v1, mask, v1 \
|
||||
VPERM v2, v2, mask, v2 \
|
||||
VPERM v3, v3, mask, v3
|
||||
|
||||
#define ADDV(x, v0, v1, v2, v3) \
|
||||
VAF x, v0, v0 \
|
||||
VAF x, v1, v1 \
|
||||
VAF x, v2, v2 \
|
||||
VAF x, v3, v3
|
||||
|
||||
#define XORV(off, dst, src, v0, v1, v2, v3) \
|
||||
VLM off(src), M0, M3 \
|
||||
PERMUTE(BSWAP, v0, v1, v2, v3) \
|
||||
VX v0, M0, M0 \
|
||||
VX v1, M1, M1 \
|
||||
VX v2, M2, M2 \
|
||||
VX v3, M3, M3 \
|
||||
VSTM M0, M3, off(dst)
|
||||
|
||||
#define SHUFFLE(a, b, c, d, t, u, v, w) \
|
||||
VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
|
||||
VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
|
||||
VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
|
||||
VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
|
||||
VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
|
||||
VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
|
||||
VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
|
||||
VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
|
||||
|
||||
// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
|
||||
TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
|
||||
MOVD $·constants<>(SB), R1
|
||||
MOVD dst+0(FP), R2 // R2=&dst[0]
|
||||
LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
|
||||
MOVD key+48(FP), R5 // R5=key
|
||||
MOVD nonce+56(FP), R6 // R6=nonce
|
||||
MOVD counter+64(FP), R7 // R7=counter
|
||||
|
||||
// load BSWAP and J0
|
||||
VLM (R1), BSWAP, J0
|
||||
|
||||
// setup
|
||||
MOVD $95, R0
|
||||
VLM (R5), KEY0, KEY1
|
||||
VLL R0, (R6), NONCE
|
||||
VZERO M0
|
||||
VLEIB $7, $32, M0
|
||||
VSRLB M0, NONCE, NONCE
|
||||
|
||||
// initialize counter values
|
||||
VLREPF (R7), CTR
|
||||
VZERO INC
|
||||
VLEIF $1, $1, INC
|
||||
VLEIF $2, $2, INC
|
||||
VLEIF $3, $3, INC
|
||||
VAF INC, CTR, CTR
|
||||
VREPIF $4, INC
|
||||
|
||||
chacha:
|
||||
VREPF $0, J0, X0
|
||||
VREPF $1, J0, X1
|
||||
VREPF $2, J0, X2
|
||||
VREPF $3, J0, X3
|
||||
VREPF $0, KEY0, X4
|
||||
VREPF $1, KEY0, X5
|
||||
VREPF $2, KEY0, X6
|
||||
VREPF $3, KEY0, X7
|
||||
VREPF $0, KEY1, X8
|
||||
VREPF $1, KEY1, X9
|
||||
VREPF $2, KEY1, X10
|
||||
VREPF $3, KEY1, X11
|
||||
VLR CTR, X12
|
||||
VREPF $1, NONCE, X13
|
||||
VREPF $2, NONCE, X14
|
||||
VREPF $3, NONCE, X15
|
||||
|
||||
MOVD $(NUM_ROUNDS/2), R1
|
||||
|
||||
loop:
|
||||
ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
|
||||
ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
|
||||
|
||||
ADD $-1, R1
|
||||
BNE loop
|
||||
|
||||
// decrement length
|
||||
ADD $-256, R4
|
||||
|
||||
// rearrange vectors
|
||||
SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
|
||||
ADDV(J0, X0, X1, X2, X3)
|
||||
SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
|
||||
ADDV(KEY0, X4, X5, X6, X7)
|
||||
SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
|
||||
ADDV(KEY1, X8, X9, X10, X11)
|
||||
VAF CTR, X12, X12
|
||||
SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
|
||||
ADDV(NONCE, X12, X13, X14, X15)
|
||||
|
||||
// increment counters
|
||||
VAF INC, CTR, CTR
|
||||
|
||||
// xor keystream with plaintext
|
||||
XORV(0*64, R2, R3, X0, X4, X8, X12)
|
||||
XORV(1*64, R2, R3, X1, X5, X9, X13)
|
||||
XORV(2*64, R2, R3, X2, X6, X10, X14)
|
||||
XORV(3*64, R2, R3, X3, X7, X11, X15)
|
||||
|
||||
// increment pointers
|
||||
MOVD $256(R2), R2
|
||||
MOVD $256(R3), R3
|
||||
|
||||
CMPBNE R4, $0, chacha
|
||||
|
||||
VSTEF $0, CTR, (R7)
|
||||
RET
|
||||
42
vendor/golang.org/x/crypto/chacha20/xor.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package chacha20
|
||||
|
||||
import "runtime"
|
||||
|
||||
// Platforms that have fast unaligned 32-bit little endian accesses.
|
||||
const unaligned = runtime.GOARCH == "386" ||
|
||||
runtime.GOARCH == "amd64" ||
|
||||
runtime.GOARCH == "arm64" ||
|
||||
runtime.GOARCH == "ppc64le" ||
|
||||
runtime.GOARCH == "s390x"
|
||||
|
||||
// addXor reads a little endian uint32 from src, XORs it with (a + b) and
|
||||
// places the result in little endian byte order in dst.
|
||||
func addXor(dst, src []byte, a, b uint32) {
|
||||
_, _ = src[3], dst[3] // bounds check elimination hint
|
||||
if unaligned {
|
||||
// The compiler should optimize this code into
|
||||
// 32-bit unaligned little endian loads and stores.
|
||||
// TODO: delete once the compiler does a reliably
|
||||
// good job with the generic code below.
|
||||
// See issue #25111 for more details.
|
||||
v := uint32(src[0])
|
||||
v |= uint32(src[1]) << 8
|
||||
v |= uint32(src[2]) << 16
|
||||
v |= uint32(src[3]) << 24
|
||||
v ^= a + b
|
||||
dst[0] = byte(v)
|
||||
dst[1] = byte(v >> 8)
|
||||
dst[2] = byte(v >> 16)
|
||||
dst[3] = byte(v >> 24)
|
||||
} else {
|
||||
a += b
|
||||
dst[0] = src[0] ^ byte(a)
|
||||
dst[1] = src[1] ^ byte(a>>8)
|
||||
dst[2] = src[2] ^ byte(a>>16)
|
||||
dst[3] = src[3] ^ byte(a>>24)
|
||||
}
|
||||
}
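// Illustrative sketch (not part of this file): addXor above is equivalent to
// the following reference version written with encoding/binary; the unaligned
// fast path and the byte-by-byte path must both match it on every architecture.

package xorsketch // hypothetical package, for illustration only

import "encoding/binary"

// addXorRef XORs the little-endian uint32 at src[:4] with (a + b) and stores
// the result little-endian into dst[:4].
func addXorRef(dst, src []byte, a, b uint32) {
	v := binary.LittleEndian.Uint32(src[:4])
	binary.LittleEndian.PutUint32(dst[:4], v^(a+b))
}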
|
||||
101
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its
|
||||
// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and
|
||||
// draft-irtf-cfrg-xchacha-01.
|
||||
package chacha20poly1305
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// KeySize is the size of the key used by this AEAD, in bytes.
|
||||
KeySize = 32
|
||||
|
||||
// NonceSize is the size of the nonce used with the standard variant of this
|
||||
// AEAD, in bytes.
|
||||
//
|
||||
// Note that this is too short to be safely generated at random if the same
|
||||
// key is reused more than 2³² times.
|
||||
NonceSize = 12
|
||||
|
||||
// NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305
|
||||
// variant of this AEAD, in bytes.
|
||||
NonceSizeX = 24
|
||||
|
||||
// Overhead is the size of the Poly1305 authentication tag, and the
|
||||
// difference between a ciphertext length and its plaintext.
|
||||
Overhead = 16
|
||||
)
|
||||
|
||||
type chacha20poly1305 struct {
|
||||
key [KeySize]byte
|
||||
}
|
||||
|
||||
// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
|
||||
func New(key []byte) (cipher.AEAD, error) {
|
||||
if fips140Enforced() {
|
||||
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
|
||||
}
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20poly1305: bad key length")
|
||||
}
|
||||
ret := new(chacha20poly1305)
|
||||
copy(ret.key[:], key)
|
||||
return ret, nil
|
||||
}
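// Hedged usage sketch (not part of this package): a Seal/Open round trip with
// the 12-byte nonce variant, binding additional data. The random nonce here is
// only for brevity; per the NonceSize note above, prefer a counter-based nonce
// or NewX when nonce uniqueness cannot be tracked carefully.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.New(key)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, chacha20poly1305.NonceSize)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	ad := []byte("header-v1") // authenticated but not encrypted
	ct := aead.Seal(nil, nonce, []byte("hello"), ad)
	pt, err := aead.Open(nil, nonce, ct, ad)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt) // hello
}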
|
||||
|
||||
func (c *chacha20poly1305) NonceSize() int {
|
||||
return NonceSize
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) Overhead() int {
|
||||
return Overhead
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
if len(nonce) != NonceSize {
|
||||
panic("chacha20poly1305: bad nonce length passed to Seal")
|
||||
}
|
||||
|
||||
if uint64(len(plaintext)) > (1<<38)-64 {
|
||||
panic("chacha20poly1305: plaintext too large")
|
||||
}
|
||||
|
||||
return c.seal(dst, nonce, plaintext, additionalData)
|
||||
}
|
||||
|
||||
var errOpen = errors.New("chacha20poly1305: message authentication failed")
|
||||
|
||||
func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
if len(nonce) != NonceSize {
|
||||
panic("chacha20poly1305: bad nonce length passed to Open")
|
||||
}
|
||||
if len(ciphertext) < 16 {
|
||||
return nil, errOpen
|
||||
}
|
||||
if uint64(len(ciphertext)) > (1<<38)-48 {
|
||||
panic("chacha20poly1305: ciphertext too large")
|
||||
}
|
||||
|
||||
return c.open(dst, nonce, ciphertext, additionalData)
|
||||
}
|
||||
|
||||
// sliceForAppend takes a slice and a requested number of bytes. It returns a
|
||||
// slice with the contents of the given slice followed by that many bytes and a
|
||||
// second slice that aliases into it and contains only the extra bytes. If the
|
||||
// original slice has sufficient capacity then no allocation is performed.
|
||||
func sliceForAppend(in []byte, n int) (head, tail []byte) {
|
||||
if total := len(in) + n; cap(in) >= total {
|
||||
head = in[:total]
|
||||
} else {
|
||||
head = make([]byte, total)
|
||||
copy(head, in)
|
||||
}
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
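// Illustrative same-package sketch: how sliceForAppend is typically used. The
// helper below is hypothetical and exists only to show the pattern; Seal does
// the same thing to reserve room for ciphertext plus tag at the end of dst.

// appendZeros appends n zero bytes to in, reusing in's spare capacity when
// possible, and returns the combined slice.
func appendZeros(in []byte, n int) []byte {
	ret, tail := sliceForAppend(in, n)
	for i := range tail {
		tail[i] = 0 // tail may alias stale bytes from in's capacity
	}
	return ret
}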
|
||||
92
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/crypto/internal/alias"
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
//go:noescape
|
||||
func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool
|
||||
|
||||
//go:noescape
|
||||
func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte)
|
||||
|
||||
var (
|
||||
useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2
|
||||
)
|
||||
|
||||
// setupState writes a ChaCha20 input matrix to state. See
|
||||
// https://tools.ietf.org/html/rfc7539#section-2.3.
|
||||
func setupState(state *[16]uint32, key *[32]byte, nonce []byte) {
|
||||
state[0] = 0x61707865
|
||||
state[1] = 0x3320646e
|
||||
state[2] = 0x79622d32
|
||||
state[3] = 0x6b206574
|
||||
|
||||
state[4] = binary.LittleEndian.Uint32(key[0:4])
|
||||
state[5] = binary.LittleEndian.Uint32(key[4:8])
|
||||
state[6] = binary.LittleEndian.Uint32(key[8:12])
|
||||
state[7] = binary.LittleEndian.Uint32(key[12:16])
|
||||
state[8] = binary.LittleEndian.Uint32(key[16:20])
|
||||
state[9] = binary.LittleEndian.Uint32(key[20:24])
|
||||
state[10] = binary.LittleEndian.Uint32(key[24:28])
|
||||
state[11] = binary.LittleEndian.Uint32(key[28:32])
|
||||
|
||||
state[12] = 0
|
||||
state[13] = binary.LittleEndian.Uint32(nonce[0:4])
|
||||
state[14] = binary.LittleEndian.Uint32(nonce[4:8])
|
||||
state[15] = binary.LittleEndian.Uint32(nonce[8:12])
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
if !cpu.X86.HasSSSE3 {
|
||||
return c.sealGeneric(dst, nonce, plaintext, additionalData)
|
||||
}
|
||||
|
||||
var state [16]uint32
|
||||
setupState(&state, &c.key, nonce)
|
||||
|
||||
ret, out := sliceForAppend(dst, len(plaintext)+16)
|
||||
if alias.InexactOverlap(out, plaintext) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and input")
|
||||
}
|
||||
if alias.AnyOverlap(out, additionalData) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
|
||||
}
|
||||
chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
if !cpu.X86.HasSSSE3 {
|
||||
return c.openGeneric(dst, nonce, ciphertext, additionalData)
|
||||
}
|
||||
|
||||
var state [16]uint32
|
||||
setupState(&state, &c.key, nonce)
|
||||
|
||||
ciphertext = ciphertext[:len(ciphertext)-16]
|
||||
ret, out := sliceForAppend(dst, len(ciphertext))
|
||||
if alias.InexactOverlap(out, ciphertext) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and input")
|
||||
}
|
||||
if alias.AnyOverlap(out, additionalData) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
|
||||
}
|
||||
if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
|
||||
for i := range out {
|
||||
out[i] = 0
|
||||
}
|
||||
return nil, errOpen
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
9762
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s
generated
vendored
Normal file
File diff suppressed because it is too large
87
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go
generated
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
"golang.org/x/crypto/chacha20"
|
||||
"golang.org/x/crypto/internal/alias"
|
||||
"golang.org/x/crypto/internal/poly1305"
|
||||
)
|
||||
|
||||
func writeWithPadding(p *poly1305.MAC, b []byte) {
|
||||
p.Write(b)
|
||||
if rem := len(b) % 16; rem != 0 {
|
||||
var buf [16]byte
|
||||
padLen := 16 - rem
|
||||
p.Write(buf[:padLen])
|
||||
}
|
||||
}
|
||||
|
||||
func writeUint64(p *poly1305.MAC, n int) {
|
||||
var buf [8]byte
|
||||
binary.LittleEndian.PutUint64(buf[:], uint64(n))
|
||||
p.Write(buf[:])
|
||||
}
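// For orientation, a sketch (not part of this file) of the byte string that the
// two streaming helpers above feed to Poly1305, per RFC 8439 section 2.8:
// pad16(AAD) || pad16(ciphertext) || le64(len(AAD)) || le64(len(ciphertext)).
// macData is hypothetical and builds that string eagerly in one buffer, reusing
// the encoding/binary import already present in this file.

func macData(aad, ciphertext []byte) []byte {
	pad16 := func(b []byte) []byte {
		if rem := len(b) % 16; rem != 0 {
			b = append(b, make([]byte, 16-rem)...)
		}
		return b
	}
	out := pad16(append([]byte(nil), aad...))
	out = append(out, pad16(append([]byte(nil), ciphertext...))...)
	var lens [16]byte
	binary.LittleEndian.PutUint64(lens[0:8], uint64(len(aad)))
	binary.LittleEndian.PutUint64(lens[8:16], uint64(len(ciphertext)))
	return append(out, lens[:]...)
}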
|
||||
|
||||
func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
|
||||
ciphertext, tag := out[:len(plaintext)], out[len(plaintext):]
|
||||
if alias.InexactOverlap(out, plaintext) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and input")
|
||||
}
|
||||
if alias.AnyOverlap(out, additionalData) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
|
||||
}
|
||||
|
||||
var polyKey [32]byte
|
||||
s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce)
|
||||
s.XORKeyStream(polyKey[:], polyKey[:])
|
||||
s.SetCounter(1) // set the counter to 1, skipping 32 bytes
|
||||
s.XORKeyStream(ciphertext, plaintext)
|
||||
|
||||
p := poly1305.New(&polyKey)
|
||||
writeWithPadding(p, additionalData)
|
||||
writeWithPadding(p, ciphertext)
|
||||
writeUint64(p, len(additionalData))
|
||||
writeUint64(p, len(plaintext))
|
||||
p.Sum(tag[:0])
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
tag := ciphertext[len(ciphertext)-16:]
|
||||
ciphertext = ciphertext[:len(ciphertext)-16]
|
||||
|
||||
var polyKey [32]byte
|
||||
s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce)
|
||||
s.XORKeyStream(polyKey[:], polyKey[:])
|
||||
s.SetCounter(1) // set the counter to 1, skipping 32 bytes
|
||||
|
||||
p := poly1305.New(&polyKey)
|
||||
writeWithPadding(p, additionalData)
|
||||
writeWithPadding(p, ciphertext)
|
||||
writeUint64(p, len(additionalData))
|
||||
writeUint64(p, len(ciphertext))
|
||||
|
||||
ret, out := sliceForAppend(dst, len(ciphertext))
|
||||
if alias.InexactOverlap(out, ciphertext) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and input")
|
||||
}
|
||||
if alias.AnyOverlap(out, additionalData) {
|
||||
panic("chacha20poly1305: invalid buffer overlap of output and additional data")
|
||||
}
|
||||
if !p.Verify(tag) {
|
||||
for i := range out {
|
||||
out[i] = 0
|
||||
}
|
||||
return nil, errOpen
|
||||
}
|
||||
|
||||
s.XORKeyStream(out, ciphertext)
|
||||
return ret, nil
|
||||
}
|
||||
15
vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !amd64 || !gc || purego
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
return c.sealGeneric(dst, nonce, plaintext, additionalData)
|
||||
}
|
||||
|
||||
func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
return c.openGeneric(dst, nonce, ciphertext, additionalData)
|
||||
}
|
||||
9
vendor/golang.org/x/crypto/chacha20poly1305/fips140only_compat.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.26
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
func fips140Enforced() bool { return false }
|
||||
11
vendor/golang.org/x/crypto/chacha20poly1305/fips140only_go1.26.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.26
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
import "crypto/fips140"
|
||||
|
||||
func fips140Enforced() bool { return fips140.Enforced() }
|
||||
89
vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package chacha20poly1305
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"errors"
|
||||
|
||||
"golang.org/x/crypto/chacha20"
|
||||
)
|
||||
|
||||
type xchacha20poly1305 struct {
|
||||
key [KeySize]byte
|
||||
}
|
||||
|
||||
// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key.
|
||||
//
|
||||
// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce,
|
||||
// suitable to be generated randomly without risk of collisions. It should be
|
||||
// preferred when nonce uniqueness cannot be trivially ensured, or whenever
|
||||
// nonces are randomly generated.
|
||||
func NewX(key []byte) (cipher.AEAD, error) {
|
||||
if fips140Enforced() {
|
||||
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
|
||||
}
|
||||
if len(key) != KeySize {
|
||||
return nil, errors.New("chacha20poly1305: bad key length")
|
||||
}
|
||||
ret := new(xchacha20poly1305)
|
||||
copy(ret.key[:], key)
|
||||
return ret, nil
|
||||
}
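// Hedged usage sketch (not part of this package): with the 24-byte XChaCha20
// nonce a random value is safe, so a common pattern is to prepend the nonce to
// the sealed message and split it off again before Open.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	aead, err := chacha20poly1305.NewX(key)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}
	box := aead.Seal(nonce, nonce, []byte("attack at dawn"), nil) // nonce || ciphertext || tag
	pt, err := aead.Open(nil, box[:chacha20poly1305.NonceSizeX], box[chacha20poly1305.NonceSizeX:], nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}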
|
||||
|
||||
func (*xchacha20poly1305) NonceSize() int {
|
||||
return NonceSizeX
|
||||
}
|
||||
|
||||
func (*xchacha20poly1305) Overhead() int {
|
||||
return Overhead
|
||||
}
|
||||
|
||||
func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
|
||||
if len(nonce) != NonceSizeX {
|
||||
panic("chacha20poly1305: bad nonce length passed to Seal")
|
||||
}
|
||||
|
||||
// XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no
|
||||
// size limit. However, since we reuse the ChaCha20-Poly1305 implementation,
|
||||
// the second half of the counter is not available. This is unlikely to be
|
||||
// an issue because the cipher.AEAD API requires the entire message to be in
|
||||
// memory, and the counter overflows at 256 GB.
|
||||
if uint64(len(plaintext)) > (1<<38)-64 {
|
||||
panic("chacha20poly1305: plaintext too large")
|
||||
}
|
||||
|
||||
c := new(chacha20poly1305)
|
||||
hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16])
|
||||
copy(c.key[:], hKey)
|
||||
|
||||
// The first 4 bytes of the final nonce are unused counter space.
|
||||
cNonce := make([]byte, NonceSize)
|
||||
copy(cNonce[4:12], nonce[16:24])
|
||||
|
||||
return c.seal(dst, cNonce[:], plaintext, additionalData)
|
||||
}
|
||||
|
||||
func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
|
||||
if len(nonce) != NonceSizeX {
|
||||
panic("chacha20poly1305: bad nonce length passed to Open")
|
||||
}
|
||||
if len(ciphertext) < 16 {
|
||||
return nil, errOpen
|
||||
}
|
||||
if uint64(len(ciphertext)) > (1<<38)-48 {
|
||||
panic("chacha20poly1305: ciphertext too large")
|
||||
}
|
||||
|
||||
c := new(chacha20poly1305)
|
||||
hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16])
|
||||
copy(c.key[:], hKey)
|
||||
|
||||
// The first 4 bytes of the final nonce are unused counter space.
|
||||
cNonce := make([]byte, NonceSize)
|
||||
copy(cNonce[4:12], nonce[16:24])
|
||||
|
||||
return c.open(dst, cNonce[:], ciphertext, additionalData)
|
||||
}
|
||||
825
vendor/golang.org/x/crypto/cryptobyte/asn1.go
generated
vendored
Normal file
@@ -0,0 +1,825 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cryptobyte
|
||||
|
||||
import (
|
||||
encoding_asn1 "encoding/asn1"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/cryptobyte/asn1"
|
||||
)
|
||||
|
||||
// This file contains ASN.1-related methods for String and Builder.
|
||||
|
||||
// Builder
|
||||
|
||||
// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
|
||||
func (b *Builder) AddASN1Int64(v int64) {
|
||||
b.addASN1Signed(asn1.INTEGER, v)
|
||||
}
|
||||
|
||||
// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
|
||||
// given tag.
|
||||
func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
|
||||
b.addASN1Signed(tag, v)
|
||||
}
|
||||
|
||||
// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
|
||||
func (b *Builder) AddASN1Enum(v int64) {
|
||||
b.addASN1Signed(asn1.ENUM, v)
|
||||
}
|
||||
|
||||
func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
|
||||
b.AddASN1(tag, func(c *Builder) {
|
||||
length := 1
|
||||
for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
|
||||
length++
|
||||
}
|
||||
|
||||
for ; length > 0; length-- {
|
||||
i := v >> uint((length-1)*8) & 0xff
|
||||
c.AddUint8(uint8(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
|
||||
func (b *Builder) AddASN1Uint64(v uint64) {
|
||||
b.AddASN1(asn1.INTEGER, func(c *Builder) {
|
||||
length := 1
|
||||
for i := v; i >= 0x80; i >>= 8 {
|
||||
length++
|
||||
}
|
||||
|
||||
for ; length > 0; length-- {
|
||||
i := v >> uint((length-1)*8) & 0xff
|
||||
c.AddUint8(uint8(i))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
|
||||
func (b *Builder) AddASN1BigInt(n *big.Int) {
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b.AddASN1(asn1.INTEGER, func(c *Builder) {
|
||||
if n.Sign() < 0 {
|
||||
// A negative number has to be converted to two's-complement form. So we
|
||||
// invert and subtract 1. If the most-significant-bit isn't set then
|
||||
// we'll need to pad the beginning with 0xff in order to keep the number
|
||||
// negative.
|
||||
nMinus1 := new(big.Int).Neg(n)
|
||||
nMinus1.Sub(nMinus1, bigOne)
|
||||
bytes := nMinus1.Bytes()
|
||||
for i := range bytes {
|
||||
bytes[i] ^= 0xff
|
||||
}
|
||||
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
|
||||
c.add(0xff)
|
||||
}
|
||||
c.add(bytes...)
|
||||
} else if n.Sign() == 0 {
|
||||
c.add(0)
|
||||
} else {
|
||||
bytes := n.Bytes()
|
||||
if bytes[0]&0x80 != 0 {
|
||||
c.add(0)
|
||||
}
|
||||
c.add(bytes...)
|
||||
}
|
||||
})
|
||||
}
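// Worked example (a sketch, not part of this file): the two's-complement rule
// above pads with 0xff when the inverted magnitude would otherwise look
// positive, so -129 becomes the DER INTEGER 02 02 ff 7f.

package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddASN1BigInt(big.NewInt(-129))
	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // 02 02 ff 7f
}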
|
||||
|
||||
// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
|
||||
func (b *Builder) AddASN1OctetString(bytes []byte) {
|
||||
b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
|
||||
c.AddBytes(bytes)
|
||||
})
|
||||
}
|
||||
|
||||
const generalizedTimeFormatStr = "20060102150405Z0700"
|
||||
|
||||
// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
|
||||
func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
|
||||
if t.Year() < 0 || t.Year() > 9999 {
|
||||
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
|
||||
return
|
||||
}
|
||||
b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
|
||||
c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
|
||||
})
|
||||
}
|
||||
|
||||
// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
|
||||
func (b *Builder) AddASN1UTCTime(t time.Time) {
|
||||
b.AddASN1(asn1.UTCTime, func(c *Builder) {
|
||||
// As utilized by the X.509 profile, UTCTime can only
|
||||
// represent the years 1950 through 2049.
|
||||
if t.Year() < 1950 || t.Year() >= 2050 {
|
||||
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
|
||||
return
|
||||
}
|
||||
c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
|
||||
})
|
||||
}
|
||||
|
||||
// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
|
||||
// support BIT STRINGs that are not a whole number of bytes.
|
||||
func (b *Builder) AddASN1BitString(data []byte) {
|
||||
b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
|
||||
b.AddUint8(0)
|
||||
b.AddBytes(data)
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Builder) addBase128Int(n int64) {
|
||||
var length int
|
||||
if n == 0 {
|
||||
length = 1
|
||||
} else {
|
||||
for i := n; i > 0; i >>= 7 {
|
||||
length++
|
||||
}
|
||||
}
|
||||
|
||||
for i := length - 1; i >= 0; i-- {
|
||||
o := byte(n >> uint(i*7))
|
||||
o &= 0x7f
|
||||
if i != 0 {
|
||||
o |= 0x80
|
||||
}
|
||||
|
||||
b.add(o)
|
||||
}
|
||||
}
|
||||
|
||||
func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
|
||||
if len(oid) < 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, v := range oid {
|
||||
if v < 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
|
||||
b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
|
||||
if !isValidOID(oid) {
|
||||
b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
|
||||
return
|
||||
}
|
||||
|
||||
b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
|
||||
for _, v := range oid[2:] {
|
||||
b.addBase128Int(int64(v))
|
||||
}
|
||||
})
|
||||
}
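// Worked example (a sketch, not part of this file): the first two arcs are
// packed into one octet as 40*value1+value2 and the remaining arcs become
// base-128 varints with the continuation bit set, so 1.2.840.113549 encodes
// as 06 06 2a 86 48 86 f7 0d.

package main

import (
	encoding_asn1 "encoding/asn1"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddASN1ObjectIdentifier(encoding_asn1.ObjectIdentifier{1, 2, 840, 113549})
	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // 06 06 2a 86 48 86 f7 0d
}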
|
||||
|
||||
func (b *Builder) AddASN1Boolean(v bool) {
|
||||
b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
|
||||
if v {
|
||||
b.AddUint8(0xff)
|
||||
} else {
|
||||
b.AddUint8(0)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (b *Builder) AddASN1NULL() {
|
||||
b.add(uint8(asn1.NULL), 0)
|
||||
}
|
||||
|
||||
// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
|
||||
// successful or records an error if one occurred.
|
||||
func (b *Builder) MarshalASN1(v interface{}) {
|
||||
// NOTE(martinkr): This is somewhat of a hack to allow propagation of
|
||||
// encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
|
||||
// value embedded into a struct, its tag information is lost.
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
bytes, err := encoding_asn1.Marshal(v)
|
||||
if err != nil {
|
||||
b.err = err
|
||||
return
|
||||
}
|
||||
b.AddBytes(bytes)
|
||||
}
|
||||
|
||||
// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
|
||||
// Tags greater than 30 are not supported and result in an error (i.e.
|
||||
// low-tag-number form only). The child builder passed to the
|
||||
// BuilderContinuation can be used to build the content of the ASN.1 object.
|
||||
func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
// Identifiers with the low five bits set indicate high-tag-number format
|
||||
// (two or more octets), which we don't support.
|
||||
if tag&0x1f == 0x1f {
|
||||
b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag)
|
||||
return
|
||||
}
|
||||
b.AddUint8(uint8(tag))
|
||||
b.addLengthPrefixed(1, true, f)
|
||||
}
|
||||
|
||||
// String
|
||||
|
||||
// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
|
||||
// representation into out and advances. It reports whether the read
|
||||
// was successful.
|
||||
func (s *String) ReadASN1Boolean(out *bool) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
switch bytes[0] {
|
||||
case 0:
|
||||
*out = false
|
||||
case 0xff:
|
||||
*out = true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
|
||||
// not point to an integer, to a big.Int, or to a []byte it panics. Only
|
||||
// positive and zero values can be decoded into []byte, and they are returned as
|
||||
// big-endian binary values that share memory with s. Positive values will have
|
||||
// no leading zeroes, and zero will be returned as a single zero byte.
|
||||
// ReadASN1Integer reports whether the read was successful.
|
||||
func (s *String) ReadASN1Integer(out interface{}) bool {
|
||||
switch out := out.(type) {
|
||||
case *int, *int8, *int16, *int32, *int64:
|
||||
var i int64
|
||||
if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
|
||||
return false
|
||||
}
|
||||
reflect.ValueOf(out).Elem().SetInt(i)
|
||||
return true
|
||||
case *uint, *uint8, *uint16, *uint32, *uint64:
|
||||
var u uint64
|
||||
if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
|
||||
return false
|
||||
}
|
||||
reflect.ValueOf(out).Elem().SetUint(u)
|
||||
return true
|
||||
case *big.Int:
|
||||
return s.readASN1BigInt(out)
|
||||
case *[]byte:
|
||||
return s.readASN1Bytes(out)
|
||||
default:
|
||||
panic("out does not point to an integer type")
|
||||
}
|
||||
}
|
||||
|
||||
func checkASN1Integer(bytes []byte) bool {
|
||||
if len(bytes) == 0 {
|
||||
// An INTEGER is encoded with at least one octet.
|
||||
return false
|
||||
}
|
||||
if len(bytes) == 1 {
|
||||
return true
|
||||
}
|
||||
if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
|
||||
// Value is not minimally encoded.
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var bigOne = big.NewInt(1)
|
||||
|
||||
func (s *String) readASN1BigInt(out *big.Int) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
|
||||
return false
|
||||
}
|
||||
if bytes[0]&0x80 == 0x80 {
|
||||
// Negative number.
|
||||
neg := make([]byte, len(bytes))
|
||||
for i, b := range bytes {
|
||||
neg[i] = ^b
|
||||
}
|
||||
out.SetBytes(neg)
|
||||
out.Add(out, bigOne)
|
||||
out.Neg(out)
|
||||
} else {
|
||||
out.SetBytes(bytes)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readASN1Bytes(out *[]byte) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
|
||||
return false
|
||||
}
|
||||
if bytes[0]&0x80 == 0x80 {
|
||||
return false
|
||||
}
|
||||
for len(bytes) > 1 && bytes[0] == 0 {
|
||||
bytes = bytes[1:]
|
||||
}
|
||||
*out = bytes
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readASN1Int64(out *int64) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func asn1Signed(out *int64, n []byte) bool {
|
||||
length := len(n)
|
||||
if length > 8 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < length; i++ {
|
||||
*out <<= 8
|
||||
*out |= int64(n[i])
|
||||
}
|
||||
// Shift up and down in order to sign extend the result.
|
||||
*out <<= 64 - uint8(length)*8
|
||||
*out >>= 64 - uint8(length)*8
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readASN1Uint64(out *uint64) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func asn1Unsigned(out *uint64, n []byte) bool {
|
||||
length := len(n)
|
||||
if length > 9 || length == 9 && n[0] != 0 {
|
||||
// Too large for uint64.
|
||||
return false
|
||||
}
|
||||
if n[0]&0x80 != 0 {
|
||||
// Negative number.
|
||||
return false
|
||||
}
|
||||
for i := 0; i < length; i++ {
|
||||
*out <<= 8
|
||||
*out |= uint64(n[i])
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
|
||||
// and advances. It reports whether the read was successful and resulted in a
|
||||
// value that can be represented in an int64.
|
||||
func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
|
||||
var bytes String
|
||||
return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
|
||||
}
|
||||
|
||||
// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
|
||||
// whether the read was successful.
|
||||
func (s *String) ReadASN1Enum(out *int) bool {
|
||||
var bytes String
|
||||
var i int64
|
||||
if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
|
||||
return false
|
||||
}
|
||||
if int64(int(i)) != i {
|
||||
return false
|
||||
}
|
||||
*out = int(i)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readBase128Int(out *int) bool {
|
||||
ret := 0
|
||||
for i := 0; len(*s) > 0; i++ {
|
||||
if i == 5 {
|
||||
return false
|
||||
}
|
||||
// Avoid overflowing int on a 32-bit platform.
|
||||
// We don't want different behavior based on the architecture.
|
||||
if ret >= 1<<(31-7) {
|
||||
return false
|
||||
}
|
||||
ret <<= 7
|
||||
b := s.read(1)[0]
|
||||
|
||||
// ITU-T X.690, section 8.19.2:
|
||||
// The subidentifier shall be encoded in the fewest possible octets,
|
||||
// that is, the leading octet of the subidentifier shall not have the value 0x80.
|
||||
if i == 0 && b == 0x80 {
|
||||
return false
|
||||
}
|
||||
|
||||
ret |= int(b & 0x7f)
|
||||
if b&0x80 == 0 {
|
||||
*out = ret
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false // truncated
|
||||
}
|
||||
|
||||
// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
|
||||
// advances. It reports whether the read was successful.
|
||||
func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// In the worst case, we get two elements from the first byte (which is
|
||||
// encoded differently) and then every varint is a single byte long.
|
||||
components := make([]int, len(bytes)+1)
|
||||
|
||||
// The first varint is 40*value1 + value2:
|
||||
// According to this packing, value1 can take the values 0, 1 and 2 only.
|
||||
// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
|
||||
// then there are no restrictions on value2.
|
||||
var v int
|
||||
if !bytes.readBase128Int(&v) {
|
||||
return false
|
||||
}
|
||||
if v < 80 {
|
||||
components[0] = v / 40
|
||||
components[1] = v % 40
|
||||
} else {
|
||||
components[0] = 2
|
||||
components[1] = v - 80
|
||||
}
|
||||
|
||||
i := 2
|
||||
for ; len(bytes) > 0; i++ {
|
||||
if !bytes.readBase128Int(&v) {
|
||||
return false
|
||||
}
|
||||
components[i] = v
|
||||
}
|
||||
*out = components[:i]
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
|
||||
// advances. It reports whether the read was successful.
|
||||
func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
|
||||
return false
|
||||
}
|
||||
t := string(bytes)
|
||||
res, err := time.Parse(generalizedTimeFormatStr, t)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
|
||||
return false
|
||||
}
|
||||
*out = res
|
||||
return true
|
||||
}
|
||||
|
||||
const defaultUTCTimeFormatStr = "060102150405Z0700"
|
||||
|
||||
// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadASN1UTCTime(out *time.Time) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.UTCTime) {
|
||||
return false
|
||||
}
|
||||
t := string(bytes)
|
||||
|
||||
formatStr := defaultUTCTimeFormatStr
|
||||
var err error
|
||||
res, err := time.Parse(formatStr, t)
|
||||
if err != nil {
|
||||
// Fallback to minute precision if we can't parse second
|
||||
// precision. If we are following X.509 or X.690 we shouldn't
|
||||
// support this, but we do.
|
||||
formatStr = "0601021504Z0700"
|
||||
res, err = time.Parse(formatStr, t)
|
||||
}
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if serialized := res.Format(formatStr); serialized != t {
|
||||
return false
|
||||
}
|
||||
|
||||
if res.Year() >= 2050 {
|
||||
// UTCTime interprets the low order digits 50-99 as 1950-99.
|
||||
// This only applies to its use in the X.509 profile.
|
||||
// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
|
||||
res = res.AddDate(-100, 0, 0)
|
||||
}
|
||||
*out = res
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
|
||||
len(bytes)*8/8 != len(bytes) {
|
||||
return false
|
||||
}
|
||||
|
||||
paddingBits := bytes[0]
|
||||
bytes = bytes[1:]
|
||||
if paddingBits > 7 ||
|
||||
len(bytes) == 0 && paddingBits != 0 ||
|
||||
len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
out.BitLength = len(bytes)*8 - int(paddingBits)
|
||||
out.Bytes = bytes
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
|
||||
// an error if the BIT STRING is not a whole number of bytes. It reports
|
||||
// whether the read was successful.
|
||||
func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
|
||||
var bytes String
|
||||
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
paddingBits := bytes[0]
|
||||
if paddingBits != 0 {
|
||||
return false
|
||||
}
|
||||
*out = bytes[1:]
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
|
||||
// tag and length bytes) into out, and advances. The element must match the
|
||||
// given tag. It reports whether the read was successful.
|
||||
func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
|
||||
return s.ReadASN1((*String)(out), tag)
|
||||
}
|
||||
|
||||
// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
|
||||
// tag and length bytes) into out, and advances. The element must match the
|
||||
// given tag. It reports whether the read was successful.
|
||||
//
|
||||
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
|
||||
func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
|
||||
var t asn1.Tag
|
||||
if !s.ReadAnyASN1(out, &t) || t != tag {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
|
||||
// tag and length bytes) into out, and advances. The element must match the
|
||||
// given tag. It reports whether the read was successful.
|
||||
//
|
||||
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
|
||||
func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
|
||||
var t asn1.Tag
|
||||
if !s.ReadAnyASN1Element(out, &t) || t != tag {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
|
||||
// tag and length bytes) into out, sets outTag to its tag, and advances.
|
||||
// It reports whether the read was successful.
|
||||
//
|
||||
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
|
||||
func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
|
||||
return s.readASN1(out, outTag, true /* skip header */)
|
||||
}
|
||||
|
||||
// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
|
||||
// (including tag and length bytes) into out, sets outTag to its tag, and
|
||||
// advances. It reports whether the read was successful.
|
||||
//
|
||||
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
|
||||
func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
|
||||
return s.readASN1(out, outTag, false /* include header */)
|
||||
}
|
||||
|
||||
// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
|
||||
// the given tag.
|
||||
func (s String) PeekASN1Tag(tag asn1.Tag) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
return asn1.Tag(s[0]) == tag
|
||||
}
|
||||
|
||||
// SkipASN1 reads and discards an ASN.1 element with the given tag. It
|
||||
// reports whether the operation was successful.
|
||||
func (s *String) SkipASN1(tag asn1.Tag) bool {
|
||||
var unused String
|
||||
return s.ReadASN1(&unused, tag)
|
||||
}
|
||||
|
||||
// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
|
||||
// element (not including tag and length bytes) tagged with the given tag into
|
||||
// out. It stores whether an element with the tag was found in outPresent,
|
||||
// unless outPresent is nil. It reports whether the read was successful.
|
||||
func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
|
||||
present := s.PeekASN1Tag(tag)
|
||||
if outPresent != nil {
|
||||
*outPresent = present
|
||||
}
|
||||
if present && !s.ReadASN1(out, tag) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
|
||||
// else leaves s unchanged. It reports whether the operation was successful.
|
||||
func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
|
||||
if !s.PeekASN1Tag(tag) {
|
||||
return true
|
||||
}
|
||||
var unused String
|
||||
return s.ReadASN1(&unused, tag)
|
||||
}
|
||||
|
||||
// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly
|
||||
// tagged with tag into out and advances. If no element with a matching tag is
|
||||
// present, it writes defaultValue into out instead. Otherwise, it behaves like
|
||||
// ReadASN1Integer.
|
||||
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
|
||||
var present bool
|
||||
var i String
|
||||
if !s.ReadOptionalASN1(&i, &present, tag) {
|
||||
return false
|
||||
}
|
||||
if !present {
|
||||
switch out.(type) {
|
||||
case *int, *int8, *int16, *int32, *int64,
|
||||
*uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
|
||||
reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
|
||||
case *big.Int:
|
||||
if defaultValue, ok := defaultValue.(*big.Int); ok {
|
||||
out.(*big.Int).Set(defaultValue)
|
||||
} else {
|
||||
panic("out points to big.Int, but defaultValue does not")
|
||||
}
|
||||
default:
|
||||
panic("invalid integer type")
|
||||
}
|
||||
return true
|
||||
}
|
||||
if !i.ReadASN1Integer(out) || !i.Empty() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadOptionalASN1OctetString attempts to read an optional ASN.1 OCTET STRING
|
||||
// explicitly tagged with tag into out and advances. If no element with a
|
||||
// matching tag is present, it sets "out" to nil instead. It reports
|
||||
// whether the read was successful.
|
||||
func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag asn1.Tag) bool {
|
||||
var present bool
|
||||
var child String
|
||||
if !s.ReadOptionalASN1(&child, &present, tag) {
|
||||
return false
|
||||
}
|
||||
if outPresent != nil {
|
||||
*outPresent = present
|
||||
}
|
||||
if present {
|
||||
var oct String
|
||||
if !child.ReadASN1(&oct, asn1.OCTET_STRING) || !child.Empty() {
|
||||
return false
|
||||
}
|
||||
*out = oct
|
||||
} else {
|
||||
*out = nil
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
|
||||
// explicitly tagged with tag into out and advances. If no element with a
|
||||
// matching tag is present, it sets "out" to defaultValue instead. It reports
|
||||
// whether the read was successful.
|
||||
func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
|
||||
var present bool
|
||||
var child String
|
||||
if !s.ReadOptionalASN1(&child, &present, tag) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !present {
|
||||
*out = defaultValue
|
||||
return true
|
||||
}
|
||||
|
||||
return child.ReadASN1Boolean(out)
|
||||
}
|
||||
|
||||
func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
|
||||
if len(*s) < 2 {
|
||||
return false
|
||||
}
|
||||
tag, lenByte := (*s)[0], (*s)[1]
|
||||
|
||||
if tag&0x1f == 0x1f {
|
||||
// ITU-T X.690 section 8.1.2
|
||||
//
|
||||
// An identifier octet with a tag part of 0x1f indicates a high-tag-number
|
||||
// form identifier with two or more octets. We only support tags less than
|
||||
// 31 (i.e. low-tag-number form, single octet identifier).
|
||||
return false
|
||||
}
|
||||
|
||||
if outTag != nil {
|
||||
*outTag = asn1.Tag(tag)
|
||||
}
|
||||
|
||||
// ITU-T X.690 section 8.1.3
|
||||
//
|
||||
// Bit 8 of the first length byte indicates whether the length is short- or
|
||||
// long-form.
|
||||
var length, headerLen uint32 // length includes headerLen
|
||||
if lenByte&0x80 == 0 {
|
||||
// Short-form length (section 8.1.3.4), encoded in bits 1-7.
|
||||
length = uint32(lenByte) + 2
|
||||
headerLen = 2
|
||||
} else {
|
||||
// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
|
||||
// used to encode the length.
|
||||
lenLen := lenByte & 0x7f
|
||||
var len32 uint32
|
||||
|
||||
if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
|
||||
return false
|
||||
}
|
||||
|
||||
lenBytes := String((*s)[2 : 2+lenLen])
|
||||
if !lenBytes.readUnsigned(&len32, int(lenLen)) {
|
||||
return false
|
||||
}
|
||||
|
||||
// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
|
||||
// with the minimum number of octets.
|
||||
if len32 < 128 {
|
||||
// Length should have used short-form encoding.
|
||||
return false
|
||||
}
|
||||
if len32>>((lenLen-1)*8) == 0 {
|
||||
// Leading octet is 0. Length should have been at least one byte shorter.
|
||||
return false
|
||||
}
|
||||
|
||||
headerLen = 2 + uint32(lenLen)
|
||||
if headerLen+len32 < len32 {
|
||||
// Overflow.
|
||||
return false
|
||||
}
|
||||
length = headerLen + len32
|
||||
}
|
||||
|
||||
if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
|
||||
return false
|
||||
}
|
||||
if skipHeader && !out.Skip(int(headerLen)) {
|
||||
panic("cryptobyte: internal error")
|
||||
}
|
||||
|
||||
return true
|
||||
}
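// Hedged parsing sketch (not part of this file): reading a short-form-length
// element with the logic above. 30 03 02 01 05 is SEQUENCE { INTEGER 5 }.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	der := cryptobyte.String([]byte{0x30, 0x03, 0x02, 0x01, 0x05})
	var inner cryptobyte.String
	var n int64
	if !der.ReadASN1(&inner, asn1.SEQUENCE) || !inner.ReadASN1Integer(&n) {
		panic("malformed DER")
	}
	fmt.Println(n) // 5
}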
|
||||
46
vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package asn1 contains supporting types for parsing and building ASN.1
|
||||
// messages with the cryptobyte package.
|
||||
package asn1
|
||||
|
||||
// Tag represents an ASN.1 identifier octet, consisting of a tag number
|
||||
// (indicating a type) and class (such as context-specific or constructed).
|
||||
//
|
||||
// Methods in the cryptobyte package only support the low-tag-number form, i.e.
|
||||
// a single identifier octet with bits 7-8 encoding the class and bits 1-6
|
||||
// encoding the tag number.
|
||||
type Tag uint8
|
||||
|
||||
const (
|
||||
classConstructed = 0x20
|
||||
classContextSpecific = 0x80
|
||||
)
|
||||
|
||||
// Constructed returns t with the constructed class bit set.
|
||||
func (t Tag) Constructed() Tag { return t | classConstructed }
|
||||
|
||||
// ContextSpecific returns t with the context-specific class bit set.
|
||||
func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
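// Illustrative sketch (not part of this file): combining the class bits. The
// context-specific constructed tag [0] used for explicit tagging in X.509 is
// Tag(0).Constructed().ContextSpecific(), i.e. 0xa0.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	t := asn1.Tag(0).Constructed().ContextSpecific()
	fmt.Printf("%#x\n", t) // 0xa0
}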
|
||||
|
||||
// The following is a list of standard tag and class combinations.
|
||||
const (
|
||||
BOOLEAN = Tag(1)
|
||||
INTEGER = Tag(2)
|
||||
BIT_STRING = Tag(3)
|
||||
OCTET_STRING = Tag(4)
|
||||
NULL = Tag(5)
|
||||
OBJECT_IDENTIFIER = Tag(6)
|
||||
ENUM = Tag(10)
|
||||
UTF8String = Tag(12)
|
||||
SEQUENCE = Tag(16 | classConstructed)
|
||||
SET = Tag(17 | classConstructed)
|
||||
PrintableString = Tag(19)
|
||||
T61String = Tag(20)
|
||||
IA5String = Tag(22)
|
||||
UTCTime = Tag(23)
|
||||
GeneralizedTime = Tag(24)
|
||||
GeneralString = Tag(27)
|
||||
)
|
||||
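The identifier helpers above compose by OR-ing class bits into a low-tag-number identifier octet. A minimal usage sketch (illustrative only, not part of the vendored file; the import path is the upstream package):

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// Tag number 0 with the constructed and context-specific class bits set,
	// i.e. the identifier octet 0xa0 used for an EXPLICIT [0] wrapper.
	t := asn1.Tag(0).Constructed().ContextSpecific()
	fmt.Printf("%#x\n", uint8(t)) // 0xa0

	// SEQUENCE already includes the constructed bit: 0x20 | 16 = 0x30.
	fmt.Printf("%#x\n", uint8(asn1.SEQUENCE)) // 0x30
}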
350
vendor/golang.org/x/crypto/cryptobyte/builder.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cryptobyte
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A Builder builds byte strings from fixed-length and length-prefixed values.
|
||||
// Builders either allocate space as needed, or are ‘fixed’, which means that
|
||||
// they write into a given buffer and produce an error if it's exhausted.
|
||||
//
|
||||
// The zero value is a usable Builder that allocates space as needed.
|
||||
//
|
||||
// Simple values are marshaled and appended to a Builder using methods on the
|
||||
// Builder. Length-prefixed values are marshaled by providing a
|
||||
// BuilderContinuation, which is a function that writes the inner contents of
|
||||
// the value to a given Builder. See the documentation for BuilderContinuation
|
||||
// for details.
|
||||
type Builder struct {
|
||||
err error
|
||||
result []byte
|
||||
fixedSize bool
|
||||
child *Builder
|
||||
offset int
|
||||
pendingLenLen int
|
||||
pendingIsASN1 bool
|
||||
inContinuation *bool
|
||||
}
|
||||
|
||||
// NewBuilder creates a Builder that appends its output to the given buffer.
|
||||
// Like append(), the slice will be reallocated if its capacity is exceeded.
|
||||
// Use Bytes to get the final buffer.
|
||||
func NewBuilder(buffer []byte) *Builder {
|
||||
return &Builder{
|
||||
result: buffer,
|
||||
}
|
||||
}
|
||||
|
||||
// NewFixedBuilder creates a Builder that appends its output into the given
|
||||
// buffer. This builder does not reallocate the output buffer. Writes that
|
||||
// would exceed the buffer's capacity are treated as an error.
|
||||
func NewFixedBuilder(buffer []byte) *Builder {
|
||||
return &Builder{
|
||||
result: buffer,
|
||||
fixedSize: true,
|
||||
}
|
||||
}
|
||||
|
||||
// SetError sets the value to be returned as the error from Bytes. Writes
|
||||
// performed after calling SetError are ignored.
|
||||
func (b *Builder) SetError(err error) {
|
||||
b.err = err
|
||||
}
|
||||
|
||||
// Bytes returns the bytes written by the builder or an error if one has
|
||||
// occurred during building.
|
||||
func (b *Builder) Bytes() ([]byte, error) {
|
||||
if b.err != nil {
|
||||
return nil, b.err
|
||||
}
|
||||
return b.result[b.offset:], nil
|
||||
}
|
||||
|
||||
// BytesOrPanic returns the bytes written by the builder or panics if an error
|
||||
// has occurred during building.
|
||||
func (b *Builder) BytesOrPanic() []byte {
|
||||
if b.err != nil {
|
||||
panic(b.err)
|
||||
}
|
||||
return b.result[b.offset:]
|
||||
}
|
||||
|
||||
// AddUint8 appends an 8-bit value to the byte string.
|
||||
func (b *Builder) AddUint8(v uint8) {
|
||||
b.add(byte(v))
|
||||
}
|
||||
|
||||
// AddUint16 appends a big-endian, 16-bit value to the byte string.
|
||||
func (b *Builder) AddUint16(v uint16) {
|
||||
b.add(byte(v>>8), byte(v))
|
||||
}
|
||||
|
||||
// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
|
||||
// byte of the 32-bit input value is silently truncated.
|
||||
func (b *Builder) AddUint24(v uint32) {
|
||||
b.add(byte(v>>16), byte(v>>8), byte(v))
|
||||
}
|
||||
|
||||
// AddUint32 appends a big-endian, 32-bit value to the byte string.
|
||||
func (b *Builder) AddUint32(v uint32) {
|
||||
b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
|
||||
}
|
||||
|
||||
// AddUint48 appends a big-endian, 48-bit value to the byte string.
|
||||
func (b *Builder) AddUint48(v uint64) {
|
||||
b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
|
||||
}
|
||||
|
||||
// AddUint64 appends a big-endian, 64-bit value to the byte string.
|
||||
func (b *Builder) AddUint64(v uint64) {
|
||||
b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
|
||||
}
|
||||
|
||||
// AddBytes appends a sequence of bytes to the byte string.
|
||||
func (b *Builder) AddBytes(v []byte) {
|
||||
b.add(v...)
|
||||
}
|
||||
|
||||
// BuilderContinuation is a continuation-passing interface for building
|
||||
// length-prefixed byte sequences. Builder methods for length-prefixed
|
||||
// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
|
||||
// supplied to them. The child builder passed to the continuation can be used
|
||||
// to build the content of the length-prefixed sequence. For example:
|
||||
//
|
||||
// parent := cryptobyte.NewBuilder()
|
||||
// parent.AddUint8LengthPrefixed(func (child *Builder) {
|
||||
// child.AddUint8(42)
|
||||
// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
|
||||
// grandchild.AddUint8(5)
|
||||
// })
|
||||
// })
|
||||
//
|
||||
// It is an error to write more bytes to the child than allowed by the reserved
|
||||
// length prefix. After the continuation returns, the child must be considered
|
||||
// invalid, i.e. users must not store any copies or references of the child
|
||||
// that outlive the continuation.
|
||||
//
|
||||
// If the continuation panics with a value of type BuildError then the inner
|
||||
// error will be returned as the error from Bytes. If the child panics
|
||||
// otherwise then Bytes will repanic with the same value.
|
||||
type BuilderContinuation func(child *Builder)
|
||||
|
||||
// BuildError wraps an error. If a BuilderContinuation panics with this value,
|
||||
// the panic will be recovered and the inner error will be returned from
|
||||
// Builder.Bytes.
|
||||
type BuildError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
|
||||
func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
|
||||
b.addLengthPrefixed(1, false, f)
|
||||
}
|
||||
|
||||
// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
|
||||
func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
|
||||
b.addLengthPrefixed(2, false, f)
|
||||
}
|
||||
|
||||
// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
|
||||
func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
|
||||
b.addLengthPrefixed(3, false, f)
|
||||
}
|
||||
|
||||
// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
|
||||
func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
|
||||
b.addLengthPrefixed(4, false, f)
|
||||
}
|
||||
|
||||
func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
|
||||
if !*b.inContinuation {
|
||||
*b.inContinuation = true
|
||||
|
||||
defer func() {
|
||||
*b.inContinuation = false
|
||||
|
||||
r := recover()
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if buildError, ok := r.(BuildError); ok {
|
||||
b.err = buildError.Err
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
f(arg)
|
||||
}
|
||||
|
||||
func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
|
||||
// Subsequent writes can be ignored if the builder has encountered an error.
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
offset := len(b.result)
|
||||
b.add(make([]byte, lenLen)...)
|
||||
|
||||
if b.inContinuation == nil {
|
||||
b.inContinuation = new(bool)
|
||||
}
|
||||
|
||||
b.child = &Builder{
|
||||
result: b.result,
|
||||
fixedSize: b.fixedSize,
|
||||
offset: offset,
|
||||
pendingLenLen: lenLen,
|
||||
pendingIsASN1: isASN1,
|
||||
inContinuation: b.inContinuation,
|
||||
}
|
||||
|
||||
b.callContinuation(f, b.child)
|
||||
b.flushChild()
|
||||
if b.child != nil {
|
||||
panic("cryptobyte: internal error")
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Builder) flushChild() {
|
||||
if b.child == nil {
|
||||
return
|
||||
}
|
||||
b.child.flushChild()
|
||||
child := b.child
|
||||
b.child = nil
|
||||
|
||||
if child.err != nil {
|
||||
b.err = child.err
|
||||
return
|
||||
}
|
||||
|
||||
length := len(child.result) - child.pendingLenLen - child.offset
|
||||
|
||||
if length < 0 {
|
||||
panic("cryptobyte: internal error") // result unexpectedly shrunk
|
||||
}
|
||||
|
||||
if child.pendingIsASN1 {
|
||||
// For ASN.1, we reserved a single byte for the length. If that turned out
|
||||
// to be incorrect, we have to move the contents along in order to make
|
||||
// space.
|
||||
if child.pendingLenLen != 1 {
|
||||
panic("cryptobyte: internal error")
|
||||
}
|
||||
var lenLen, lenByte uint8
|
||||
if int64(length) > 0xfffffffe {
|
||||
b.err = errors.New("pending ASN.1 child too long")
|
||||
return
|
||||
} else if length > 0xffffff {
|
||||
lenLen = 5
|
||||
lenByte = 0x80 | 4
|
||||
} else if length > 0xffff {
|
||||
lenLen = 4
|
||||
lenByte = 0x80 | 3
|
||||
} else if length > 0xff {
|
||||
lenLen = 3
|
||||
lenByte = 0x80 | 2
|
||||
} else if length > 0x7f {
|
||||
lenLen = 2
|
||||
lenByte = 0x80 | 1
|
||||
} else {
|
||||
lenLen = 1
|
||||
lenByte = uint8(length)
|
||||
length = 0
|
||||
}
|
||||
|
||||
// Insert the initial length byte, make space for successive length bytes,
|
||||
// and adjust the offset.
|
||||
child.result[child.offset] = lenByte
|
||||
extraBytes := int(lenLen - 1)
|
||||
if extraBytes != 0 {
|
||||
child.add(make([]byte, extraBytes)...)
|
||||
childStart := child.offset + child.pendingLenLen
|
||||
copy(child.result[childStart+extraBytes:], child.result[childStart:])
|
||||
}
|
||||
child.offset++
|
||||
child.pendingLenLen = extraBytes
|
||||
}
|
||||
|
||||
l := length
|
||||
for i := child.pendingLenLen - 1; i >= 0; i-- {
|
||||
child.result[child.offset+i] = uint8(l)
|
||||
l >>= 8
|
||||
}
|
||||
if l != 0 {
|
||||
b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
|
||||
return
|
||||
}
|
||||
|
||||
if b.fixedSize && &b.result[0] != &child.result[0] {
|
||||
panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
|
||||
}
|
||||
|
||||
b.result = child.result
|
||||
}
|
||||
|
||||
func (b *Builder) add(bytes ...byte) {
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
if b.child != nil {
|
||||
panic("cryptobyte: attempted write while child is pending")
|
||||
}
|
||||
if len(b.result)+len(bytes) < len(bytes) {
|
||||
b.err = errors.New("cryptobyte: length overflow")
|
||||
}
|
||||
if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
|
||||
b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
|
||||
return
|
||||
}
|
||||
b.result = append(b.result, bytes...)
|
||||
}
|
||||
|
||||
// Unwrite rolls back non-negative n bytes written directly to the Builder.
|
||||
// An attempt by a child builder passed to a continuation to unwrite bytes
|
||||
// from its parent will panic.
|
||||
func (b *Builder) Unwrite(n int) {
|
||||
if b.err != nil {
|
||||
return
|
||||
}
|
||||
if b.child != nil {
|
||||
panic("cryptobyte: attempted unwrite while child is pending")
|
||||
}
|
||||
length := len(b.result) - b.pendingLenLen - b.offset
|
||||
if length < 0 {
|
||||
panic("cryptobyte: internal error")
|
||||
}
|
||||
if n < 0 {
|
||||
panic("cryptobyte: attempted to unwrite negative number of bytes")
|
||||
}
|
||||
if n > length {
|
||||
panic("cryptobyte: attempted to unwrite more than was written")
|
||||
}
|
||||
b.result = b.result[:len(b.result)-n]
|
||||
}
|
||||
|
||||
// A MarshalingValue marshals itself into a Builder.
|
||||
type MarshalingValue interface {
|
||||
// Marshal is called by Builder.AddValue. It receives a pointer to a builder
|
||||
// to marshal itself into. It may return an error that occurred during
|
||||
// marshaling, such as unset or invalid values.
|
||||
Marshal(b *Builder) error
|
||||
}
|
||||
|
||||
// AddValue calls Marshal on v, passing a pointer to the builder to append to.
|
||||
// If Marshal returns an error, it is set on the Builder so that subsequent
|
||||
// appends don't have an effect.
|
||||
func (b *Builder) AddValue(v MarshalingValue) {
|
||||
err := v.Marshal(b)
|
||||
if err != nil {
|
||||
b.err = err
|
||||
}
|
||||
}
|
||||
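For context, a minimal sketch of how the Builder API above is typically driven (illustrative only, not part of the vendored file): fixed-width values are appended directly, while length-prefixed submessages are written through a continuation so the prefix is filled in afterwards.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder // the zero value allocates as needed
	b.AddUint8(1)
	b.AddUint16LengthPrefixed(func(child *cryptobyte.Builder) {
		child.AddBytes([]byte("hello"))
	})

	out, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", out) // 01 00 05 68 65 6c 6c 6f
}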
183
vendor/golang.org/x/crypto/cryptobyte/string.go
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cryptobyte contains types that help with parsing and constructing
|
||||
// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
|
||||
// contains useful ASN.1 constants.)
|
||||
//
|
||||
// The String type is for parsing. It wraps a []byte slice and provides helper
|
||||
// functions for consuming structures, value by value.
|
||||
//
|
||||
// The Builder type is for constructing messages. It provides helper functions
|
||||
// for appending values and also for appending length-prefixed submessages –
|
||||
// without having to worry about calculating the length prefix ahead of time.
|
||||
//
|
||||
// See the documentation and examples for the Builder and String types to get
|
||||
// started.
|
||||
package cryptobyte
|
||||
|
||||
// String represents a string of bytes. It provides methods for parsing
|
||||
// fixed-length and length-prefixed values from it.
|
||||
type String []byte
|
||||
|
||||
// read advances a String by n bytes and returns them. If less than n bytes
|
||||
// remain, it returns nil.
|
||||
func (s *String) read(n int) []byte {
|
||||
if len(*s) < n || n < 0 {
|
||||
return nil
|
||||
}
|
||||
v := (*s)[:n]
|
||||
*s = (*s)[n:]
|
||||
return v
|
||||
}
|
||||
|
||||
// Skip advances the String by n bytes and reports whether it was successful.
|
||||
func (s *String) Skip(n int) bool {
|
||||
return s.read(n) != nil
|
||||
}
|
||||
|
||||
// ReadUint8 decodes an 8-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint8(out *uint8) bool {
|
||||
v := s.read(1)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint8(v[0])
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint16(out *uint16) bool {
|
||||
v := s.read(2)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint16(v[0])<<8 | uint16(v[1])
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint24(out *uint32) bool {
|
||||
v := s.read(3)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint32(out *uint32) bool {
|
||||
v := s.read(4)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint48(out *uint64) bool {
|
||||
v := s.read(6)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
|
||||
// It reports whether the read was successful.
|
||||
func (s *String) ReadUint64(out *uint64) bool {
|
||||
v := s.read(8)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readUnsigned(out *uint32, length int) bool {
|
||||
v := s.read(length)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
var result uint32
|
||||
for i := 0; i < length; i++ {
|
||||
result <<= 8
|
||||
result |= uint32(v[i])
|
||||
}
|
||||
*out = result
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
|
||||
lenBytes := s.read(lenLen)
|
||||
if lenBytes == nil {
|
||||
return false
|
||||
}
|
||||
var length uint32
|
||||
for _, b := range lenBytes {
|
||||
length = length << 8
|
||||
length = length | uint32(b)
|
||||
}
|
||||
v := s.read(int(length))
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*outChild = v
|
||||
return true
|
||||
}
|
||||
|
||||
// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
|
||||
// into out and advances over it. It reports whether the read was successful.
|
||||
func (s *String) ReadUint8LengthPrefixed(out *String) bool {
|
||||
return s.readLengthPrefixed(1, out)
|
||||
}
|
||||
|
||||
// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
|
||||
// length-prefixed value into out and advances over it. It reports whether the
|
||||
// read was successful.
|
||||
func (s *String) ReadUint16LengthPrefixed(out *String) bool {
|
||||
return s.readLengthPrefixed(2, out)
|
||||
}
|
||||
|
||||
// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
|
||||
// length-prefixed value into out and advances over it. It reports whether
|
||||
// the read was successful.
|
||||
func (s *String) ReadUint24LengthPrefixed(out *String) bool {
|
||||
return s.readLengthPrefixed(3, out)
|
||||
}
|
||||
|
||||
// ReadBytes reads n bytes into out and advances over them. It reports
|
||||
// whether the read was successful.
|
||||
func (s *String) ReadBytes(out *[]byte, n int) bool {
|
||||
v := s.read(n)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
*out = v
|
||||
return true
|
||||
}
|
||||
|
||||
// CopyBytes copies len(out) bytes into out and advances over them. It reports
|
||||
// whether the copy operation was successful.
|
||||
func (s *String) CopyBytes(out []byte) bool {
|
||||
n := len(out)
|
||||
v := s.read(n)
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return copy(out, v) == n
|
||||
}
|
||||
|
||||
// Empty reports whether the string does not contain any bytes.
|
||||
func (s String) Empty() bool {
|
||||
return len(s) == 0
|
||||
}
|
||||
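A matching parsing sketch for the String helpers above (illustrative only, not part of the vendored file), consuming the message produced in the Builder example: read a fixed-width field, then a 16-bit length-prefixed body, and check that nothing trails.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	input := cryptobyte.String([]byte{0x01, 0x00, 0x05, 'h', 'e', 'l', 'l', 'o'})

	var version uint8
	var body cryptobyte.String
	if !input.ReadUint8(&version) ||
		!input.ReadUint16LengthPrefixed(&body) ||
		!input.Empty() {
		panic("malformed message")
	}
	fmt.Println(version, string(body)) // 1 hello
}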
95
vendor/golang.org/x/crypto/hkdf/hkdf.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
|
||||
// Function (HKDF) as defined in RFC 5869.
|
||||
//
|
||||
// HKDF is a cryptographic key derivation function (KDF) with the goal of
|
||||
// expanding limited input keying material into one or more cryptographically
|
||||
// strong secret keys.
|
||||
package hkdf
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"errors"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Extract generates a pseudorandom key for use with Expand from an input secret
|
||||
// and an optional independent salt.
|
||||
//
|
||||
// Only use this function if you need to reuse the extracted key with multiple
|
||||
// Expand invocations and different context values. Most common scenarios,
|
||||
// including the generation of multiple keys, should use New instead.
|
||||
func Extract(hash func() hash.Hash, secret, salt []byte) []byte {
|
||||
if salt == nil {
|
||||
salt = make([]byte, hash().Size())
|
||||
}
|
||||
extractor := hmac.New(hash, salt)
|
||||
extractor.Write(secret)
|
||||
return extractor.Sum(nil)
|
||||
}
|
||||
|
||||
type hkdf struct {
|
||||
expander hash.Hash
|
||||
size int
|
||||
|
||||
info []byte
|
||||
counter byte
|
||||
|
||||
prev []byte
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (f *hkdf) Read(p []byte) (int, error) {
|
||||
// Check whether enough data can be generated
|
||||
need := len(p)
|
||||
remains := len(f.buf) + int(255-f.counter+1)*f.size
|
||||
if remains < need {
|
||||
return 0, errors.New("hkdf: entropy limit reached")
|
||||
}
|
||||
// Read any leftover from the buffer
|
||||
n := copy(p, f.buf)
|
||||
p = p[n:]
|
||||
|
||||
// Fill the rest of the buffer
|
||||
for len(p) > 0 {
|
||||
if f.counter > 1 {
|
||||
f.expander.Reset()
|
||||
}
|
||||
f.expander.Write(f.prev)
|
||||
f.expander.Write(f.info)
|
||||
f.expander.Write([]byte{f.counter})
|
||||
f.prev = f.expander.Sum(f.prev[:0])
|
||||
f.counter++
|
||||
|
||||
// Copy the new batch into p
|
||||
f.buf = f.prev
|
||||
n = copy(p, f.buf)
|
||||
p = p[n:]
|
||||
}
|
||||
// Save leftovers for next run
|
||||
f.buf = f.buf[n:]
|
||||
|
||||
return need, nil
|
||||
}
|
||||
|
||||
// Expand returns a Reader, from which keys can be read, using the given
|
||||
// pseudorandom key and optional context info, skipping the extraction step.
|
||||
//
|
||||
// The pseudorandomKey should have been generated by Extract, or be a uniformly
|
||||
// random or pseudorandom cryptographically strong key. See RFC 5869, Section
|
||||
// 3.3. Most common scenarios will want to use New instead.
|
||||
func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader {
|
||||
expander := hmac.New(hash, pseudorandomKey)
|
||||
return &hkdf{expander, expander.Size(), info, 1, nil, nil}
|
||||
}
|
||||
|
||||
// New returns a Reader, from which keys can be read, using the given hash,
|
||||
// secret, salt and context info. Salt and info can be nil.
|
||||
func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
|
||||
prk := Extract(hash, secret, salt)
|
||||
return Expand(hash, prk, info)
|
||||
}
|
||||
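A minimal usage sketch for the package above (illustrative only, not part of the vendored file), deriving two independent keys from one secret with New; the salt and context string are arbitrary example values.

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material")
	salt := make([]byte, sha256.Size)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// One Reader yields as many key bytes as needed (up to 255 hash lengths).
	kdf := hkdf.New(sha256.New, secret, salt, []byte("example context"))
	key1 := make([]byte, 32)
	key2 := make([]byte, 32)
	if _, err := io.ReadFull(kdf, key1); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(kdf, key2); err != nil {
		panic(err)
	}
	fmt.Printf("key1: %x\nkey2: %x\n", key1, key2)
}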
31
vendor/golang.org/x/crypto/internal/alias/alias.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !purego
|
||||
|
||||
// Package alias implements memory aliasing tests.
|
||||
package alias
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// AnyOverlap reports whether x and y share memory at any (not necessarily
|
||||
// corresponding) index. The memory beyond the slice length is ignored.
|
||||
func AnyOverlap(x, y []byte) bool {
|
||||
return len(x) > 0 && len(y) > 0 &&
|
||||
uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
|
||||
uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
|
||||
}
|
||||
|
||||
// InexactOverlap reports whether x and y share memory at any non-corresponding
|
||||
// index. The memory beyond the slice length is ignored. Note that x and y can
|
||||
// have different lengths and still not have any inexact overlap.
|
||||
//
|
||||
// InexactOverlap can be used to implement the requirements of the crypto/cipher
|
||||
// AEAD, Block, BlockMode and Stream interfaces.
|
||||
func InexactOverlap(x, y []byte) bool {
|
||||
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
|
||||
return false
|
||||
}
|
||||
return AnyOverlap(x, y)
|
||||
}
|
||||
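The aliasing rules above are easiest to see on concrete slices. The sketch below mirrors the vendored checks locally, since internal/alias cannot be imported from outside the golang.org/x/crypto module (illustrative only, not part of the vendored file): a shifted view of the same array is an inexact overlap, while in-place use of one slice is not.

package main

import (
	"fmt"
	"unsafe"
)

// Local copies of the vendored checks, for demonstration only.
func anyOverlap(x, y []byte) bool {
	return len(x) > 0 && len(y) > 0 &&
		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
}

func inexactOverlap(x, y []byte) bool {
	if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
		return false
	}
	return anyOverlap(x, y)
}

func main() {
	buf := make([]byte, 8)
	fmt.Println(inexactOverlap(buf, buf))             // false: same start, in-place use is allowed
	fmt.Println(inexactOverlap(buf[1:], buf))         // true: shifted views of the same array
	fmt.Println(inexactOverlap(buf, make([]byte, 8))) // false: distinct backing arrays
}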
34
vendor/golang.org/x/crypto/internal/alias/alias_purego.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build purego
|
||||
|
||||
// Package alias implements memory aliasing tests.
|
||||
package alias
|
||||
|
||||
// This is the Google App Engine standard variant based on reflect
|
||||
// because the unsafe package and cgo are disallowed.
|
||||
|
||||
import "reflect"
|
||||
|
||||
// AnyOverlap reports whether x and y share memory at any (not necessarily
|
||||
// corresponding) index. The memory beyond the slice length is ignored.
|
||||
func AnyOverlap(x, y []byte) bool {
|
||||
return len(x) > 0 && len(y) > 0 &&
|
||||
reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
|
||||
reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
|
||||
}
|
||||
|
||||
// InexactOverlap reports whether x and y share memory at any non-corresponding
|
||||
// index. The memory beyond the slice length is ignored. Note that x and y can
|
||||
// have different lengths and still not have any inexact overlap.
|
||||
//
|
||||
// InexactOverlap can be used to implement the requirements of the crypto/cipher
|
||||
// AEAD, Block, BlockMode and Stream interfaces.
|
||||
func InexactOverlap(x, y []byte) bool {
|
||||
if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] {
|
||||
return false
|
||||
}
|
||||
return AnyOverlap(x, y)
|
||||
}
|
||||
9
vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
|
||||
|
||||
package poly1305
|
||||
|
||||
type mac struct{ macGeneric }
|
||||
99
vendor/golang.org/x/crypto/internal/poly1305/poly1305.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package poly1305 implements Poly1305 one-time message authentication code as
|
||||
// specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
|
||||
//
|
||||
// Poly1305 is a fast, one-time authentication function. It is infeasible for an
|
||||
// attacker to generate an authenticator for a message without the key. However, a
|
||||
// key must only be used for a single message. Authenticating two different
|
||||
// messages with the same key allows an attacker to forge authenticators for other
|
||||
// messages with the same key.
|
||||
//
|
||||
// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
|
||||
// used with a fixed key in order to generate one-time keys from a nonce.
|
||||
// However, in this package AES isn't used and the one-time key is specified
|
||||
// directly.
|
||||
package poly1305
|
||||
|
||||
import "crypto/subtle"
|
||||
|
||||
// TagSize is the size, in bytes, of a poly1305 authenticator.
|
||||
const TagSize = 16
|
||||
|
||||
// Sum generates an authenticator for msg using a one-time key and puts the
|
||||
// 16-byte result into out. Authenticating two different messages with the same
|
||||
// key allows an attacker to forge messages at will.
|
||||
func Sum(out *[16]byte, m []byte, key *[32]byte) {
|
||||
h := New(key)
|
||||
h.Write(m)
|
||||
h.Sum(out[:0])
|
||||
}
|
||||
|
||||
// Verify returns true if mac is a valid authenticator for m with the given key.
|
||||
func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
|
||||
var tmp [16]byte
|
||||
Sum(&tmp, m, key)
|
||||
return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
|
||||
}
|
||||
|
||||
// New returns a new MAC computing an authentication
|
||||
// tag of all data written to it with the given key.
|
||||
// This allows writing the message progressively instead
|
||||
// of passing it as a single slice. Common users should use
|
||||
// the Sum function instead.
|
||||
//
|
||||
// The key must be unique for each message, as authenticating
|
||||
// two different messages with the same key allows an attacker
|
||||
// to forge messages at will.
|
||||
func New(key *[32]byte) *MAC {
|
||||
m := &MAC{}
|
||||
initialize(key, &m.macState)
|
||||
return m
|
||||
}
|
||||
|
||||
// MAC is an io.Writer computing an authentication tag
|
||||
// of the data written to it.
|
||||
//
|
||||
// MAC cannot be used like common hash.Hash implementations,
|
||||
// because using a poly1305 key twice breaks its security.
|
||||
// Therefore writing data to a running MAC after calling
|
||||
// Sum or Verify causes it to panic.
|
||||
type MAC struct {
|
||||
mac // platform-dependent implementation
|
||||
|
||||
finalized bool
|
||||
}
|
||||
|
||||
// Size returns the number of bytes Sum will return.
|
||||
func (h *MAC) Size() int { return TagSize }
|
||||
|
||||
// Write adds more data to the running message authentication code.
|
||||
// It never returns an error.
|
||||
//
|
||||
// It must not be called after the first call of Sum or Verify.
|
||||
func (h *MAC) Write(p []byte) (n int, err error) {
|
||||
if h.finalized {
|
||||
panic("poly1305: write to MAC after Sum or Verify")
|
||||
}
|
||||
return h.mac.Write(p)
|
||||
}
|
||||
|
||||
// Sum computes the authenticator of all data written to the
|
||||
// message authentication code.
|
||||
func (h *MAC) Sum(b []byte) []byte {
|
||||
var mac [TagSize]byte
|
||||
h.mac.Sum(&mac)
|
||||
h.finalized = true
|
||||
return append(b, mac[:]...)
|
||||
}
|
||||
|
||||
// Verify returns whether the authenticator of all data written to
|
||||
// the message authentication code matches the expected value.
|
||||
func (h *MAC) Verify(expected []byte) bool {
|
||||
var mac [TagSize]byte
|
||||
h.mac.Sum(&mac)
|
||||
h.finalized = true
|
||||
return subtle.ConstantTimeCompare(expected, mac[:]) == 1
|
||||
}
|
||||
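A minimal usage sketch of the Sum and Verify API above (illustrative only, not part of the vendored file). This vendored copy lives under internal/, so the sketch imports the equivalent, now-deprecated public package path golang.org/x/crypto/poly1305, which exposes the same functions; remember that a key must never authenticate more than one message.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte // one-time key: never reuse it for another message
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("attack at dawn")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)
	fmt.Println(poly1305.Verify(&tag, msg, &key))                      // true
	fmt.Println(poly1305.Verify(&tag, []byte("attack at dusk"), &key)) // false
}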
93
vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
// func update(state *macState, msg []byte)
|
||||
TEXT ·update(SB), $0-32
|
||||
MOVQ state+0(FP), DI
|
||||
MOVQ msg_base+8(FP), SI
|
||||
MOVQ msg_len+16(FP), R15
|
||||
MOVQ (DI), R8
|
||||
MOVQ 8(DI), R9
|
||||
MOVQ 16(DI), R10
|
||||
MOVQ 24(DI), R11
|
||||
MOVQ 32(DI), R12
|
||||
CMPQ R15, $0x10
|
||||
JB bytes_between_0_and_15
|
||||
|
||||
loop:
|
||||
ADDQ (SI), R8
|
||||
ADCQ 8(SI), R9
|
||||
ADCQ $0x01, R10
|
||||
LEAQ 16(SI), SI
|
||||
|
||||
multiply:
|
||||
MOVQ R11, AX
|
||||
MULQ R8
|
||||
MOVQ AX, BX
|
||||
MOVQ DX, CX
|
||||
MOVQ R11, AX
|
||||
MULQ R9
|
||||
ADDQ AX, CX
|
||||
ADCQ $0x00, DX
|
||||
MOVQ R11, R13
|
||||
IMULQ R10, R13
|
||||
ADDQ DX, R13
|
||||
MOVQ R12, AX
|
||||
MULQ R8
|
||||
ADDQ AX, CX
|
||||
ADCQ $0x00, DX
|
||||
MOVQ DX, R8
|
||||
MOVQ R12, R14
|
||||
IMULQ R10, R14
|
||||
MOVQ R12, AX
|
||||
MULQ R9
|
||||
ADDQ AX, R13
|
||||
ADCQ DX, R14
|
||||
ADDQ R8, R13
|
||||
ADCQ $0x00, R14
|
||||
MOVQ BX, R8
|
||||
MOVQ CX, R9
|
||||
MOVQ R13, R10
|
||||
ANDQ $0x03, R10
|
||||
MOVQ R13, BX
|
||||
ANDQ $-4, BX
|
||||
ADDQ BX, R8
|
||||
ADCQ R14, R9
|
||||
ADCQ $0x00, R10
|
||||
SHRQ $0x02, R14, R13
|
||||
SHRQ $0x02, R14
|
||||
ADDQ R13, R8
|
||||
ADCQ R14, R9
|
||||
ADCQ $0x00, R10
|
||||
SUBQ $0x10, R15
|
||||
CMPQ R15, $0x10
|
||||
JAE loop
|
||||
|
||||
bytes_between_0_and_15:
|
||||
TESTQ R15, R15
|
||||
JZ done
|
||||
MOVQ $0x00000001, BX
|
||||
XORQ CX, CX
|
||||
XORQ R13, R13
|
||||
ADDQ R15, SI
|
||||
|
||||
flush_buffer:
|
||||
SHLQ $0x08, BX, CX
|
||||
SHLQ $0x08, BX
|
||||
MOVB -1(SI), R13
|
||||
XORQ R13, BX
|
||||
DECQ SI
|
||||
DECQ R15
|
||||
JNZ flush_buffer
|
||||
ADDQ BX, R8
|
||||
ADCQ CX, R9
|
||||
ADCQ $0x00, R10
|
||||
MOVQ $0x00000010, R15
|
||||
JMP multiply
|
||||
|
||||
done:
|
||||
MOVQ R8, (DI)
|
||||
MOVQ R9, 8(DI)
|
||||
MOVQ R10, 16(DI)
|
||||
RET
|
||||
47
vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le)
|
||||
|
||||
package poly1305
|
||||
|
||||
//go:noescape
|
||||
func update(state *macState, msg []byte)
|
||||
|
||||
// mac is a wrapper for macGeneric that redirects calls that would have gone to
|
||||
// updateGeneric to update.
|
||||
//
|
||||
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
|
||||
// using function pointers would carry a major performance cost.
|
||||
type mac struct{ macGeneric }
|
||||
|
||||
func (h *mac) Write(p []byte) (int, error) {
|
||||
nn := len(p)
|
||||
if h.offset > 0 {
|
||||
n := copy(h.buffer[h.offset:], p)
|
||||
if h.offset+n < TagSize {
|
||||
h.offset += n
|
||||
return nn, nil
|
||||
}
|
||||
p = p[n:]
|
||||
h.offset = 0
|
||||
update(&h.macState, h.buffer[:])
|
||||
}
|
||||
if n := len(p) - (len(p) % TagSize); n > 0 {
|
||||
update(&h.macState, p[:n])
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
h.offset += copy(h.buffer[h.offset:], p)
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (h *mac) Sum(out *[16]byte) {
|
||||
state := h.macState
|
||||
if h.offset > 0 {
|
||||
update(&state, h.buffer[:h.offset])
|
||||
}
|
||||
finalize(out, &state.h, &state.s)
|
||||
}
|
||||
312
vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file provides the generic implementation of Sum and MAC. Other files
|
||||
// might provide optimized assembly implementations of some of this code.
|
||||
|
||||
package poly1305
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
|
||||
// for a 64-byte message is approximately
|
||||
//
|
||||
// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5
|
||||
//
|
||||
// for some secret r and s. It can be computed sequentially like
|
||||
//
|
||||
// for len(msg) > 0:
|
||||
// h += read(msg, 16)
|
||||
// h *= r
|
||||
// h %= 2¹³⁰ - 5
|
||||
// return h + s
|
||||
//
|
||||
// All the complexity is about doing performant constant-time math on numbers
|
||||
// larger than any available numeric type.
|
||||
|
||||
func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
|
||||
h := newMACGeneric(key)
|
||||
h.Write(msg)
|
||||
h.Sum(out)
|
||||
}
|
||||
|
||||
func newMACGeneric(key *[32]byte) macGeneric {
|
||||
m := macGeneric{}
|
||||
initialize(key, &m.macState)
|
||||
return m
|
||||
}
|
||||
|
||||
// macState holds numbers in saturated 64-bit little-endian limbs. That is,
|
||||
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
|
||||
type macState struct {
|
||||
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
|
||||
// can grow larger during and after rounds. It must, however, remain below
|
||||
// 2 * (2¹³⁰ - 5).
|
||||
h [3]uint64
|
||||
// r and s are the private key components.
|
||||
r [2]uint64
|
||||
s [2]uint64
|
||||
}
|
||||
|
||||
type macGeneric struct {
|
||||
macState
|
||||
|
||||
buffer [TagSize]byte
|
||||
offset int
|
||||
}
|
||||
|
||||
// Write splits the incoming message into TagSize chunks, and passes them to
|
||||
// update. It buffers incomplete chunks.
|
||||
func (h *macGeneric) Write(p []byte) (int, error) {
|
||||
nn := len(p)
|
||||
if h.offset > 0 {
|
||||
n := copy(h.buffer[h.offset:], p)
|
||||
if h.offset+n < TagSize {
|
||||
h.offset += n
|
||||
return nn, nil
|
||||
}
|
||||
p = p[n:]
|
||||
h.offset = 0
|
||||
updateGeneric(&h.macState, h.buffer[:])
|
||||
}
|
||||
if n := len(p) - (len(p) % TagSize); n > 0 {
|
||||
updateGeneric(&h.macState, p[:n])
|
||||
p = p[n:]
|
||||
}
|
||||
if len(p) > 0 {
|
||||
h.offset += copy(h.buffer[h.offset:], p)
|
||||
}
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
// Sum flushes the last incomplete chunk from the buffer, if any, and generates
|
||||
// the MAC output. It does not modify its state, in order to allow for multiple
|
||||
// calls to Sum, even if no Write is allowed after Sum.
|
||||
func (h *macGeneric) Sum(out *[TagSize]byte) {
|
||||
state := h.macState
|
||||
if h.offset > 0 {
|
||||
updateGeneric(&state, h.buffer[:h.offset])
|
||||
}
|
||||
finalize(out, &state.h, &state.s)
|
||||
}
|
||||
|
||||
// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It
|
||||
// clears some bits of the secret coefficient to make it possible to implement
|
||||
// multiplication more efficiently.
|
||||
const (
|
||||
rMask0 = 0x0FFFFFFC0FFFFFFF
|
||||
rMask1 = 0x0FFFFFFC0FFFFFFC
|
||||
)
|
||||
|
||||
// initialize loads the 256-bit key into the two 128-bit secret values r and s.
|
||||
func initialize(key *[32]byte, m *macState) {
|
||||
m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
|
||||
m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
|
||||
m.s[0] = binary.LittleEndian.Uint64(key[16:24])
|
||||
m.s[1] = binary.LittleEndian.Uint64(key[24:32])
|
||||
}
|
||||
|
||||
// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
|
||||
// bits.Mul64 and bits.Add64 intrinsics.
|
||||
type uint128 struct {
|
||||
lo, hi uint64
|
||||
}
|
||||
|
||||
func mul64(a, b uint64) uint128 {
|
||||
hi, lo := bits.Mul64(a, b)
|
||||
return uint128{lo, hi}
|
||||
}
|
||||
|
||||
func add128(a, b uint128) uint128 {
|
||||
lo, c := bits.Add64(a.lo, b.lo, 0)
|
||||
hi, c := bits.Add64(a.hi, b.hi, c)
|
||||
if c != 0 {
|
||||
panic("poly1305: unexpected overflow")
|
||||
}
|
||||
return uint128{lo, hi}
|
||||
}
|
||||
|
||||
func shiftRightBy2(a uint128) uint128 {
|
||||
a.lo = a.lo>>2 | (a.hi&3)<<62
|
||||
a.hi = a.hi >> 2
|
||||
return a
|
||||
}
|
||||
|
||||
// updateGeneric absorbs msg into the state.h accumulator. For each chunk m of
|
||||
// 128 bits of message, it computes
|
||||
//
|
||||
// h₊ = (h + m) * r mod 2¹³⁰ - 5
|
||||
//
|
||||
// If the msg length is not a multiple of TagSize, it assumes the last
|
||||
// incomplete chunk is the final one.
|
||||
func updateGeneric(state *macState, msg []byte) {
|
||||
h0, h1, h2 := state.h[0], state.h[1], state.h[2]
|
||||
r0, r1 := state.r[0], state.r[1]
|
||||
|
||||
for len(msg) > 0 {
|
||||
var c uint64
|
||||
|
||||
// For the first step, h + m, we use a chain of bits.Add64 intrinsics.
|
||||
// The resulting value of h might exceed 2¹³⁰ - 5, but will be partially
|
||||
// reduced at the end of the multiplication below.
|
||||
//
|
||||
// The spec requires us to set a bit just above the message size, not to
|
||||
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
|
||||
// add 1 to the most significant (2¹²⁸) limb, h2.
|
||||
if len(msg) >= TagSize {
|
||||
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
|
||||
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
|
||||
h2 += c + 1
|
||||
|
||||
msg = msg[TagSize:]
|
||||
} else {
|
||||
var buf [TagSize]byte
|
||||
copy(buf[:], msg)
|
||||
buf[len(msg)] = 1
|
||||
|
||||
h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
|
||||
h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
|
||||
h2 += c
|
||||
|
||||
msg = nil
|
||||
}
|
||||
|
||||
// Multiplication of big number limbs is similar to elementary school
|
||||
// columnar multiplication. Instead of digits, there are 64-bit limbs.
|
||||
//
|
||||
// We are multiplying a 3 limbs number, h, by a 2 limbs number, r.
|
||||
//
|
||||
// h2 h1 h0 x
|
||||
// r1 r0 =
|
||||
// ----------------
|
||||
// h2r0 h1r0 h0r0 <-- individual 128-bit products
|
||||
// + h2r1 h1r1 h0r1
|
||||
// ------------------------
|
||||
// m3 m2 m1 m0 <-- result in 128-bit overlapping limbs
|
||||
// ------------------------
|
||||
// m3.hi m2.hi m1.hi m0.hi <-- carry propagation
|
||||
// + m3.lo m2.lo m1.lo m0.lo
|
||||
// -------------------------------
|
||||
// t4 t3 t2 t1 t0 <-- final result in 64-bit limbs
|
||||
//
|
||||
// The main difference from pen-and-paper multiplication is that we do
|
||||
// carry propagation in a separate step, as if we wrote two digit sums
|
||||
// at first (the 128-bit limbs), and then carried the tens all at once.
|
||||
|
||||
h0r0 := mul64(h0, r0)
|
||||
h1r0 := mul64(h1, r0)
|
||||
h2r0 := mul64(h2, r0)
|
||||
h0r1 := mul64(h0, r1)
|
||||
h1r1 := mul64(h1, r1)
|
||||
h2r1 := mul64(h2, r1)
|
||||
|
||||
// Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their
|
||||
// top 4 bits cleared by rMask{0,1}, we know that their product is not going
|
||||
// to overflow 64 bits, so we can ignore the high part of the products.
|
||||
//
|
||||
// This also means that the product doesn't have a fifth limb (t4).
|
||||
if h2r0.hi != 0 {
|
||||
panic("poly1305: unexpected overflow")
|
||||
}
|
||||
if h2r1.hi != 0 {
|
||||
panic("poly1305: unexpected overflow")
|
||||
}
|
||||
|
||||
m0 := h0r0
|
||||
m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again
|
||||
m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1.
|
||||
m3 := h2r1
|
||||
|
||||
t0 := m0.lo
|
||||
t1, c := bits.Add64(m1.lo, m0.hi, 0)
|
||||
t2, c := bits.Add64(m2.lo, m1.hi, c)
|
||||
t3, _ := bits.Add64(m3.lo, m2.hi, c)
|
||||
|
||||
// Now we have the result as 4 64-bit limbs, and we need to reduce it
|
||||
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
|
||||
// a cheap partial reduction according to the reduction identity
|
||||
//
|
||||
// c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5
|
||||
//
|
||||
// because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is
|
||||
// likely to be larger than 2¹³⁰ - 5, but still small enough to fit the
|
||||
// assumptions we make about h in the rest of the code.
|
||||
//
|
||||
// See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23
|
||||
|
||||
// We split the final result at the 2¹³⁰ mark into h and cc, the carry.
|
||||
// Note that the carry bits are effectively shifted left by 2, in other
|
||||
// words, cc = c * 4 for the c in the reduction identity.
|
||||
h0, h1, h2 = t0, t1, t2&maskLow2Bits
|
||||
cc := uint128{t2 & maskNotLow2Bits, t3}
|
||||
|
||||
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
|
||||
|
||||
h0, c = bits.Add64(h0, cc.lo, 0)
|
||||
h1, c = bits.Add64(h1, cc.hi, c)
|
||||
h2 += c
|
||||
|
||||
cc = shiftRightBy2(cc)
|
||||
|
||||
h0, c = bits.Add64(h0, cc.lo, 0)
|
||||
h1, c = bits.Add64(h1, cc.hi, c)
|
||||
h2 += c
|
||||
|
||||
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
|
||||
//
|
||||
// 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1
|
||||
}
|
||||
|
||||
state.h[0], state.h[1], state.h[2] = h0, h1, h2
|
||||
}
|
||||
|
||||
const (
|
||||
maskLow2Bits uint64 = 0x0000000000000003
|
||||
maskNotLow2Bits uint64 = ^maskLow2Bits
|
||||
)
|
||||
|
||||
// select64 returns x if v == 1 and y if v == 0, in constant time.
|
||||
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }
|
||||
|
||||
// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order.
|
||||
const (
|
||||
p0 = 0xFFFFFFFFFFFFFFFB
|
||||
p1 = 0xFFFFFFFFFFFFFFFF
|
||||
p2 = 0x0000000000000003
|
||||
)
|
||||
|
||||
// finalize completes the modular reduction of h and computes
|
||||
//
|
||||
// out = h + s mod 2¹²⁸
|
||||
func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
|
||||
h0, h1, h2 := h[0], h[1], h[2]
|
||||
|
||||
// After the partial reduction in updateGeneric, h might be more than
|
||||
// 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction
|
||||
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
|
||||
// result if the subtraction underflows, and t otherwise.
|
||||
|
||||
hMinusP0, b := bits.Sub64(h0, p0, 0)
|
||||
hMinusP1, b := bits.Sub64(h1, p1, b)
|
||||
_, b = bits.Sub64(h2, p2, b)
|
||||
|
||||
// h = h if h < p else h - p
|
||||
h0 = select64(b, h0, hMinusP0)
|
||||
h1 = select64(b, h1, hMinusP1)
|
||||
|
||||
// Finally, we compute the last Poly1305 step
|
||||
//
|
||||
// tag = h + s mod 2¹²⁸
|
||||
//
|
||||
// by just doing a wide addition with the 128 low bits of h and discarding
|
||||
// the overflow.
|
||||
h0, c := bits.Add64(h0, s[0], 0)
|
||||
h1, _ = bits.Add64(h1, s[1], c)
|
||||
|
||||
binary.LittleEndian.PutUint64(out[0:8], h0)
|
||||
binary.LittleEndian.PutUint64(out[8:16], h1)
|
||||
}
|
||||
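The partial reduction in updateGeneric relies on the identity c·2¹³⁰ + n ≡ c·5 + n (mod 2¹³⁰ − 5). A small math/big sketch checking that identity numerically (illustrative only, not part of the vendored file; the carry and low part are arbitrary example values):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2¹³⁰ - 5, the Poly1305 prime.
	p := new(big.Int).Lsh(big.NewInt(1), 130)
	p.Sub(p, big.NewInt(5))

	two130 := new(big.Int).Lsh(big.NewInt(1), 130)

	// Arbitrary carry c and low part n < 2¹³⁰.
	c := big.NewInt(123456789)
	n, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffa", 16)

	// Left side: (c·2¹³⁰ + n) mod p.
	lhs := new(big.Int).Mul(c, two130)
	lhs.Add(lhs, n)
	lhs.Mod(lhs, p)

	// Right side: (c·5 + n) mod p.
	rhs := new(big.Int).Mul(c, big.NewInt(5))
	rhs.Add(rhs, n)
	rhs.Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // true: the two reductions agree
}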
123
vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
// func update(state *macState, msg []byte)
|
||||
TEXT ·update(SB), $0-32
|
||||
MOVV state+0(FP), R4
|
||||
MOVV msg_base+8(FP), R5
|
||||
MOVV msg_len+16(FP), R6
|
||||
|
||||
MOVV $0x10, R7
|
||||
|
||||
MOVV (R4), R8 // h0
|
||||
MOVV 8(R4), R9 // h1
|
||||
MOVV 16(R4), R10 // h2
|
||||
MOVV 24(R4), R11 // r0
|
||||
MOVV 32(R4), R12 // r1
|
||||
|
||||
BLT R6, R7, bytes_between_0_and_15
|
||||
|
||||
loop:
|
||||
MOVV (R5), R14 // msg[0:8]
|
||||
MOVV 8(R5), R16 // msg[8:16]
|
||||
ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow)
|
||||
ADDV R16, R9, R27
|
||||
SGTU R14, R8, R24 // h0.carry
|
||||
SGTU R9, R27, R28
|
||||
ADDV R27, R24, R9 // h1
|
||||
SGTU R27, R9, R24
|
||||
OR R24, R28, R24 // h1.carry
|
||||
ADDV $0x01, R24, R24
|
||||
ADDV R10, R24, R10 // h2
|
||||
|
||||
ADDV $16, R5, R5 // msg = msg[16:]
|
||||
|
||||
multiply:
|
||||
MULV R8, R11, R14 // h0r0.lo
|
||||
MULHVU R8, R11, R15 // h0r0.hi
|
||||
MULV R9, R11, R13 // h1r0.lo
|
||||
MULHVU R9, R11, R16 // h1r0.hi
|
||||
ADDV R13, R15, R15
|
||||
SGTU R13, R15, R24
|
||||
ADDV R24, R16, R16
|
||||
MULV R10, R11, R25
|
||||
ADDV R16, R25, R25
|
||||
MULV R8, R12, R13 // h0r1.lo
|
||||
MULHVU R8, R12, R16 // h0r1.hi
|
||||
ADDV R13, R15, R15
|
||||
SGTU R13, R15, R24
|
||||
ADDV R24, R16, R16
|
||||
MOVV R16, R8
|
||||
MULV R10, R12, R26 // h2r1
|
||||
MULV R9, R12, R13 // h1r1.lo
|
||||
MULHVU R9, R12, R16 // h1r1.hi
|
||||
ADDV R13, R25, R25
|
||||
ADDV R16, R26, R27
|
||||
SGTU R13, R25, R24
|
||||
ADDV R27, R24, R26
|
||||
ADDV R8, R25, R25
|
||||
SGTU R8, R25, R24
|
||||
ADDV R24, R26, R26
|
||||
AND $3, R25, R10
|
||||
AND $-4, R25, R17
|
||||
ADDV R17, R14, R8
|
||||
ADDV R26, R15, R27
|
||||
SGTU R17, R8, R24
|
||||
SGTU R26, R27, R28
|
||||
ADDV R27, R24, R9
|
||||
SGTU R27, R9, R24
|
||||
OR R24, R28, R24
|
||||
ADDV R24, R10, R10
|
||||
SLLV $62, R26, R27
|
||||
SRLV $2, R25, R28
|
||||
SRLV $2, R26, R26
|
||||
OR R27, R28, R25
|
||||
ADDV R25, R8, R8
|
||||
ADDV R26, R9, R27
|
||||
SGTU R25, R8, R24
|
||||
SGTU R26, R27, R28
|
||||
ADDV R27, R24, R9
|
||||
SGTU R27, R9, R24
|
||||
OR R24, R28, R24
|
||||
ADDV R24, R10, R10
|
||||
|
||||
SUBV $16, R6, R6
|
||||
BGE R6, R7, loop
|
||||
|
||||
bytes_between_0_and_15:
|
||||
BEQ R6, R0, done
|
||||
MOVV $1, R14
|
||||
XOR R15, R15
|
||||
ADDV R6, R5, R5
|
||||
|
||||
flush_buffer:
|
||||
MOVBU -1(R5), R25
|
||||
SRLV $56, R14, R24
|
||||
SLLV $8, R15, R28
|
||||
SLLV $8, R14, R14
|
||||
OR R24, R28, R15
|
||||
XOR R25, R14, R14
|
||||
SUBV $1, R6, R6
|
||||
SUBV $1, R5, R5
|
||||
BNE R6, R0, flush_buffer
|
||||
|
||||
ADDV R14, R8, R8
|
||||
SGTU R14, R8, R24
|
||||
ADDV R15, R9, R27
|
||||
SGTU R15, R27, R28
|
||||
ADDV R27, R24, R9
|
||||
SGTU R27, R9, R24
|
||||
OR R24, R28, R24
|
||||
ADDV R10, R24, R10
|
||||
|
||||
MOVV $16, R6
|
||||
JMP multiply
|
||||
|
||||
done:
|
||||
MOVV R8, (R4)
|
||||
MOVV R9, 8(R4)
|
||||
MOVV R10, 16(R4)
|
||||
RET
|
||||
187
vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.s
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego && (ppc64 || ppc64le)
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// This was ported from the amd64 implementation.
|
||||
|
||||
#ifdef GOARCH_ppc64le
|
||||
#define LE_MOVD MOVD
|
||||
#define LE_MOVWZ MOVWZ
|
||||
#define LE_MOVHZ MOVHZ
|
||||
#else
|
||||
#define LE_MOVD MOVDBR
|
||||
#define LE_MOVWZ MOVWBR
|
||||
#define LE_MOVHZ MOVHBR
|
||||
#endif
|
||||
|
||||
#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \
|
||||
LE_MOVD (msg)( R0), t0; \
|
||||
LE_MOVD (msg)(R24), t1; \
|
||||
MOVD $1, t2; \
|
||||
ADDC t0, h0, h0; \
|
||||
ADDE t1, h1, h1; \
|
||||
ADDE t2, h2; \
|
||||
ADD $16, msg
|
||||
|
||||
#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \
|
||||
MULLD r0, h0, t0; \
|
||||
MULHDU r0, h0, t1; \
|
||||
MULLD r0, h1, t4; \
|
||||
MULHDU r0, h1, t5; \
|
||||
ADDC t4, t1, t1; \
|
||||
MULLD r0, h2, t2; \
|
||||
MULHDU r1, h0, t4; \
|
||||
MULLD r1, h0, h0; \
|
||||
ADDE t5, t2, t2; \
|
||||
ADDC h0, t1, t1; \
|
||||
MULLD h2, r1, t3; \
|
||||
ADDZE t4, h0; \
|
||||
MULHDU r1, h1, t5; \
|
||||
MULLD r1, h1, t4; \
|
||||
ADDC t4, t2, t2; \
|
||||
ADDE t5, t3, t3; \
|
||||
ADDC h0, t2, t2; \
|
||||
MOVD $-4, t4; \
|
||||
ADDZE t3; \
|
||||
RLDICL $0, t2, $62, h2; \
|
||||
AND t2, t4, h0; \
|
||||
ADDC t0, h0, h0; \
|
||||
ADDE t3, t1, h1; \
|
||||
SLD $62, t3, t4; \
|
||||
SRD $2, t2; \
|
||||
ADDZE h2; \
|
||||
OR t4, t2, t2; \
|
||||
SRD $2, t3; \
|
||||
ADDC t2, h0, h0; \
|
||||
ADDE t3, h1, h1; \
|
||||
ADDZE h2
|
||||
|
||||
// func update(state *[7]uint64, msg []byte)
|
||||
TEXT ·update(SB), $0-32
|
||||
MOVD state+0(FP), R3
|
||||
MOVD msg_base+8(FP), R4
|
||||
MOVD msg_len+16(FP), R5
|
||||
|
||||
MOVD 0(R3), R8 // h0
|
||||
MOVD 8(R3), R9 // h1
|
||||
MOVD 16(R3), R10 // h2
|
||||
MOVD 24(R3), R11 // r0
|
||||
MOVD 32(R3), R12 // r1
|
||||
|
||||
MOVD $8, R24
|
||||
|
||||
CMP R5, $16
|
||||
BLT bytes_between_0_and_15
|
||||
|
||||
loop:
|
||||
POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22)
|
||||
|
||||
PCALIGN $16
|
||||
multiply:
|
||||
POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21)
|
||||
ADD $-16, R5
|
||||
CMP R5, $16
|
||||
BGE loop
|
||||
|
||||
bytes_between_0_and_15:
|
||||
CMP R5, $0
|
||||
BEQ done
|
||||
MOVD $0, R16 // h0
|
||||
MOVD $0, R17 // h1
|
||||
|
||||
flush_buffer:
|
||||
CMP R5, $8
|
||||
BLE just1
|
||||
|
||||
MOVD $8, R21
|
||||
SUB R21, R5, R21
|
||||
|
||||
// Greater than 8 -- load the rightmost remaining bytes in msg
|
||||
// and put into R17 (h1)
|
||||
LE_MOVD (R4)(R21), R17
|
||||
MOVD $16, R22
|
||||
|
||||
// Find the offset to those bytes
|
||||
SUB R5, R22, R22
|
||||
SLD $3, R22
|
||||
|
||||
// Shift to get only the bytes in msg
|
||||
SRD R22, R17, R17
|
||||
|
||||
// Put 1 at high end
|
||||
MOVD $1, R23
|
||||
SLD $3, R21
|
||||
SLD R21, R23, R23
|
||||
OR R23, R17, R17
|
||||
|
||||
// Remainder is 8
|
||||
MOVD $8, R5
|
||||
|
||||
just1:
|
||||
CMP R5, $8
|
||||
BLT less8
|
||||
|
||||
// Exactly 8
|
||||
LE_MOVD (R4), R16
|
||||
|
||||
CMP R17, $0
|
||||
|
||||
// Check if we've already set R17; if not
|
||||
// set 1 to indicate end of msg.
|
||||
BNE carry
|
||||
MOVD $1, R17
|
||||
BR carry
|
||||
|
||||
less8:
|
||||
MOVD $0, R16 // h0
|
||||
MOVD $0, R22 // shift count
|
||||
CMP R5, $4
|
||||
BLT less4
|
||||
LE_MOVWZ (R4), R16
|
||||
ADD $4, R4
|
||||
ADD $-4, R5
|
||||
MOVD $32, R22
|
||||
|
||||
less4:
|
||||
CMP R5, $2
|
||||
BLT less2
|
||||
LE_MOVHZ (R4), R21
|
||||
SLD R22, R21, R21
|
||||
OR R16, R21, R16
|
||||
ADD $16, R22
|
||||
ADD $-2, R5
|
||||
ADD $2, R4
|
||||
|
||||
less2:
|
||||
CMP R5, $0
|
||||
BEQ insert1
|
||||
MOVBZ (R4), R21
|
||||
SLD R22, R21, R21
|
||||
OR R16, R21, R16
|
||||
ADD $8, R22
|
||||
|
||||
insert1:
|
||||
// Insert 1 at end of msg
|
||||
MOVD $1, R21
|
||||
SLD R22, R21, R21
|
||||
OR R16, R21, R16
|
||||
|
||||
carry:
|
||||
// Add new values to h0, h1, h2
|
||||
ADDC R16, R8
|
||||
ADDE R17, R9
|
||||
ADDZE R10, R10
|
||||
MOVD $16, R5
|
||||
ADD R5, R4
|
||||
BR multiply
|
||||
|
||||
done:
|
||||
// Save h0, h1, h2 in state
|
||||
MOVD R8, 0(R3)
|
||||
MOVD R9, 8(R3)
|
||||
MOVD R10, 16(R3)
|
||||
RET
|
||||
76
vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
package poly1305
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
// updateVX is an assembly implementation of Poly1305 that uses vector
|
||||
// instructions. It must only be called if the vector facility (vx) is
|
||||
// available.
|
||||
//
|
||||
//go:noescape
|
||||
func updateVX(state *macState, msg []byte)
|
||||
|
||||
// mac is a replacement for macGeneric that uses a larger buffer and redirects
|
||||
// calls that would have gone to updateGeneric to updateVX if the vector
|
||||
// facility is installed.
|
||||
//
|
||||
// A larger buffer is required for good performance because the vector
|
||||
// implementation has a higher fixed cost per call than the generic
|
||||
// implementation.
|
||||
type mac struct {
|
||||
macState
|
||||
|
||||
buffer [16 * TagSize]byte // size must be a multiple of block size (16)
|
||||
offset int
|
||||
}
|
||||
|
||||
func (h *mac) Write(p []byte) (int, error) {
|
||||
nn := len(p)
|
||||
if h.offset > 0 {
|
||||
n := copy(h.buffer[h.offset:], p)
|
||||
if h.offset+n < len(h.buffer) {
|
||||
h.offset += n
|
||||
return nn, nil
|
||||
}
|
||||
p = p[n:]
|
||||
h.offset = 0
|
||||
if cpu.S390X.HasVX {
|
||||
updateVX(&h.macState, h.buffer[:])
|
||||
} else {
|
||||
updateGeneric(&h.macState, h.buffer[:])
|
||||
}
|
||||
}
|
||||
|
||||
tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
|
||||
body := len(p) - tail // number of bytes to process now
|
||||
if body > 0 {
|
||||
if cpu.S390X.HasVX {
|
||||
updateVX(&h.macState, p[:body])
|
||||
} else {
|
||||
updateGeneric(&h.macState, p[:body])
|
||||
}
|
||||
}
|
||||
h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
|
||||
return nn, nil
|
||||
}
|
||||
|
||||
func (h *mac) Sum(out *[TagSize]byte) {
|
||||
state := h.macState
|
||||
remainder := h.buffer[:h.offset]
|
||||
|
||||
// Use the generic implementation if we have 2 or fewer blocks left
|
||||
// to sum. The vector implementation has a higher startup time.
|
||||
if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
|
||||
updateVX(&state, remainder)
|
||||
} else if len(remainder) > 0 {
|
||||
updateGeneric(&state, remainder)
|
||||
}
|
||||
finalize(out, &state.h, &state.s)
|
||||
}
|
||||
503
vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s
generated
vendored
Normal file
@@ -0,0 +1,503 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gc && !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// This implementation of Poly1305 uses the vector facility (vx)
|
||||
// to process up to 2 blocks (32 bytes) per iteration using an
|
||||
// algorithm based on the one described in:
|
||||
//
|
||||
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
|
||||
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
|
||||
//
|
||||
// This algorithm uses 5 26-bit limbs to represent a 130-bit
|
||||
// value. These limbs are, for the most part, zero extended and
|
||||
// placed into 64-bit vector register elements. Each vector
|
||||
// register is 128-bits wide and so holds 2 of these elements.
|
||||
// Using 26-bit limbs allows us plenty of headroom to accommodate
|
||||
// accumulations before and after multiplication without
|
||||
// overflowing either 32-bits (before multiplication) or 64-bits
|
||||
// (after multiplication).
|
||||
//
|
||||
// In order to parallelise the operations required to calculate
|
||||
// the sum we use two separate accumulators and then sum those
|
||||
// in an extra final step. For compatibility with the generic
|
||||
// implementation we perform this summation at the end of every
|
||||
// updateVX call.
|
||||
//
|
||||
// To use two accumulators we must multiply the message blocks
|
||||
// by r² rather than r. Only the final message block should be
|
||||
// multiplied by r.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// We want to calculate the sum (h) for a 64 byte message (m):
|
||||
//
|
||||
// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
|
||||
//
|
||||
// To do this we split the calculation into the even indices
|
||||
// and odd indices of the message. These form our SIMD 'lanes':
|
||||
//
|
||||
// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
|
||||
// m[16:32]r³ + m[48:64]r <- lane 1
|
||||
//
|
||||
// To calculate this iteratively we refactor so that both lanes
|
||||
// are written in terms of r² and r:
|
||||
//
|
||||
// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
|
||||
// (m[16:32]r² + m[48:64])r <- lane 1
|
||||
// ^ ^
|
||||
// | coefficients for second iteration
|
||||
// coefficients for first iteration
|
||||
//
|
||||
// So in this case we would have two iterations. In the first
|
||||
// both lanes are multiplied by r². In the second only the
|
||||
// first lane is multiplied by r² and the second lane is
|
||||
// instead multiplied by r. This gives us the odd and even
|
||||
// powers of r that we need from the original equation.
|
||||
//
|
||||
// Notation:
|
||||
//
|
||||
// h - accumulator
|
||||
// r - key
|
||||
// m - message
|
||||
//
|
||||
// [a, b] - SIMD register holding two 64-bit values
|
||||
// [a, b, c, d] - SIMD register holding four 32-bit values
|
||||
// xᵢ[n] - limb n of variable x with bit width i
|
||||
//
|
||||
// Limbs are expressed in little endian order, so for 26-bit
|
||||
// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
|
||||
// will be the least significant limb.
|
||||
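The two-lane refactoring described in the comment above can be checked independently with math/big arithmetic modulo 2¹³⁰ - 5. The key r and the four message blocks below are arbitrary placeholder integers, not real Poly1305 inputs; the point is only that the direct and refactored forms agree.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5
	r := big.NewInt(123456789)
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

	pow := func(e int64) *big.Int { return new(big.Int).Exp(r, big.NewInt(e), p) }
	mul := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(a, b), p) }
	add := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(a, b), p) }

	// Direct form: h = m[0]r⁴ + m[1]r³ + m[2]r² + m[3]r
	direct := add(add(mul(m[0], pow(4)), mul(m[1], pow(3))), add(mul(m[2], pow(2)), mul(m[3], pow(1))))

	// Two-lane form: h = (m[0]r² + m[2])r² + (m[1]r² + m[3])r
	lane0 := mul(add(mul(m[0], pow(2)), m[2]), pow(2))
	lane1 := mul(add(mul(m[1], pow(2)), m[3]), pow(1))

	fmt.Println(direct.Cmp(add(lane0, lane1)) == 0) // true
}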
|
||||
// masking constants
|
||||
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
|
||||
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
|
||||
|
||||
// expansion constants (see EXPAND macro)
|
||||
#define EX0 V2
|
||||
#define EX1 V3
|
||||
#define EX2 V4
|
||||
|
||||
// key (r², r or 1 depending on context)
|
||||
#define R_0 V5
|
||||
#define R_1 V6
|
||||
#define R_2 V7
|
||||
#define R_3 V8
|
||||
#define R_4 V9
|
||||
|
||||
// precalculated coefficients (5r², 5r or 0 depending on context)
|
||||
#define R5_1 V10
|
||||
#define R5_2 V11
|
||||
#define R5_3 V12
|
||||
#define R5_4 V13
|
||||
|
||||
// message block (m)
|
||||
#define M_0 V14
|
||||
#define M_1 V15
|
||||
#define M_2 V16
|
||||
#define M_3 V17
|
||||
#define M_4 V18
|
||||
|
||||
// accumulator (h)
|
||||
#define H_0 V19
|
||||
#define H_1 V20
|
||||
#define H_2 V21
|
||||
#define H_3 V22
|
||||
#define H_4 V23
|
||||
|
||||
// temporary registers (for short-lived values)
|
||||
#define T_0 V24
|
||||
#define T_1 V25
|
||||
#define T_2 V26
|
||||
#define T_3 V27
|
||||
#define T_4 V28
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA, $0x30
|
||||
// EX0
|
||||
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
|
||||
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
|
||||
// EX1
|
||||
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
|
||||
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
|
||||
// EX2
|
||||
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
|
||||
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
|
||||
|
||||
// MULTIPLY multiplies each lane of f and g, partially reduced
|
||||
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
|
||||
// in each lane that need to be reduced further to produce the
|
||||
// final result.
|
||||
//
|
||||
// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
|
||||
//
|
||||
// Note that the multiplication by 5 of the high bits is
|
||||
// achieved by precalculating the multiplication of four of the
|
||||
// g coefficients by 5. These are g51-g54.
|
||||
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
|
||||
VMLOF f0, g0, h0 \
|
||||
VMLOF f0, g3, h3 \
|
||||
VMLOF f0, g1, h1 \
|
||||
VMLOF f0, g4, h4 \
|
||||
VMLOF f0, g2, h2 \
|
||||
VMLOF f1, g54, T_0 \
|
||||
VMLOF f1, g2, T_3 \
|
||||
VMLOF f1, g0, T_1 \
|
||||
VMLOF f1, g3, T_4 \
|
||||
VMLOF f1, g1, T_2 \
|
||||
VMALOF f2, g53, h0, h0 \
|
||||
VMALOF f2, g1, h3, h3 \
|
||||
VMALOF f2, g54, h1, h1 \
|
||||
VMALOF f2, g2, h4, h4 \
|
||||
VMALOF f2, g0, h2, h2 \
|
||||
VMALOF f3, g52, T_0, T_0 \
|
||||
VMALOF f3, g0, T_3, T_3 \
|
||||
VMALOF f3, g53, T_1, T_1 \
|
||||
VMALOF f3, g1, T_4, T_4 \
|
||||
VMALOF f3, g54, T_2, T_2 \
|
||||
VMALOF f4, g51, h0, h0 \
|
||||
VMALOF f4, g54, h3, h3 \
|
||||
VMALOF f4, g52, h1, h1 \
|
||||
VMALOF f4, g0, h4, h4 \
|
||||
VMALOF f4, g53, h2, h2 \
|
||||
VAG T_0, h0, h0 \
|
||||
VAG T_3, h3, h3 \
|
||||
VAG T_1, h1, h1 \
|
||||
VAG T_4, h4, h4 \
|
||||
VAG T_2, h2, h2
|
||||
|
||||
// REDUCE performs the following carry operations in four
|
||||
// stages, as specified in Bernstein & Schwabe:
|
||||
//
|
||||
// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
|
||||
// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
|
||||
// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
|
||||
// 4: h₂₆[3]->h₂₆[4]
|
||||
//
|
||||
// The result is that all of the limbs are limited to 26-bits
|
||||
// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
|
||||
//
|
||||
// Note that although each limb is aligned at 26-bit intervals
|
||||
// they may contain values that exceed 2²⁶ - 1, hence the need
|
||||
// to carry the excess bits in each limb.
|
||||
#define REDUCE(h0, h1, h2, h3, h4) \
|
||||
VESRLG $26, h0, T_0 \
|
||||
VESRLG $26, h3, T_1 \
|
||||
VN MOD26, h0, h0 \
|
||||
VN MOD26, h3, h3 \
|
||||
VAG T_0, h1, h1 \
|
||||
VAG T_1, h4, h4 \
|
||||
VESRLG $26, h1, T_2 \
|
||||
VESRLG $26, h4, T_3 \
|
||||
VN MOD26, h1, h1 \
|
||||
VN MOD26, h4, h4 \
|
||||
VESLG $2, T_3, T_4 \
|
||||
VAG T_3, T_4, T_4 \
|
||||
VAG T_2, h2, h2 \
|
||||
VAG T_4, h0, h0 \
|
||||
VESRLG $26, h2, T_0 \
|
||||
VESRLG $26, h0, T_1 \
|
||||
VN MOD26, h2, h2 \
|
||||
VN MOD26, h0, h0 \
|
||||
VAG T_0, h3, h3 \
|
||||
VAG T_1, h1, h1 \
|
||||
VESRLG $26, h3, T_2 \
|
||||
VN MOD26, h3, h3 \
|
||||
VAG T_2, h4, h4
|
||||
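The same four carry stages can be written as scalar code on an array of 26-bit limbs, which may make the REDUCE macro easier to follow. This is an illustrative sketch only; the wrap of the top carry back into limb 0 is multiplied by 5 because 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5).

package main

import "fmt"

func carry(h *[5]uint64) {
	const mask = 1<<26 - 1
	// stage 1: h₂₆[0]→h₂₆[1], h₂₆[3]→h₂₆[4]
	h[1] += h[0] >> 26
	h[0] &= mask
	h[4] += h[3] >> 26
	h[3] &= mask
	// stage 2: h₂₆[1]→h₂₆[2], h₂₆[4]→h₂₆[0] (×5 wrap)
	h[2] += h[1] >> 26
	h[1] &= mask
	h[0] += 5 * (h[4] >> 26)
	h[4] &= mask
	// stage 3: h₂₆[0]→h₂₆[1], h₂₆[2]→h₂₆[3]
	h[1] += h[0] >> 26
	h[0] &= mask
	h[3] += h[2] >> 26
	h[2] &= mask
	// stage 4: h₂₆[3]→h₂₆[4]
	h[4] += h[3] >> 26
	h[3] &= mask
}

func main() {
	h := [5]uint64{1 << 30, 1 << 30, 1 << 30, 1 << 30, 1 << 30}
	carry(&h)
	fmt.Println(h) // [80 16 16 16 16]: every limb is back under 26 bits
}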
|
||||
// EXPAND splits the 128-bit little-endian values in0 and in1
|
||||
// into 26-bit big-endian limbs and places the results into
|
||||
// the first and second lane of d₂₆[0:4] respectively.
|
||||
//
|
||||
// The EX0, EX1 and EX2 constants are arrays of byte indices
|
||||
// for permutation. The permutation both reverses the bytes
|
||||
// in the input and ensures the bytes are copied into the
|
||||
// destination limb ready to be shifted into their final
|
||||
// position.
|
||||
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
|
||||
VPERM in0, in1, EX0, d0 \
|
||||
VPERM in0, in1, EX1, d2 \
|
||||
VPERM in0, in1, EX2, d4 \
|
||||
VESRLG $26, d0, d1 \
|
||||
VESRLG $30, d2, d3 \
|
||||
VESRLG $4, d2, d2 \
|
||||
VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
|
||||
VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
|
||||
VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
|
||||
VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
|
||||
VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
|
||||
|
||||
// func updateVX(state *macState, msg []byte)
|
||||
TEXT ·updateVX(SB), NOSPLIT, $0
|
||||
MOVD state+0(FP), R1
|
||||
LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
|
||||
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
// generate masks
|
||||
VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
|
||||
VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
|
||||
|
||||
// load h (accumulator) and r (key) from state
|
||||
VZERO T_1 // [0, 0]
|
||||
VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
|
||||
VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
|
||||
VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
|
||||
VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
|
||||
VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
|
||||
|
||||
// unpack h and r into 26-bit limbs
|
||||
// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
|
||||
VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
|
||||
VZERO H_1 // [0, 0]
|
||||
VZERO H_3 // [0, 0]
|
||||
VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
|
||||
VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
|
||||
VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
|
||||
VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
|
||||
VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[3], r₂₆[3]]
|
||||
VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
|
||||
VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
|
||||
VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
|
||||
|
||||
// replicate r across all 4 vector elements
|
||||
VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
|
||||
VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
|
||||
VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
|
||||
VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
|
||||
VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
|
||||
|
||||
// zero out lane 1 of h
|
||||
VLEIG $1, $0, H_0 // [h₂₆[0], 0]
|
||||
VLEIG $1, $0, H_1 // [h₂₆[1], 0]
|
||||
VLEIG $1, $0, H_2 // [h₂₆[2], 0]
|
||||
VLEIG $1, $0, H_3 // [h₂₆[3], 0]
|
||||
VLEIG $1, $0, H_4 // [h₂₆[4], 0]
|
||||
|
||||
// calculate 5r (ignore least significant limb)
|
||||
VREPIF $5, T_0
|
||||
VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
|
||||
VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
|
||||
VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
|
||||
VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
|
||||
|
||||
// skip r² calculation if we are only calculating one block
|
||||
CMPBLE R3, $16, skip
|
||||
|
||||
// calculate r²
|
||||
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
|
||||
REDUCE(M_0, M_1, M_2, M_3, M_4)
|
||||
VGBM $0x0f0f, T_0
|
||||
VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
|
||||
VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
|
||||
VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
|
||||
VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
|
||||
VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
|
||||
|
||||
// calculate 5r² (ignore least significant limb)
|
||||
VREPIF $5, T_0
|
||||
VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
|
||||
VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
|
||||
VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
|
||||
VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
|
||||
|
||||
loop:
|
||||
CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
|
||||
|
||||
// load next 2 blocks from message
|
||||
VLM (R2), T_0, T_1
|
||||
|
||||
// update message slice
|
||||
SUB $32, R3
|
||||
MOVD $32(R2), R2
|
||||
|
||||
// unpack message blocks into 26-bit big-endian limbs
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// add 2¹²⁸ to each message block value
|
||||
VLEIB $4, $1, M_4
|
||||
VLEIB $12, $1, M_4
|
||||
|
||||
multiply:
|
||||
// accumulate the incoming message
|
||||
VAG H_0, M_0, M_0
|
||||
VAG H_3, M_3, M_3
|
||||
VAG H_1, M_1, M_1
|
||||
VAG H_4, M_4, M_4
|
||||
VAG H_2, M_2, M_2
|
||||
|
||||
// multiply the accumulator by the key coefficient
|
||||
MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// carry and partially reduce the partial products
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
CMPBNE R3, $0, loop
|
||||
|
||||
finish:
|
||||
// sum lane 0 and lane 1 and put the result in lane 1
|
||||
VZERO T_0
|
||||
VSUMQG H_0, T_0, H_0
|
||||
VSUMQG H_3, T_0, H_3
|
||||
VSUMQG H_1, T_0, H_1
|
||||
VSUMQG H_4, T_0, H_4
|
||||
VSUMQG H_2, T_0, H_2
|
||||
|
||||
// reduce again after summation
|
||||
// TODO(mundaym): there might be a more efficient way to do this
|
||||
// now that we only have 1 active lane. For example, we could
|
||||
// simultaneously pack the values as we reduce them.
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
|
||||
// TODO(mundaym): in testing this final carry was unnecessary.
|
||||
// Needs a proof before it can be removed though.
|
||||
VESRLG $26, H_1, T_1
|
||||
VN MOD26, H_1, H_1
|
||||
VAQ T_1, H_2, H_2
|
||||
VESRLG $26, H_2, T_2
|
||||
VN MOD26, H_2, H_2
|
||||
VAQ T_2, H_3, H_3
|
||||
VESRLG $26, H_3, T_3
|
||||
VN MOD26, H_3, H_3
|
||||
VAQ T_3, H_4, H_4
|
||||
|
||||
// h is now < 2(2¹³⁰ - 5)
|
||||
// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
|
||||
VESLG $26, H_1, H_1
|
||||
VESLG $26, H_3, H_3
|
||||
VO H_0, H_1, H_0
|
||||
VO H_2, H_3, H_2
|
||||
VESLG $4, H_2, H_2
|
||||
VLEIB $7, $48, H_1
|
||||
VSLB H_1, H_2, H_2
|
||||
VO H_0, H_2, H_0
|
||||
VLEIB $7, $104, H_1
|
||||
VSLB H_1, H_4, H_3
|
||||
VO H_3, H_0, H_0
|
||||
VLEIB $7, $24, H_1
|
||||
VSRLB H_1, H_4, H_1
|
||||
|
||||
// update state
|
||||
VSTEG $1, H_0, 0(R1)
|
||||
VSTEG $0, H_0, 8(R1)
|
||||
VSTEG $1, H_1, 16(R1)
|
||||
RET
|
||||
|
||||
b2: // 2 or fewer blocks remaining
|
||||
CMPBLE R3, $16, b1
|
||||
|
||||
// Load the 2 remaining blocks (17-32 bytes remaining).
|
||||
MOVD $-17(R3), R0 // index of final byte to load modulo 16
|
||||
VL (R2), T_0 // load full 16 byte block
|
||||
VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
|
||||
|
||||
// The Poly1305 algorithm requires that a 1 bit be appended to
|
||||
// each message block. If the final block is less than 16 bytes
|
||||
// long then it is easiest to insert the 1 before the message
|
||||
// block is split into 26-bit limbs. If, on the other hand, the
|
||||
// final message block is 16 bytes long then we append the 1 bit
|
||||
// after expansion as normal.
|
||||
MOVBZ $1, R0
|
||||
MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
|
||||
CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
|
||||
VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
|
||||
|
||||
// Split both blocks into 26-bit limbs in the appropriate lanes.
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// Append a 1 byte to the end of the second to last block.
|
||||
VLEIB $4, $1, M_4
|
||||
|
||||
// Append a 1 byte to the end of the last block only if it is a
|
||||
// full 16 byte block.
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $12, $1, M_4
|
||||
|
||||
// Finally, set up the coefficients for the final multiplication.
|
||||
// We have previously saved r and 5r in the 32-bit even indexes
|
||||
// of the R_[0-4] and R5_[1-4] coefficient registers.
|
||||
//
|
||||
// We want lane 0 to be multiplied by r² so that it can be kept the
|
||||
// same. We want lane 1 to be multiplied by r so we need to move
|
||||
// the saved r value into the 32-bit odd index in lane 1 by
|
||||
// rotating the 64-bit lane by 32.
|
||||
VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
|
||||
VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
|
||||
VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
|
||||
VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
|
||||
VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
|
||||
VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
|
||||
VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
|
||||
VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
|
||||
VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
|
||||
VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
|
||||
skip:
|
||||
CMPBEQ R3, $0, finish
|
||||
|
||||
b1: // 1 block remaining
|
||||
|
||||
// Load the final block (1-16 bytes). This will be placed into
|
||||
// lane 0.
|
||||
MOVD $-1(R3), R0
|
||||
VLL R0, (R2), T_0 // pad to 16 bytes with zeros
|
||||
|
||||
// The Poly1305 algorithm requires that a 1 bit be appended to
|
||||
// each message block. If the final block is less than 16 bytes
|
||||
// long then it is easiest to insert the 1 before the message
|
||||
// block is split into 26-bit limbs. If, on the other hand, the
|
||||
// final message block is 16 bytes long then we append the 1 bit
|
||||
// after expansion as normal.
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, T_0
|
||||
|
||||
// Set the message block in lane 1 to the value 0 so that it
|
||||
// can be accumulated without affecting the final result.
|
||||
VZERO T_1
|
||||
|
||||
// Split the final message block into 26-bit limbs in lane 0.
|
||||
// Lane 1 will contain 0.
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// Append a 1 byte to the end of the last block only if it is a
|
||||
// full 16 byte block.
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $4, $1, M_4
|
||||
|
||||
// We have previously saved r and 5r in the 32-bit even indexes
|
||||
// of the R_[0-4] and R5_[1-4] coefficient registers.
|
||||
//
|
||||
// We want lane 0 to be multiplied by r so we need to move the
|
||||
// saved r value into the 32-bit odd index in lane 0. We want
|
||||
// lane 1 to be set to the value 1. This makes multiplication
|
||||
// a no-op. We do this by setting lane 1 in every register to 0
|
||||
// and then just setting the 32-bit index 3 in R_0 to 1.
|
||||
VZERO T_0
|
||||
MOVD $0, R0
|
||||
MOVD $0x10111213, R12
|
||||
VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
|
||||
VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
|
||||
VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
|
||||
VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
|
||||
VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
|
||||
VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
|
||||
VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
|
||||
VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
|
||||
VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
|
||||
VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
|
||||
|
||||
// Set the value of lane 1 to be 1.
|
||||
VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
95
vendor/golang.org/x/crypto/sha3/hashes.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package sha3 implements the SHA-3 hash algorithms and the SHAKE extendable
|
||||
// output functions defined in FIPS 202.
|
||||
//
|
||||
// Most of this package is a wrapper around the crypto/sha3 package in the
|
||||
// standard library. The only exception is the legacy Keccak hash functions.
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"crypto/sha3"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// New224 creates a new SHA3-224 hash.
|
||||
// Its generic security strength is 224 bits against preimage attacks,
|
||||
// and 112 bits against collision attacks.
|
||||
//
|
||||
// It is a wrapper for the [sha3.New224] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func New224() hash.Hash {
|
||||
return sha3.New224()
|
||||
}
|
||||
|
||||
// New256 creates a new SHA3-256 hash.
|
||||
// Its generic security strength is 256 bits against preimage attacks,
|
||||
// and 128 bits against collision attacks.
|
||||
//
|
||||
// It is a wrapper for the [sha3.New256] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func New256() hash.Hash {
|
||||
return sha3.New256()
|
||||
}
|
||||
|
||||
// New384 creates a new SHA3-384 hash.
|
||||
// Its generic security strength is 384 bits against preimage attacks,
|
||||
// and 192 bits against collision attacks.
|
||||
//
|
||||
// It is a wrapper for the [sha3.New384] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func New384() hash.Hash {
|
||||
return sha3.New384()
|
||||
}
|
||||
|
||||
// New512 creates a new SHA3-512 hash.
|
||||
// Its generic security strength is 512 bits against preimage attacks,
|
||||
// and 256 bits against collision attacks.
|
||||
//
|
||||
// It is a wrapper for the [sha3.New512] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func New512() hash.Hash {
|
||||
return sha3.New512()
|
||||
}
|
||||
|
||||
// Sum224 returns the SHA3-224 digest of the data.
|
||||
//
|
||||
// It is a wrapper for the [sha3.Sum224] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func Sum224(data []byte) [28]byte {
|
||||
return sha3.Sum224(data)
|
||||
}
|
||||
|
||||
// Sum256 returns the SHA3-256 digest of the data.
|
||||
//
|
||||
// It is a wrapper for the [sha3.Sum256] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func Sum256(data []byte) [32]byte {
|
||||
return sha3.Sum256(data)
|
||||
}
|
||||
|
||||
// Sum384 returns the SHA3-384 digest of the data.
|
||||
//
|
||||
// It is a wrapper for the [sha3.Sum384] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func Sum384(data []byte) [48]byte {
|
||||
return sha3.Sum384(data)
|
||||
}
|
||||
|
||||
// Sum512 returns the SHA3-512 digest of the data.
|
||||
//
|
||||
// It is a wrapper for the [sha3.Sum512] function in the standard library.
|
||||
//
|
||||
//go:fix inline
|
||||
func Sum512(data []byte) [64]byte {
|
||||
return sha3.Sum512(data)
|
||||
}
|
||||
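A short usage sketch of the wrappers above. Both calls compute the same SHA3-256 digest; the input string is an arbitrary placeholder.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// One-shot digest.
	sum := sha3.Sum256([]byte("hello"))
	fmt.Printf("%x\n", sum)

	// Streaming digest through the hash.Hash wrapper.
	h := sha3.New256()
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil)) // same output as above
}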
263
vendor/golang.org/x/crypto/sha3/legacy_hash.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This implementation is only used for NewLegacyKeccak256 and
|
||||
// NewLegacyKeccak512, which are not implemented by crypto/sha3.
|
||||
// All other functions in this package are wrappers around crypto/sha3.
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
const (
|
||||
dsbyteKeccak = 0b00000001
|
||||
|
||||
// rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in
|
||||
// bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits.
|
||||
rateK256 = (1600 - 256) / 8
|
||||
rateK512 = (1600 - 512) / 8
|
||||
rateK1024 = (1600 - 1024) / 8
|
||||
)
|
||||
|
||||
// NewLegacyKeccak256 creates a new Keccak-256 hash.
|
||||
//
|
||||
// Only use this function if you require compatibility with an existing cryptosystem
|
||||
// that uses non-standard padding. All other users should use New256 instead.
|
||||
func NewLegacyKeccak256() hash.Hash {
|
||||
return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak}
|
||||
}
|
||||
|
||||
// NewLegacyKeccak512 creates a new Keccak-512 hash.
|
||||
//
|
||||
// Only use this function if you require compatibility with an existing cryptosystem
|
||||
// that uses non-standard padding. All other users should use New512 instead.
|
||||
func NewLegacyKeccak512() hash.Hash {
|
||||
return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak}
|
||||
}
|
||||
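A minimal sketch of driving the legacy sponge implemented in this file through the standard hash.Hash interface; the input is an arbitrary placeholder.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte("some data"))         // absorb
	fmt.Printf("%x\n", h.Sum(nil))       // pad with 0x01, squeeze 32 bytes
	fmt.Println(h.Size(), h.BlockSize()) // 32, 136 (rateK512 in bytes)
}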
|
||||
// spongeDirection indicates the direction bytes are flowing through the sponge.
|
||||
type spongeDirection int
|
||||
|
||||
const (
|
||||
// spongeAbsorbing indicates that the sponge is absorbing input.
|
||||
spongeAbsorbing spongeDirection = iota
|
||||
// spongeSqueezing indicates that the sponge is being squeezed.
|
||||
spongeSqueezing
|
||||
)
|
||||
|
||||
type state struct {
|
||||
a [1600 / 8]byte // main state of the hash
|
||||
|
||||
// a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR
|
||||
// into before running the permutation. If squeezing, it's the remaining
|
||||
// output to produce before running the permutation.
|
||||
n, rate int
|
||||
|
||||
// dsbyte contains the "domain separation" bits and the first bit of
|
||||
// the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
|
||||
// SHA-3 and SHAKE functions by appending bitstrings to the message.
|
||||
// Using a little-endian bit-ordering convention, these are "01" for SHA-3
|
||||
// and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
|
||||
// padding rule from section 5.1 is applied to pad the message to a multiple
|
||||
// of the rate, which involves adding a "1" bit, zero or more "0" bits, and
|
||||
// a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
|
||||
// giving 00000110b (0x06) and 00011111b (0x1f).
|
||||
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
|
||||
// "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
|
||||
// Extendable-Output Functions (May 2014)"
|
||||
dsbyte byte
|
||||
|
||||
outputLen int // the default output size in bytes
|
||||
state spongeDirection // whether the sponge is absorbing or squeezing
|
||||
}
|
||||
|
||||
// BlockSize returns the rate of the sponge underlying this hash function.
|
||||
func (d *state) BlockSize() int { return d.rate }
|
||||
|
||||
// Size returns the output size of the hash function in bytes.
|
||||
func (d *state) Size() int { return d.outputLen }
|
||||
|
||||
// Reset clears the internal state by zeroing the sponge state and
|
||||
// the buffer indexes, and setting the sponge direction to absorbing.
|
||||
func (d *state) Reset() {
|
||||
// Zero the permutation's state.
|
||||
for i := range d.a {
|
||||
d.a[i] = 0
|
||||
}
|
||||
d.state = spongeAbsorbing
|
||||
d.n = 0
|
||||
}
|
||||
|
||||
func (d *state) clone() *state {
|
||||
ret := *d
|
||||
return &ret
|
||||
}
|
||||
|
||||
// permute applies the KeccakF-1600 permutation.
|
||||
func (d *state) permute() {
|
||||
var a *[25]uint64
|
||||
if cpu.IsBigEndian {
|
||||
a = new([25]uint64)
|
||||
for i := range a {
|
||||
a[i] = binary.LittleEndian.Uint64(d.a[i*8:])
|
||||
}
|
||||
} else {
|
||||
a = (*[25]uint64)(unsafe.Pointer(&d.a))
|
||||
}
|
||||
|
||||
keccakF1600(a)
|
||||
d.n = 0
|
||||
|
||||
if cpu.IsBigEndian {
|
||||
for i := range a {
|
||||
binary.LittleEndian.PutUint64(d.a[i*8:], a[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// padAndPermute appends the domain separation bits in dsbyte, applies
|
||||
// the multi-bitrate 10..1 padding rule, and permutes the state.
|
||||
func (d *state) padAndPermute() {
|
||||
// Pad with this instance's domain-separator bits. We know that there's
|
||||
// at least one byte of space in the sponge because, if it were full,
|
||||
// permute would have been called to empty it. dsbyte also contains the
|
||||
// first one bit for the padding. See the comment in the state struct.
|
||||
d.a[d.n] ^= d.dsbyte
|
||||
// This adds the final one bit for the padding. Because of the way that
|
||||
// bits are numbered from the LSB upwards, the final bit is the MSB of
|
||||
// the last byte.
|
||||
d.a[d.rate-1] ^= 0x80
|
||||
// Apply the permutation
|
||||
d.permute()
|
||||
d.state = spongeSqueezing
|
||||
}
|
||||
|
||||
// Write absorbs more data into the hash's state. It panics if any
|
||||
// output has already been read.
|
||||
func (d *state) Write(p []byte) (n int, err error) {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: Write after Read")
|
||||
}
|
||||
|
||||
n = len(p)
|
||||
|
||||
for len(p) > 0 {
|
||||
x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p)
|
||||
d.n += x
|
||||
p = p[x:]
|
||||
|
||||
// If the sponge is full, apply the permutation.
|
||||
if d.n == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Read squeezes an arbitrary number of bytes from the sponge.
|
||||
func (d *state) Read(out []byte) (n int, err error) {
|
||||
// If we're still absorbing, pad and apply the permutation.
|
||||
if d.state == spongeAbsorbing {
|
||||
d.padAndPermute()
|
||||
}
|
||||
|
||||
n = len(out)
|
||||
|
||||
// Now, do the squeezing.
|
||||
for len(out) > 0 {
|
||||
// Apply the permutation if we've squeezed the sponge dry.
|
||||
if d.n == d.rate {
|
||||
d.permute()
|
||||
}
|
||||
|
||||
x := copy(out, d.a[d.n:d.rate])
|
||||
d.n += x
|
||||
out = out[x:]
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sum applies padding to the hash state and then squeezes out the desired
|
||||
// number of output bytes. It panics if any output has already been read.
|
||||
func (d *state) Sum(in []byte) []byte {
|
||||
if d.state != spongeAbsorbing {
|
||||
panic("sha3: Sum after Read")
|
||||
}
|
||||
|
||||
// Make a copy of the original hash so that the caller can keep writing
|
||||
// and summing.
|
||||
dup := d.clone()
|
||||
hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
|
||||
dup.Read(hash)
|
||||
return append(in, hash...)
|
||||
}
|
||||
|
||||
const (
|
||||
magicKeccak = "sha\x0b"
|
||||
// magic || rate || main state || n || sponge direction
|
||||
marshaledSize = len(magicKeccak) + 1 + 200 + 1 + 1
|
||||
)
|
||||
|
||||
func (d *state) MarshalBinary() ([]byte, error) {
|
||||
return d.AppendBinary(make([]byte, 0, marshaledSize))
|
||||
}
|
||||
|
||||
func (d *state) AppendBinary(b []byte) ([]byte, error) {
|
||||
switch d.dsbyte {
|
||||
case dsbyteKeccak:
|
||||
b = append(b, magicKeccak...)
|
||||
default:
|
||||
panic("unknown dsbyte")
|
||||
}
|
||||
// rate is at most 168, and n is at most rate.
|
||||
b = append(b, byte(d.rate))
|
||||
b = append(b, d.a[:]...)
|
||||
b = append(b, byte(d.n), byte(d.state))
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (d *state) UnmarshalBinary(b []byte) error {
|
||||
if len(b) != marshaledSize {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
|
||||
magic := string(b[:len(magicKeccak)])
|
||||
b = b[len(magicKeccak):]
|
||||
switch {
|
||||
case magic == magicKeccak && d.dsbyte == dsbyteKeccak:
|
||||
default:
|
||||
return errors.New("sha3: invalid hash state identifier")
|
||||
}
|
||||
|
||||
rate := int(b[0])
|
||||
b = b[1:]
|
||||
if rate != d.rate {
|
||||
return errors.New("sha3: invalid hash state function")
|
||||
}
|
||||
|
||||
copy(d.a[:], b)
|
||||
b = b[len(d.a):]
|
||||
|
||||
n, state := int(b[0]), spongeDirection(b[1])
|
||||
if n > d.rate {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
d.n = n
|
||||
if state != spongeAbsorbing && state != spongeSqueezing {
|
||||
return errors.New("sha3: invalid hash state")
|
||||
}
|
||||
d.state = state
|
||||
|
||||
return nil
|
||||
}
|
||||
416
vendor/golang.org/x/crypto/sha3/legacy_keccakf.go
generated
vendored
Normal file
@@ -0,0 +1,416 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
// This implementation is only used for NewLegacyKeccak256 and
|
||||
// NewLegacyKeccak512, which are not implemented by crypto/sha3.
|
||||
// All other functions in this package are wrappers around crypto/sha3.
|
||||
|
||||
import "math/bits"
|
||||
|
||||
// rc stores the round constants for use in the ι step.
|
||||
var rc = [24]uint64{
|
||||
0x0000000000000001,
|
||||
0x0000000000008082,
|
||||
0x800000000000808A,
|
||||
0x8000000080008000,
|
||||
0x000000000000808B,
|
||||
0x0000000080000001,
|
||||
0x8000000080008081,
|
||||
0x8000000000008009,
|
||||
0x000000000000008A,
|
||||
0x0000000000000088,
|
||||
0x0000000080008009,
|
||||
0x000000008000000A,
|
||||
0x000000008000808B,
|
||||
0x800000000000008B,
|
||||
0x8000000000008089,
|
||||
0x8000000000008003,
|
||||
0x8000000000008002,
|
||||
0x8000000000000080,
|
||||
0x000000000000800A,
|
||||
0x800000008000000A,
|
||||
0x8000000080008081,
|
||||
0x8000000000008080,
|
||||
0x0000000080000001,
|
||||
0x8000000080008008,
|
||||
}
|
||||
|
||||
// keccakF1600 applies the Keccak permutation to a 1600b-wide
|
||||
// state represented as an array of 25 uint64s.
|
||||
func keccakF1600(a *[25]uint64) {
|
||||
// Implementation translated from Keccak-inplace.c
|
||||
// in the keccak reference code.
|
||||
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
|
||||
|
||||
for i := 0; i < 24; i += 4 {
|
||||
// Combines the 5 steps in each round into 2 steps.
|
||||
// Unrolls 4 rounds per loop and spreads some steps across rounds.
|
||||
|
||||
// Round 1
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[6] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[12] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[18] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[24] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[16] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[22] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[3] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[1] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[7] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[19] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[11] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[23] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[4] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[2] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[8] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[14] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 2
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[16] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[7] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[23] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[14] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[11] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[2] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[18] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[6] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[22] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[4] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[1] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[8] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[24] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[12] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[3] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[19] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 3
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[11] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[22] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[8] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[19] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[1] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[12] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[23] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[16] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[2] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[24] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[6] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[3] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[14] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[7] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[18] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[4] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
// Round 4
|
||||
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
|
||||
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
|
||||
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
|
||||
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
|
||||
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
|
||||
d0 = bc4 ^ (bc1<<1 | bc1>>63)
|
||||
d1 = bc0 ^ (bc2<<1 | bc2>>63)
|
||||
d2 = bc1 ^ (bc3<<1 | bc3>>63)
|
||||
d3 = bc2 ^ (bc4<<1 | bc4>>63)
|
||||
d4 = bc3 ^ (bc0<<1 | bc0>>63)
|
||||
|
||||
bc0 = a[0] ^ d0
|
||||
t = a[1] ^ d1
|
||||
bc1 = bits.RotateLeft64(t, 44)
|
||||
t = a[2] ^ d2
|
||||
bc2 = bits.RotateLeft64(t, 43)
|
||||
t = a[3] ^ d3
|
||||
bc3 = bits.RotateLeft64(t, 21)
|
||||
t = a[4] ^ d4
|
||||
bc4 = bits.RotateLeft64(t, 14)
|
||||
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
|
||||
a[1] = bc1 ^ (bc3 &^ bc2)
|
||||
a[2] = bc2 ^ (bc4 &^ bc3)
|
||||
a[3] = bc3 ^ (bc0 &^ bc4)
|
||||
a[4] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[5] ^ d0
|
||||
bc2 = bits.RotateLeft64(t, 3)
|
||||
t = a[6] ^ d1
|
||||
bc3 = bits.RotateLeft64(t, 45)
|
||||
t = a[7] ^ d2
|
||||
bc4 = bits.RotateLeft64(t, 61)
|
||||
t = a[8] ^ d3
|
||||
bc0 = bits.RotateLeft64(t, 28)
|
||||
t = a[9] ^ d4
|
||||
bc1 = bits.RotateLeft64(t, 20)
|
||||
a[5] = bc0 ^ (bc2 &^ bc1)
|
||||
a[6] = bc1 ^ (bc3 &^ bc2)
|
||||
a[7] = bc2 ^ (bc4 &^ bc3)
|
||||
a[8] = bc3 ^ (bc0 &^ bc4)
|
||||
a[9] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[10] ^ d0
|
||||
bc4 = bits.RotateLeft64(t, 18)
|
||||
t = a[11] ^ d1
|
||||
bc0 = bits.RotateLeft64(t, 1)
|
||||
t = a[12] ^ d2
|
||||
bc1 = bits.RotateLeft64(t, 6)
|
||||
t = a[13] ^ d3
|
||||
bc2 = bits.RotateLeft64(t, 25)
|
||||
t = a[14] ^ d4
|
||||
bc3 = bits.RotateLeft64(t, 8)
|
||||
a[10] = bc0 ^ (bc2 &^ bc1)
|
||||
a[11] = bc1 ^ (bc3 &^ bc2)
|
||||
a[12] = bc2 ^ (bc4 &^ bc3)
|
||||
a[13] = bc3 ^ (bc0 &^ bc4)
|
||||
a[14] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[15] ^ d0
|
||||
bc1 = bits.RotateLeft64(t, 36)
|
||||
t = a[16] ^ d1
|
||||
bc2 = bits.RotateLeft64(t, 10)
|
||||
t = a[17] ^ d2
|
||||
bc3 = bits.RotateLeft64(t, 15)
|
||||
t = a[18] ^ d3
|
||||
bc4 = bits.RotateLeft64(t, 56)
|
||||
t = a[19] ^ d4
|
||||
bc0 = bits.RotateLeft64(t, 27)
|
||||
a[15] = bc0 ^ (bc2 &^ bc1)
|
||||
a[16] = bc1 ^ (bc3 &^ bc2)
|
||||
a[17] = bc2 ^ (bc4 &^ bc3)
|
||||
a[18] = bc3 ^ (bc0 &^ bc4)
|
||||
a[19] = bc4 ^ (bc1 &^ bc0)
|
||||
|
||||
t = a[20] ^ d0
|
||||
bc3 = bits.RotateLeft64(t, 41)
|
||||
t = a[21] ^ d1
|
||||
bc4 = bits.RotateLeft64(t, 2)
|
||||
t = a[22] ^ d2
|
||||
bc0 = bits.RotateLeft64(t, 62)
|
||||
t = a[23] ^ d3
|
||||
bc1 = bits.RotateLeft64(t, 55)
|
||||
t = a[24] ^ d4
|
||||
bc2 = bits.RotateLeft64(t, 39)
|
||||
a[20] = bc0 ^ (bc2 &^ bc1)
|
||||
a[21] = bc1 ^ (bc3 &^ bc2)
|
||||
a[22] = bc2 ^ (bc4 &^ bc3)
|
||||
a[23] = bc3 ^ (bc0 &^ bc4)
|
||||
a[24] = bc4 ^ (bc1 &^ bc0)
|
||||
}
|
||||
}
|
||||
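For orientation, the column-parity (θ) computation that opens each unrolled round above can be written in rolled-up form as below. This reference sketch covers only the θ step, not the full round, and is not used by the package.

package main

import (
	"fmt"
	"math/bits"
)

// theta XORs each lane with the parities of its two neighbouring columns,
// matching the bc/d variables at the top of every round in keccakF1600.
func theta(a *[25]uint64) {
	var bc [5]uint64
	for x := 0; x < 5; x++ {
		bc[x] = a[x] ^ a[x+5] ^ a[x+10] ^ a[x+15] ^ a[x+20]
	}
	for x := 0; x < 5; x++ {
		d := bc[(x+4)%5] ^ bits.RotateLeft64(bc[(x+1)%5], 1)
		for y := 0; y < 25; y += 5 {
			a[x+y] ^= d
		}
	}
}

func main() {
	var a [25]uint64
	a[0] = 1
	theta(&a)
	fmt.Println(a[1], a[4]) // 1 2: neighbouring columns pick up column 0's parity
}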
119
vendor/golang.org/x/crypto/sha3/shake.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sha3
|
||||
|
||||
import (
|
||||
"crypto/sha3"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ShakeHash defines the interface to hash functions that support
|
||||
// arbitrary-length output. When used as a plain [hash.Hash], it
|
||||
// produces minimum-length outputs that provide full-strength generic
|
||||
// security.
|
||||
type ShakeHash interface {
|
||||
hash.Hash
|
||||
|
||||
// Read reads more output from the hash; reading affects the hash's
|
||||
// state. (ShakeHash.Read is thus very different from Hash.Sum.)
|
||||
// It never returns an error, but subsequent calls to Write or Sum
|
||||
// will panic.
|
||||
io.Reader
|
||||
|
||||
// Clone returns a copy of the ShakeHash in its current state.
|
||||
Clone() ShakeHash
|
||||
}
|
||||
|
||||
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 128 bits against all attacks if at
|
||||
// least 32 bytes of its output are used.
|
||||
func NewShake128() ShakeHash {
|
||||
return &shakeWrapper{sha3.NewSHAKE128(), 32, false, sha3.NewSHAKE128}
|
||||
}
|
||||
|
||||
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
|
||||
// Its generic security strength is 256 bits against all attacks if
|
||||
// at least 64 bytes of its output are used.
|
||||
func NewShake256() ShakeHash {
|
||||
return &shakeWrapper{sha3.NewSHAKE256(), 64, false, sha3.NewSHAKE256}
|
||||
}
|
||||
|
||||
// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
|
||||
// a customizable variant of SHAKE128.
|
||||
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
|
||||
// desired. S is a customization byte string used for domain separation - two cSHAKE
|
||||
// computations on same input with different S yield unrelated outputs.
|
||||
// When N and S are both empty, this is equivalent to NewShake128.
|
||||
func NewCShake128(N, S []byte) ShakeHash {
|
||||
return &shakeWrapper{sha3.NewCSHAKE128(N, S), 32, false, func() *sha3.SHAKE {
|
||||
return sha3.NewCSHAKE128(N, S)
|
||||
}}
|
||||
}
|
||||
|
||||
// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
|
||||
// a customizable variant of SHAKE256.
|
||||
// N is used to define functions based on cSHAKE; it can be empty when plain cSHAKE is
|
||||
// desired. S is a customization byte string used for domain separation - two cSHAKE
|
||||
// computations on same input with different S yield unrelated outputs.
|
||||
// When N and S are both empty, this is equivalent to NewShake256.
|
||||
func NewCShake256(N, S []byte) ShakeHash {
|
||||
return &shakeWrapper{sha3.NewCSHAKE256(N, S), 64, false, func() *sha3.SHAKE {
|
||||
return sha3.NewCSHAKE256(N, S)
|
||||
}}
|
||||
}
|
||||
|
||||
// ShakeSum128 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum128(hash, data []byte) {
|
||||
h := NewShake128()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
|
||||
|
||||
// ShakeSum256 writes an arbitrary-length digest of data into hash.
|
||||
func ShakeSum256(hash, data []byte) {
|
||||
h := NewShake256()
|
||||
h.Write(data)
|
||||
h.Read(hash)
|
||||
}
|
||||
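A usage sketch of the SHAKE constructors and helpers above; the input and the 64-byte output length are arbitrary placeholders. Both paths squeeze the same bytes.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// One-shot: squeeze 64 bytes of SHAKE128 output.
	out := make([]byte, 64)
	sha3.ShakeSum128(out, []byte("input"))
	fmt.Printf("%x\n", out)

	// Streaming: the same bytes through the ShakeHash interface.
	h := sha3.NewShake128()
	h.Write([]byte("input"))
	h.Read(out)
	fmt.Printf("%x\n", out)
}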
|
||||
// shakeWrapper adds the Size, Sum, and Clone methods to a sha3.SHAKE
|
||||
// to implement the ShakeHash interface.
|
||||
type shakeWrapper struct {
|
||||
*sha3.SHAKE
|
||||
outputLen int
|
||||
squeezing bool
|
||||
newSHAKE func() *sha3.SHAKE
|
||||
}
|
||||
|
||||
func (w *shakeWrapper) Read(p []byte) (n int, err error) {
|
||||
w.squeezing = true
|
||||
return w.SHAKE.Read(p)
|
||||
}
|
||||
|
||||
func (w *shakeWrapper) Clone() ShakeHash {
|
||||
s := w.newSHAKE()
|
||||
b, err := w.MarshalBinary()
|
||||
if err != nil {
|
||||
panic(err) // unreachable
|
||||
}
|
||||
if err := s.UnmarshalBinary(b); err != nil {
|
||||
panic(err) // unreachable
|
||||
}
|
||||
return &shakeWrapper{s, w.outputLen, w.squeezing, w.newSHAKE}
|
||||
}
|
||||
|
||||
func (w *shakeWrapper) Size() int { return w.outputLen }
|
||||
|
||||
func (w *shakeWrapper) Sum(b []byte) []byte {
|
||||
if w.squeezing {
|
||||
panic("sha3: Sum after Read")
|
||||
}
|
||||
out := make([]byte, w.outputLen)
|
||||
// Clone the state so that we don't affect future Write calls.
|
||||
s := w.Clone()
|
||||
s.Read(out)
|
||||
return append(b, out...)
|
||||
}
|
||||
27
vendor/golang.org/x/net/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright 2009 The Go Authors.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google LLC nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22
vendor/golang.org/x/net/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
41
vendor/golang.org/x/net/bpf/asm.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Assemble converts insts into raw instructions suitable for loading
|
||||
// into a BPF virtual machine.
|
||||
//
|
||||
// Currently, no optimization is attempted; the assembled program flow
|
||||
// is exactly as provided.
|
||||
func Assemble(insts []Instruction) ([]RawInstruction, error) {
|
||||
ret := make([]RawInstruction, len(insts))
|
||||
var err error
|
||||
for i, inst := range insts {
|
||||
ret[i], err = inst.Assemble()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Disassemble attempts to parse raw back into
|
||||
// Instructions. Unrecognized RawInstructions are assumed to be an
|
||||
// extension not implemented by this package, and are passed through
|
||||
// unchanged to the output. The allDecoded value reports whether insts
|
||||
// contains no RawInstructions.
|
||||
func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
|
||||
insts = make([]Instruction, len(raw))
|
||||
allDecoded = true
|
||||
for i, r := range raw {
|
||||
insts[i] = r.Disassemble()
|
||||
if _, ok := insts[i].(RawInstruction); ok {
|
||||
allDecoded = false
|
||||
}
|
||||
}
|
||||
return insts, allDecoded
|
||||
}
|
||||
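A small, hedged example of the round trip through Assemble and Disassemble. LoadAbsolute and RetConstant are instruction types defined elsewhere in this package (outside this hunk); the offset and return value are illustrative only.

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2}, // load the EtherType field into A
		bpf.RetConstant{Val: 0xffff},       // accept the packet
	}
	raw, err := bpf.Assemble(prog)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw)) // 2 raw instructions

	insts, allDecoded := bpf.Disassemble(raw)
	fmt.Println(len(insts), allDecoded) // 2 true
}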
222
vendor/golang.org/x/net/bpf/constants.go
generated
vendored
Normal file
@@ -0,0 +1,222 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
// A Register is a register of the BPF virtual machine.
|
||||
type Register uint16
|
||||
|
||||
const (
|
||||
// RegA is the accumulator register. RegA is always the
|
||||
// destination register of ALU operations.
|
||||
RegA Register = iota
|
||||
// RegX is the indirection register, used by LoadIndirect
|
||||
// operations.
|
||||
RegX
|
||||
)
|
||||
|
||||
// An ALUOp is an arithmetic or logic operation.
|
||||
type ALUOp uint16
|
||||
|
||||
// ALU binary operation types.
|
||||
const (
|
||||
ALUOpAdd ALUOp = iota << 4
|
||||
ALUOpSub
|
||||
ALUOpMul
|
||||
ALUOpDiv
|
||||
ALUOpOr
|
||||
ALUOpAnd
|
||||
ALUOpShiftLeft
|
||||
ALUOpShiftRight
|
||||
aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
|
||||
ALUOpMod
|
||||
ALUOpXor
|
||||
)
|
||||
|
||||
// A JumpTest is a comparison operator used in conditional jumps.
|
||||
type JumpTest uint16
|
||||
|
||||
// Supported operators for conditional jumps.
|
||||
// K can be RegX for JumpIfX
|
||||
const (
|
||||
// K == A
|
||||
JumpEqual JumpTest = iota
|
||||
// K != A
|
||||
JumpNotEqual
|
||||
// K > A
|
||||
JumpGreaterThan
|
||||
// K < A
|
||||
JumpLessThan
|
||||
// K >= A
|
||||
JumpGreaterOrEqual
|
||||
// K <= A
|
||||
JumpLessOrEqual
|
||||
// K & A != 0
|
||||
JumpBitsSet
|
||||
// K & A == 0
|
||||
JumpBitsNotSet
|
||||
)
|
||||
|
||||
// An Extension is a function call provided by the kernel that
|
||||
// performs advanced operations that are expensive or impossible
|
||||
// within the BPF virtual machine.
|
||||
//
|
||||
// Extensions are only implemented by the Linux kernel.
|
||||
//
|
||||
// TODO: should we prune this list? Some of these extensions seem
|
||||
// either broken or near-impossible to use correctly, whereas others
|
||||
// (len, random, ifindex) are quite useful.
|
||||
type Extension int
|
||||
|
||||
// Extension functions available in the Linux kernel.
|
||||
const (
|
||||
// extOffset is the negative maximum number of instructions used
|
||||
// to load instructions by overloading the K argument.
|
||||
extOffset = -0x1000
|
||||
// ExtLen returns the length of the packet.
|
||||
ExtLen Extension = 1
|
||||
// ExtProto returns the packet's L3 protocol type.
|
||||
ExtProto Extension = 0
|
||||
// ExtType returns the packet's type (skb->pkt_type in the kernel)
|
||||
//
|
||||
// TODO: better documentation. How nice an API do we want to
|
||||
// provide for these esoteric extensions?
|
||||
ExtType Extension = 4
|
||||
// ExtPayloadOffset returns the offset of the packet payload, or
|
||||
// the first protocol header that the kernel does not know how to
|
||||
// parse.
|
||||
ExtPayloadOffset Extension = 52
|
||||
// ExtInterfaceIndex returns the index of the interface on which
|
||||
// the packet was received.
|
||||
ExtInterfaceIndex Extension = 8
|
||||
// ExtNetlinkAttr returns the netlink attribute of type X at
|
||||
// offset A.
|
||||
ExtNetlinkAttr Extension = 12
|
||||
// ExtNetlinkAttrNested returns the nested netlink attribute of
|
||||
// type X at offset A.
|
||||
ExtNetlinkAttrNested Extension = 16
|
||||
// ExtMark returns the packet's mark value.
|
||||
ExtMark Extension = 20
|
||||
// ExtQueue returns the packet's assigned hardware queue.
|
||||
ExtQueue Extension = 24
|
||||
// ExtLinkLayerType returns the packet's hardware address type
|
||||
// (e.g. Ethernet, Infiniband).
|
||||
ExtLinkLayerType Extension = 28
|
||||
// ExtRXHash returns the packet's receive hash.
|
||||
//
|
||||
// TODO: figure out what this rxhash actually is.
|
||||
ExtRXHash Extension = 32
|
||||
// ExtCPUID returns the ID of the CPU processing the current
|
||||
// packet.
|
||||
ExtCPUID Extension = 36
|
||||
// ExtVLANTag returns the packet's VLAN tag.
|
||||
ExtVLANTag Extension = 44
|
||||
// ExtVLANTagPresent returns non-zero if the packet has a VLAN
|
||||
// tag.
|
||||
//
|
||||
// TODO: I think this might be a lie: it reads bit 0x1000 of the
|
||||
// VLAN header, which changed meaning in recent revisions of the
|
||||
// spec - this extension may now return meaningless information.
|
||||
ExtVLANTagPresent Extension = 48
|
||||
// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
|
||||
// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
|
||||
// other value if no VLAN information is present.
|
||||
ExtVLANProto Extension = 60
|
||||
// ExtRand returns a uniformly random uint32.
|
||||
ExtRand Extension = 56
|
||||
)
|
||||
|
||||
// The following gives names to various bit patterns used in opcode construction.
|
||||
|
||||
const (
|
||||
opMaskCls uint16 = 0x7
|
||||
// opClsLoad masks
|
||||
opMaskLoadDest = 0x01
|
||||
opMaskLoadWidth = 0x18
|
||||
opMaskLoadMode = 0xe0
|
||||
// opClsALU & opClsJump
|
||||
opMaskOperand = 0x08
|
||||
opMaskOperator = 0xf0
|
||||
)
|
||||
|
||||
const (
|
||||
// +---------------+-----------------+---+---+---+
|
||||
// | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 |
|
||||
// +---------------+-----------------+---+---+---+
|
||||
opClsLoadA uint16 = iota
|
||||
// +---------------+-----------------+---+---+---+
|
||||
// | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 |
|
||||
// +---------------+-----------------+---+---+---+
|
||||
opClsLoadX
|
||||
// +---+---+---+---+---+---+---+---+
|
||||
// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
|
||||
// +---+---+---+---+---+---+---+---+
|
||||
opClsStoreA
|
||||
// +---+---+---+---+---+---+---+---+
|
||||
// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
|
||||
// +---+---+---+---+---+---+---+---+
|
||||
opClsStoreX
|
||||
// +---------------+-----------------+---+---+---+
|
||||
// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
|
||||
// +---------------+-----------------+---+---+---+
|
||||
opClsALU
|
||||
// +-----------------------------+---+---+---+---+
|
||||
// | TestOperator (4b) | 0 | 1 | 0 | 1 |
|
||||
// +-----------------------------+---+---+---+---+
|
||||
opClsJump
|
||||
// +---+-------------------------+---+---+---+---+
|
||||
// | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 |
|
||||
// +---+-------------------------+---+---+---+---+
|
||||
opClsReturn
|
||||
// +---+-------------------------+---+---+---+---+
|
||||
// | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 |
|
||||
// +---+-------------------------+---+---+---+---+
|
||||
opClsMisc
|
||||
)
|
||||
|
||||
const (
|
||||
opAddrModeImmediate uint16 = iota << 5
|
||||
opAddrModeAbsolute
|
||||
opAddrModeIndirect
|
||||
opAddrModeScratch
|
||||
opAddrModePacketLen // actually an extension, not an addressing mode.
|
||||
opAddrModeMemShift
|
||||
)
|
||||
|
||||
const (
|
||||
opLoadWidth4 uint16 = iota << 3
|
||||
opLoadWidth2
|
||||
opLoadWidth1
|
||||
)
|
||||
|
||||
// Operand for ALU and Jump instructions
|
||||
type opOperand uint16
|
||||
|
||||
// Supported operand sources.
|
||||
const (
|
||||
opOperandConstant opOperand = iota << 3
|
||||
opOperandX
|
||||
)
|
||||
|
||||
// A jumpOp is a conditional jump condition.
|
||||
type jumpOp uint16
|
||||
|
||||
// Supported jump conditions.
|
||||
const (
|
||||
opJumpAlways jumpOp = iota << 4
|
||||
opJumpEqual
|
||||
opJumpGT
|
||||
opJumpGE
|
||||
opJumpSet
|
||||
)
|
||||
|
||||
const (
|
||||
opRetSrcConstant uint16 = iota << 4
|
||||
opRetSrcA
|
||||
)
|
||||
|
||||
const (
|
||||
opMiscTAX = 0x00
|
||||
opMiscTXA = 0x80
|
||||
)
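As a worked example (illustrative only, and necessarily inside this package since the constants are unexported), the class, width, and addressing-mode patterns above OR together into the classic BPF opcodes:

// ldhAbsOpcode shows how the patterns above compose: a 16-bit absolute
// load into register A is 0x00 | 0x08 | 0x20 == 0x28, the classic BPF
// "ldh [k]" opcode.
func ldhAbsOpcode() uint16 {
	return opClsLoadA | opLoadWidth2 | opAddrModeAbsolute
}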
|
||||
80
vendor/golang.org/x/net/bpf/doc.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package bpf implements marshaling and unmarshaling of programs for the
|
||||
Berkeley Packet Filter virtual machine, and provides a Go implementation
|
||||
of the virtual machine.
|
||||
|
||||
BPF's main use is to specify a packet filter for network taps, so that
|
||||
the kernel doesn't have to expensively copy every packet it sees to
|
||||
userspace. However, it's been repurposed to other areas where running
|
||||
user code in-kernel is needed. For example, Linux's seccomp uses BPF
|
||||
to apply security policies to system calls. For simplicity, this
|
||||
documentation refers only to packets, but other uses of BPF have their
|
||||
own data payloads.
|
||||
|
||||
BPF programs run in a restricted virtual machine. It has almost no
|
||||
access to kernel functions, and while conditional branches are
|
||||
allowed, they can only jump forwards, to guarantee that there are no
|
||||
infinite loops.
|
||||
|
||||
# The virtual machine
|
||||
|
||||
The BPF VM is an accumulator machine. Its main register, called
|
||||
register A, is an implicit source and destination in all arithmetic
|
||||
and logic operations. The machine also has 16 scratch registers for
|
||||
temporary storage, and an indirection register (register X) for
|
||||
indirect memory access. All registers are 32 bits wide.
|
||||
|
||||
Each run of a BPF program is given one packet, which is placed in the
|
||||
VM's read-only "main memory". LoadAbsolute and LoadIndirect
|
||||
instructions can fetch up to 32 bits at a time into register A for
|
||||
examination.
|
||||
|
||||
The goal of a BPF program is to produce and return a verdict (uint32),
|
||||
which tells the kernel what to do with the packet. In the context of
|
||||
packet filtering, the returned value is the number of bytes of the
|
||||
packet to forward to userspace, or 0 to ignore the packet. Other
|
||||
contexts like seccomp define their own return values.
|
||||
|
||||
In order to simplify programs, attempts to read past the end of the
|
||||
packet terminate the program execution with a verdict of 0 (ignore
|
||||
packet). This means that the vast majority of BPF programs don't need
|
||||
to do any explicit bounds checking.
|
||||
|
||||
In addition to the bytes of the packet, some BPF programs have access
|
||||
to extensions, which are essentially calls to kernel utility
|
||||
functions. Currently, the only extensions supported by this package
|
||||
are the Linux packet filter extensions.
|
||||
|
||||
# Examples
|
||||
|
||||
This packet filter selects all ARP packets.
|
||||
|
||||
bpf.Assemble([]bpf.Instruction{
|
||||
// Load "EtherType" field from the ethernet header.
|
||||
bpf.LoadAbsolute{Off: 12, Size: 2},
|
||||
// Skip over the next instruction if EtherType is not ARP.
|
||||
bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
|
||||
// Verdict is "send up to 4k of the packet to userspace."
|
||||
bpf.RetConstant{Val: 4096},
|
||||
// Verdict is "ignore packet."
|
||||
bpf.RetConstant{Val: 0},
|
||||
})
|
||||
|
||||
This packet filter captures a random 1% sample of traffic.
|
||||
|
||||
bpf.Assemble([]bpf.Instruction{
|
||||
// Get a 32-bit random number from the Linux kernel.
|
||||
bpf.LoadExtension{Num: bpf.ExtRand},
|
||||
// 1% dice roll?
|
||||
bpf.JumpIf{Cond: bpf.JumpLessThan, Val: (1 << 32) / 100, SkipFalse: 1},
|
||||
// Capture.
|
||||
bpf.RetConstant{Val: 4096},
|
||||
// Ignore.
|
||||
bpf.RetConstant{Val: 0},
|
||||
})
|
||||
*/
|
||||
package bpf // import "golang.org/x/net/bpf"
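A sketch of the typical end-to-end flow (illustrative, not part of the vendored file): assemble a program and hand the raw instructions to anything implementing the Setter interface defined later in this package. The package and function names here are hypothetical.

package arpfilter

import "golang.org/x/net/bpf"

// AttachARPFilter assembles the ARP filter from the documentation above
// and attaches it to conn, which may be any value implementing
// bpf.Setter (for example, a raw-socket type from another package).
func AttachARPFilter(conn bpf.Setter) error {
	raw, err := bpf.Assemble([]bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
		bpf.RetConstant{Val: 4096},
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		return err
	}
	return conn.SetBPF(raw)
}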
|
||||
726
vendor/golang.org/x/net/bpf/instructions.go
generated
vendored
Normal file
@@ -0,0 +1,726 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import "fmt"
|
||||
|
||||
// An Instruction is one instruction executed by the BPF virtual
|
||||
// machine.
|
||||
type Instruction interface {
|
||||
// Assemble assembles the Instruction into a RawInstruction.
|
||||
Assemble() (RawInstruction, error)
|
||||
}
|
||||
|
||||
// A RawInstruction is a raw BPF virtual machine instruction.
|
||||
type RawInstruction struct {
|
||||
// Operation to execute.
|
||||
Op uint16
|
||||
// For conditional jump instructions, the number of instructions
|
||||
// to skip if the condition is true/false.
|
||||
Jt uint8
|
||||
Jf uint8
|
||||
// Constant parameter. The meaning depends on the Op.
|
||||
K uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
|
||||
|
||||
// Disassemble parses ri into an Instruction and returns it. If ri is
|
||||
// not recognized by this package, ri itself is returned.
|
||||
func (ri RawInstruction) Disassemble() Instruction {
|
||||
switch ri.Op & opMaskCls {
|
||||
case opClsLoadA, opClsLoadX:
|
||||
reg := Register(ri.Op & opMaskLoadDest)
|
||||
sz := 0
|
||||
switch ri.Op & opMaskLoadWidth {
|
||||
case opLoadWidth4:
|
||||
sz = 4
|
||||
case opLoadWidth2:
|
||||
sz = 2
|
||||
case opLoadWidth1:
|
||||
sz = 1
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
switch ri.Op & opMaskLoadMode {
|
||||
case opAddrModeImmediate:
|
||||
if sz != 4 {
|
||||
return ri
|
||||
}
|
||||
return LoadConstant{Dst: reg, Val: ri.K}
|
||||
case opAddrModeScratch:
|
||||
if sz != 4 || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return LoadScratch{Dst: reg, N: int(ri.K)}
|
||||
case opAddrModeAbsolute:
|
||||
if ri.K > extOffset+0xffffffff {
|
||||
return LoadExtension{Num: Extension(-extOffset + ri.K)}
|
||||
}
|
||||
return LoadAbsolute{Size: sz, Off: ri.K}
|
||||
case opAddrModeIndirect:
|
||||
return LoadIndirect{Size: sz, Off: ri.K}
|
||||
case opAddrModePacketLen:
|
||||
if sz != 4 {
|
||||
return ri
|
||||
}
|
||||
return LoadExtension{Num: ExtLen}
|
||||
case opAddrModeMemShift:
|
||||
return LoadMemShift{Off: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsStoreA:
|
||||
if ri.Op != opClsStoreA || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return StoreScratch{Src: RegA, N: int(ri.K)}
|
||||
|
||||
case opClsStoreX:
|
||||
if ri.Op != opClsStoreX || ri.K > 15 {
|
||||
return ri
|
||||
}
|
||||
return StoreScratch{Src: RegX, N: int(ri.K)}
|
||||
|
||||
case opClsALU:
|
||||
switch op := ALUOp(ri.Op & opMaskOperator); op {
|
||||
case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
|
||||
switch operand := opOperand(ri.Op & opMaskOperand); operand {
|
||||
case opOperandX:
|
||||
return ALUOpX{Op: op}
|
||||
case opOperandConstant:
|
||||
return ALUOpConstant{Op: op, Val: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
case aluOpNeg:
|
||||
return NegateA{}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsJump:
|
||||
switch op := jumpOp(ri.Op & opMaskOperator); op {
|
||||
case opJumpAlways:
|
||||
return Jump{Skip: ri.K}
|
||||
case opJumpEqual, opJumpGT, opJumpGE, opJumpSet:
|
||||
cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf)
|
||||
switch operand := opOperand(ri.Op & opMaskOperand); operand {
|
||||
case opOperandX:
|
||||
return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse}
|
||||
case opOperandConstant:
|
||||
return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsReturn:
|
||||
switch ri.Op {
|
||||
case opClsReturn | opRetSrcA:
|
||||
return RetA{}
|
||||
case opClsReturn | opRetSrcConstant:
|
||||
return RetConstant{Val: ri.K}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
case opClsMisc:
|
||||
switch ri.Op {
|
||||
case opClsMisc | opMiscTAX:
|
||||
return TAX{}
|
||||
case opClsMisc | opMiscTXA:
|
||||
return TXA{}
|
||||
default:
|
||||
return ri
|
||||
}
|
||||
|
||||
default:
|
||||
panic("unreachable") // switch is exhaustive on the bit pattern
|
||||
}
|
||||
}
|
||||
|
||||
func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) {
|
||||
var test JumpTest
|
||||
|
||||
// Decode "fake" jump conditions that don't appear in machine code
|
||||
// Ensures the Assemble -> Disassemble stage recreates the same instructions
|
||||
// See https://github.com/golang/go/issues/18470
|
||||
if skipTrue == 0 {
|
||||
switch op {
|
||||
case opJumpEqual:
|
||||
test = JumpNotEqual
|
||||
case opJumpGT:
|
||||
test = JumpLessOrEqual
|
||||
case opJumpGE:
|
||||
test = JumpLessThan
|
||||
case opJumpSet:
|
||||
test = JumpBitsNotSet
|
||||
}
|
||||
|
||||
return test, skipFalse, 0
|
||||
}
|
||||
|
||||
switch op {
|
||||
case opJumpEqual:
|
||||
test = JumpEqual
|
||||
case opJumpGT:
|
||||
test = JumpGreaterThan
|
||||
case opJumpGE:
|
||||
test = JumpGreaterOrEqual
|
||||
case opJumpSet:
|
||||
test = JumpBitsSet
|
||||
}
|
||||
|
||||
return test, skipTrue, skipFalse
|
||||
}
|
||||
|
||||
// LoadConstant loads Val into register Dst.
|
||||
type LoadConstant struct {
|
||||
Dst Register
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadConstant) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadConstant) String() string {
|
||||
switch a.Dst {
|
||||
case RegA:
|
||||
return fmt.Sprintf("ld #%d", a.Val)
|
||||
case RegX:
|
||||
return fmt.Sprintf("ldx #%d", a.Val)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadScratch loads scratch[N] into register Dst.
|
||||
type LoadScratch struct {
|
||||
Dst Register
|
||||
N int // 0-15
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadScratch) Assemble() (RawInstruction, error) {
|
||||
if a.N < 0 || a.N > 15 {
|
||||
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
|
||||
}
|
||||
return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadScratch) String() string {
|
||||
switch a.Dst {
|
||||
case RegA:
|
||||
return fmt.Sprintf("ld M[%d]", a.N)
|
||||
case RegX:
|
||||
return fmt.Sprintf("ldx M[%d]", a.N)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
|
||||
// register A.
|
||||
type LoadAbsolute struct {
|
||||
Off uint32
|
||||
Size int // 1, 2 or 4
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadAbsolute) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadAbsolute) String() string {
|
||||
switch a.Size {
|
||||
case 1: // byte
|
||||
return fmt.Sprintf("ldb [%d]", a.Off)
|
||||
case 2: // half word
|
||||
return fmt.Sprintf("ldh [%d]", a.Off)
|
||||
case 4: // word
|
||||
if a.Off > extOffset+0xffffffff {
|
||||
return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
|
||||
}
|
||||
return fmt.Sprintf("ld [%d]", a.Off)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
|
||||
// into register A.
|
||||
type LoadIndirect struct {
|
||||
Off uint32
|
||||
Size int // 1, 2 or 4
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadIndirect) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadIndirect) String() string {
|
||||
switch a.Size {
|
||||
case 1: // byte
|
||||
return fmt.Sprintf("ldb [x + %d]", a.Off)
|
||||
case 2: // half word
|
||||
return fmt.Sprintf("ldh [x + %d]", a.Off)
|
||||
case 4: // word
|
||||
return fmt.Sprintf("ld [x + %d]", a.Off)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadMemShift multiplies the first 4 bits of the byte at packet[Off]
|
||||
// by 4 and stores the result in register X.
|
||||
//
|
||||
// This instruction is mainly useful to load into X the length of an
|
||||
// IPv4 packet header in a single instruction, rather than have to do
|
||||
// the arithmetic on the header's first byte by hand.
|
||||
type LoadMemShift struct {
|
||||
Off uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadMemShift) Assemble() (RawInstruction, error) {
|
||||
return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadMemShift) String() string {
|
||||
return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
|
||||
}
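// exampleTCPDstPort is an illustrative sketch (not part of the upstream
// file) of the idiom LoadMemShift enables: skip a variable-length IPv4
// header on Ethernet and read the TCP destination port.
func exampleTCPDstPort() []Instruction {
	return []Instruction{
		// X = 4 * (packet[14] & 0x0f): the IPv4 header length in bytes.
		LoadMemShift{Off: 14},
		// A = 16-bit value at [x + 16]: 14-byte Ethernet header plus the
		// 2-byte offset of the destination port within the TCP header.
		LoadIndirect{Off: 16, Size: 2},
		// Return the port as the verdict (illustrative only).
		RetA{},
	}
}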
|
||||
|
||||
// LoadExtension invokes a linux-specific extension and stores the
|
||||
// result in register A.
|
||||
type LoadExtension struct {
|
||||
Num Extension
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a LoadExtension) Assemble() (RawInstruction, error) {
|
||||
if a.Num == ExtLen {
|
||||
return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
|
||||
}
|
||||
return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a LoadExtension) String() string {
|
||||
switch a.Num {
|
||||
case ExtLen:
|
||||
return "ld #len"
|
||||
case ExtProto:
|
||||
return "ld #proto"
|
||||
case ExtType:
|
||||
return "ld #type"
|
||||
case ExtPayloadOffset:
|
||||
return "ld #poff"
|
||||
case ExtInterfaceIndex:
|
||||
return "ld #ifidx"
|
||||
case ExtNetlinkAttr:
|
||||
return "ld #nla"
|
||||
case ExtNetlinkAttrNested:
|
||||
return "ld #nlan"
|
||||
case ExtMark:
|
||||
return "ld #mark"
|
||||
case ExtQueue:
|
||||
return "ld #queue"
|
||||
case ExtLinkLayerType:
|
||||
return "ld #hatype"
|
||||
case ExtRXHash:
|
||||
return "ld #rxhash"
|
||||
case ExtCPUID:
|
||||
return "ld #cpu"
|
||||
case ExtVLANTag:
|
||||
return "ld #vlan_tci"
|
||||
case ExtVLANTagPresent:
|
||||
return "ld #vlan_avail"
|
||||
case ExtVLANProto:
|
||||
return "ld #vlan_tpid"
|
||||
case ExtRand:
|
||||
return "ld #rand"
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// StoreScratch stores register Src into scratch[N].
|
||||
type StoreScratch struct {
|
||||
Src Register
|
||||
N int // 0-15
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a StoreScratch) Assemble() (RawInstruction, error) {
|
||||
if a.N < 0 || a.N > 15 {
|
||||
return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
|
||||
}
|
||||
var op uint16
|
||||
switch a.Src {
|
||||
case RegA:
|
||||
op = opClsStoreA
|
||||
case RegX:
|
||||
op = opClsStoreX
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
|
||||
}
|
||||
|
||||
return RawInstruction{
|
||||
Op: op,
|
||||
K: uint32(a.N),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a StoreScratch) String() string {
|
||||
switch a.Src {
|
||||
case RegA:
|
||||
return fmt.Sprintf("st M[%d]", a.N)
|
||||
case RegX:
|
||||
return fmt.Sprintf("stx M[%d]", a.N)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// ALUOpConstant executes A = A <Op> Val.
|
||||
type ALUOpConstant struct {
|
||||
Op ALUOp
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a ALUOpConstant) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op),
|
||||
K: a.Val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a ALUOpConstant) String() string {
|
||||
switch a.Op {
|
||||
case ALUOpAdd:
|
||||
return fmt.Sprintf("add #%d", a.Val)
|
||||
case ALUOpSub:
|
||||
return fmt.Sprintf("sub #%d", a.Val)
|
||||
case ALUOpMul:
|
||||
return fmt.Sprintf("mul #%d", a.Val)
|
||||
case ALUOpDiv:
|
||||
return fmt.Sprintf("div #%d", a.Val)
|
||||
case ALUOpMod:
|
||||
return fmt.Sprintf("mod #%d", a.Val)
|
||||
case ALUOpAnd:
|
||||
return fmt.Sprintf("and #%d", a.Val)
|
||||
case ALUOpOr:
|
||||
return fmt.Sprintf("or #%d", a.Val)
|
||||
case ALUOpXor:
|
||||
return fmt.Sprintf("xor #%d", a.Val)
|
||||
case ALUOpShiftLeft:
|
||||
return fmt.Sprintf("lsh #%d", a.Val)
|
||||
case ALUOpShiftRight:
|
||||
return fmt.Sprintf("rsh #%d", a.Val)
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// ALUOpX executes A = A <Op> X
|
||||
type ALUOpX struct {
|
||||
Op ALUOp
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a ALUOpX) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(opOperandX) | uint16(a.Op),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a ALUOpX) String() string {
|
||||
switch a.Op {
|
||||
case ALUOpAdd:
|
||||
return "add x"
|
||||
case ALUOpSub:
|
||||
return "sub x"
|
||||
case ALUOpMul:
|
||||
return "mul x"
|
||||
case ALUOpDiv:
|
||||
return "div x"
|
||||
case ALUOpMod:
|
||||
return "mod x"
|
||||
case ALUOpAnd:
|
||||
return "and x"
|
||||
case ALUOpOr:
|
||||
return "or x"
|
||||
case ALUOpXor:
|
||||
return "xor x"
|
||||
case ALUOpShiftLeft:
|
||||
return "lsh x"
|
||||
case ALUOpShiftRight:
|
||||
return "rsh x"
|
||||
default:
|
||||
return fmt.Sprintf("unknown instruction: %#v", a)
|
||||
}
|
||||
}
|
||||
|
||||
// NegateA executes A = -A.
|
||||
type NegateA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a NegateA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsALU | uint16(aluOpNeg),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a NegateA) String() string {
|
||||
return fmt.Sprintf("neg")
|
||||
}
|
||||
|
||||
// Jump skips the following Skip instructions in the program.
|
||||
type Jump struct {
|
||||
Skip uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a Jump) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsJump | uint16(opJumpAlways),
|
||||
K: a.Skip,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a Jump) String() string {
|
||||
return fmt.Sprintf("ja %d", a.Skip)
|
||||
}
|
||||
|
||||
// JumpIf skips the following Skip instructions in the program if A
|
||||
// <Cond> Val is true.
|
||||
type JumpIf struct {
|
||||
Cond JumpTest
|
||||
Val uint32
|
||||
SkipTrue uint8
|
||||
SkipFalse uint8
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a JumpIf) Assemble() (RawInstruction, error) {
|
||||
return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a JumpIf) String() string {
|
||||
return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// JumpIfX skips the following Skip instructions in the program if A
|
||||
// <Cond> X is true.
|
||||
type JumpIfX struct {
|
||||
Cond JumpTest
|
||||
SkipTrue uint8
|
||||
SkipFalse uint8
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a JumpIfX) Assemble() (RawInstruction, error) {
|
||||
return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a JumpIfX) String() string {
|
||||
return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse)
|
||||
}
|
||||
|
||||
// jumpToRaw assembles a jump instruction into a RawInstruction
|
||||
func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) {
|
||||
var (
|
||||
cond jumpOp
|
||||
flip bool
|
||||
)
|
||||
switch test {
|
||||
case JumpEqual:
|
||||
cond = opJumpEqual
|
||||
case JumpNotEqual:
|
||||
cond, flip = opJumpEqual, true
|
||||
case JumpGreaterThan:
|
||||
cond = opJumpGT
|
||||
case JumpLessThan:
|
||||
cond, flip = opJumpGE, true
|
||||
case JumpGreaterOrEqual:
|
||||
cond = opJumpGE
|
||||
case JumpLessOrEqual:
|
||||
cond, flip = opJumpGT, true
|
||||
case JumpBitsSet:
|
||||
cond = opJumpSet
|
||||
case JumpBitsNotSet:
|
||||
cond, flip = opJumpSet, true
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test)
|
||||
}
|
||||
jt, jf := skipTrue, skipFalse
|
||||
if flip {
|
||||
jt, jf = jf, jt
|
||||
}
|
||||
return RawInstruction{
|
||||
Op: opClsJump | uint16(cond) | uint16(operand),
|
||||
Jt: jt,
|
||||
Jf: jf,
|
||||
K: k,
|
||||
}, nil
|
||||
}
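// As a worked example (illustrative, not part of the upstream file),
//
//	JumpIf{Cond: JumpLessThan, Val: 10, SkipTrue: 2}
//
// has no direct machine encoding, so jumpToRaw flips it to opJumpGE and
// swaps the skip counts, yielding
//
//	RawInstruction{Op: 0x35, Jt: 0, Jf: 2, K: 10}
//
// i.e. the classic "jge #10" with jt=0 and jf=2.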
|
||||
|
||||
// jumpToString converts a jump instruction to assembler notation
|
||||
func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string {
|
||||
switch cond {
|
||||
// K == A
|
||||
case JumpEqual:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq")
|
||||
// K != A
|
||||
case JumpNotEqual:
|
||||
return fmt.Sprintf("jneq %s,%d", operand, skipTrue)
|
||||
// A > K
|
||||
case JumpGreaterThan:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle")
|
||||
// A < K
|
||||
case JumpLessThan:
|
||||
return fmt.Sprintf("jlt %s,%d", operand, skipTrue)
|
||||
// A >= K
|
||||
case JumpGreaterOrEqual:
|
||||
return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt")
|
||||
// A <= K
|
||||
case JumpLessOrEqual:
|
||||
return fmt.Sprintf("jle %s,%d", operand, skipTrue)
|
||||
// K & A != 0
|
||||
case JumpBitsSet:
|
||||
if skipFalse > 0 {
|
||||
return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse)
|
||||
}
|
||||
return fmt.Sprintf("jset %s,%d", operand, skipTrue)
|
||||
// K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips
|
||||
case JumpBitsNotSet:
|
||||
return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue)
|
||||
default:
|
||||
return fmt.Sprintf("unknown JumpTest %#v", cond)
|
||||
}
|
||||
}
|
||||
|
||||
func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string {
|
||||
if skipTrue > 0 {
|
||||
if skipFalse > 0 {
|
||||
return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse)
|
||||
}
|
||||
return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue)
|
||||
}
|
||||
return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse)
|
||||
}
|
||||
|
||||
// RetA exits the BPF program, returning the value of register A.
|
||||
type RetA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a RetA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsReturn | opRetSrcA,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a RetA) String() string {
|
||||
return fmt.Sprintf("ret a")
|
||||
}
|
||||
|
||||
// RetConstant exits the BPF program, returning a constant value.
|
||||
type RetConstant struct {
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a RetConstant) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsReturn | opRetSrcConstant,
|
||||
K: a.Val,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a RetConstant) String() string {
|
||||
return fmt.Sprintf("ret #%d", a.Val)
|
||||
}
|
||||
|
||||
// TXA copies the value of register X to register A.
|
||||
type TXA struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a TXA) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsMisc | opMiscTXA,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a TXA) String() string {
|
||||
return fmt.Sprintf("txa")
|
||||
}
|
||||
|
||||
// TAX copies the value of register A to register X.
|
||||
type TAX struct{}
|
||||
|
||||
// Assemble implements the Instruction Assemble method.
|
||||
func (a TAX) Assemble() (RawInstruction, error) {
|
||||
return RawInstruction{
|
||||
Op: opClsMisc | opMiscTAX,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// String returns the instruction in assembler notation.
|
||||
func (a TAX) String() string {
|
||||
return fmt.Sprintf("tax")
|
||||
}
|
||||
|
||||
func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
|
||||
var (
|
||||
cls uint16
|
||||
sz uint16
|
||||
)
|
||||
switch dst {
|
||||
case RegA:
|
||||
cls = opClsLoadA
|
||||
case RegX:
|
||||
cls = opClsLoadX
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
|
||||
}
|
||||
switch loadSize {
|
||||
case 1:
|
||||
sz = opLoadWidth1
|
||||
case 2:
|
||||
sz = opLoadWidth2
|
||||
case 4:
|
||||
sz = opLoadWidth4
|
||||
default:
|
||||
return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz)
|
||||
}
|
||||
return RawInstruction{
|
||||
Op: cls | sz | mode,
|
||||
K: k,
|
||||
}, nil
|
||||
}
|
||||
10
vendor/golang.org/x/net/bpf/setter.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
// A Setter is a type which can attach a compiled BPF filter to itself.
|
||||
type Setter interface {
|
||||
SetBPF(filter []RawInstruction) error
|
||||
}
|
||||
150
vendor/golang.org/x/net/bpf/vm.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A VM is an emulated BPF virtual machine.
|
||||
type VM struct {
|
||||
filter []Instruction
|
||||
}
|
||||
|
||||
// NewVM returns a new VM using the input BPF program.
|
||||
func NewVM(filter []Instruction) (*VM, error) {
|
||||
if len(filter) == 0 {
|
||||
return nil, errors.New("one or more Instructions must be specified")
|
||||
}
|
||||
|
||||
for i, ins := range filter {
|
||||
check := len(filter) - (i + 1)
|
||||
switch ins := ins.(type) {
|
||||
// Check for out-of-bounds jumps in instructions
|
||||
case Jump:
|
||||
if check <= int(ins.Skip) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
|
||||
}
|
||||
case JumpIf:
|
||||
if check <= int(ins.SkipTrue) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
|
||||
}
|
||||
if check <= int(ins.SkipFalse) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
|
||||
}
|
||||
case JumpIfX:
|
||||
if check <= int(ins.SkipTrue) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
|
||||
}
|
||||
if check <= int(ins.SkipFalse) {
|
||||
return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
|
||||
}
|
||||
// Check for division or modulus by zero
|
||||
case ALUOpConstant:
|
||||
if ins.Val != 0 {
|
||||
break
|
||||
}
|
||||
|
||||
switch ins.Op {
|
||||
case ALUOpDiv, ALUOpMod:
|
||||
return nil, errors.New("cannot divide by zero using ALUOpConstant")
|
||||
}
|
||||
// Check for unknown extensions
|
||||
case LoadExtension:
|
||||
switch ins.Num {
|
||||
case ExtLen:
|
||||
default:
|
||||
return nil, fmt.Errorf("extension %d not implemented", ins.Num)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure last instruction is a return instruction
|
||||
switch filter[len(filter)-1].(type) {
|
||||
case RetA, RetConstant:
|
||||
default:
|
||||
return nil, errors.New("BPF program must end with RetA or RetConstant")
|
||||
}
|
||||
|
||||
// Though our VM works using disassembled instructions, we
|
||||
// attempt to assemble the input filter anyway to ensure it is compatible
|
||||
// with an operating system VM.
|
||||
_, err := Assemble(filter)
|
||||
|
||||
return &VM{
|
||||
filter: filter,
|
||||
}, err
|
||||
}
|
||||
|
||||
// Run runs the VM's BPF program against the input bytes.
|
||||
// Run returns the number of bytes accepted by the BPF program, and any errors
|
||||
// which occurred while processing the program.
|
||||
func (v *VM) Run(in []byte) (int, error) {
|
||||
var (
|
||||
// Registers of the virtual machine
|
||||
regA uint32
|
||||
regX uint32
|
||||
regScratch [16]uint32
|
||||
|
||||
// OK is true if the program should continue processing the next
|
||||
// instruction, or false if not, causing the loop to break
|
||||
ok = true
|
||||
)
|
||||
|
||||
// TODO(mdlayher): implement:
|
||||
// - NegateA:
|
||||
// - would require a change from uint32 registers to int32
|
||||
// registers
|
||||
|
||||
// TODO(mdlayher): add interop tests that check signedness of ALU
|
||||
// operations against kernel implementation, and make sure Go
|
||||
// implementation matches behavior
|
||||
|
||||
for i := 0; i < len(v.filter) && ok; i++ {
|
||||
ins := v.filter[i]
|
||||
|
||||
switch ins := ins.(type) {
|
||||
case ALUOpConstant:
|
||||
regA = aluOpConstant(ins, regA)
|
||||
case ALUOpX:
|
||||
regA, ok = aluOpX(ins, regA, regX)
|
||||
case Jump:
|
||||
i += int(ins.Skip)
|
||||
case JumpIf:
|
||||
jump := jumpIf(ins, regA)
|
||||
i += jump
|
||||
case JumpIfX:
|
||||
jump := jumpIfX(ins, regA, regX)
|
||||
i += jump
|
||||
case LoadAbsolute:
|
||||
regA, ok = loadAbsolute(ins, in)
|
||||
case LoadConstant:
|
||||
regA, regX = loadConstant(ins, regA, regX)
|
||||
case LoadExtension:
|
||||
regA = loadExtension(ins, in)
|
||||
case LoadIndirect:
|
||||
regA, ok = loadIndirect(ins, in, regX)
|
||||
case LoadMemShift:
|
||||
regX, ok = loadMemShift(ins, in)
|
||||
case LoadScratch:
|
||||
regA, regX = loadScratch(ins, regScratch, regA, regX)
|
||||
case RetA:
|
||||
return int(regA), nil
|
||||
case RetConstant:
|
||||
return int(ins.Val), nil
|
||||
case StoreScratch:
|
||||
regScratch = storeScratch(ins, regScratch, regA, regX)
|
||||
case TAX:
|
||||
regX = regA
|
||||
case TXA:
|
||||
regA = regX
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, nil
|
||||
}
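A short, self-contained sketch (not part of the vendored file) of NewVM and Run; the fabricated packet is just a 14-byte Ethernet header whose EtherType is ARP:

package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	vm, err := bpf.NewVM([]bpf.Instruction{
		// Accept up to 4096 bytes of ARP packets, drop everything else.
		bpf.LoadAbsolute{Off: 12, Size: 2},
		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
		bpf.RetConstant{Val: 4096},
		bpf.RetConstant{Val: 0},
	})
	if err != nil {
		panic(err)
	}

	pkt := make([]byte, 14)
	pkt[12], pkt[13] = 0x08, 0x06 // EtherType: ARP

	n, err := vm.Run(pkt)
	fmt.Println(n, err) // 4096 <nil>
}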
|
||||
182
vendor/golang.org/x/net/bpf/vm_instructions.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bpf
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
|
||||
return aluOpCommon(ins.Op, regA, ins.Val)
|
||||
}
|
||||
|
||||
func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
|
||||
// Guard against division or modulus by zero by terminating
|
||||
// the program, as the OS BPF VM does
|
||||
if regX == 0 {
|
||||
switch ins.Op {
|
||||
case ALUOpDiv, ALUOpMod:
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
return aluOpCommon(ins.Op, regA, regX), true
|
||||
}
|
||||
|
||||
func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
|
||||
switch op {
|
||||
case ALUOpAdd:
|
||||
return regA + value
|
||||
case ALUOpSub:
|
||||
return regA - value
|
||||
case ALUOpMul:
|
||||
return regA * value
|
||||
case ALUOpDiv:
|
||||
// Division by zero not permitted by NewVM and aluOpX checks
|
||||
return regA / value
|
||||
case ALUOpOr:
|
||||
return regA | value
|
||||
case ALUOpAnd:
|
||||
return regA & value
|
||||
case ALUOpShiftLeft:
|
||||
return regA << value
|
||||
case ALUOpShiftRight:
|
||||
return regA >> value
|
||||
case ALUOpMod:
|
||||
// Modulus by zero not permitted by NewVM and aluOpX checks
|
||||
return regA % value
|
||||
case ALUOpXor:
|
||||
return regA ^ value
|
||||
default:
|
||||
return regA
|
||||
}
|
||||
}
|
||||
|
||||
func jumpIf(ins JumpIf, regA uint32) int {
|
||||
return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val)
|
||||
}
|
||||
|
||||
func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int {
|
||||
return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX)
|
||||
}
|
||||
|
||||
func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int {
|
||||
var ok bool
|
||||
|
||||
switch cond {
|
||||
case JumpEqual:
|
||||
ok = regA == value
|
||||
case JumpNotEqual:
|
||||
ok = regA != value
|
||||
case JumpGreaterThan:
|
||||
ok = regA > value
|
||||
case JumpLessThan:
|
||||
ok = regA < value
|
||||
case JumpGreaterOrEqual:
|
||||
ok = regA >= value
|
||||
case JumpLessOrEqual:
|
||||
ok = regA <= value
|
||||
case JumpBitsSet:
|
||||
ok = (regA & value) != 0
|
||||
case JumpBitsNotSet:
|
||||
ok = (regA & value) == 0
|
||||
}
|
||||
|
||||
if ok {
|
||||
return int(skipTrue)
|
||||
}
|
||||
|
||||
return int(skipFalse)
|
||||
}
|
||||
|
||||
func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
|
||||
offset := int(ins.Off)
|
||||
size := ins.Size
|
||||
|
||||
return loadCommon(in, offset, size)
|
||||
}
|
||||
|
||||
func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
|
||||
switch ins.Dst {
|
||||
case RegA:
|
||||
regA = ins.Val
|
||||
case RegX:
|
||||
regX = ins.Val
|
||||
}
|
||||
|
||||
return regA, regX
|
||||
}
|
||||
|
||||
func loadExtension(ins LoadExtension, in []byte) uint32 {
|
||||
switch ins.Num {
|
||||
case ExtLen:
|
||||
return uint32(len(in))
|
||||
default:
|
||||
panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
|
||||
}
|
||||
}
|
||||
|
||||
func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
|
||||
offset := int(ins.Off) + int(regX)
|
||||
size := ins.Size
|
||||
|
||||
return loadCommon(in, offset, size)
|
||||
}
|
||||
|
||||
func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
|
||||
offset := int(ins.Off)
|
||||
|
||||
// Size of LoadMemShift is always 1 byte
|
||||
if !inBounds(len(in), offset, 1) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Mask off high 4 bits and multiply low 4 bits by 4
|
||||
return uint32(in[offset]&0x0f) * 4, true
|
||||
}
|
||||
|
||||
func inBounds(inLen int, offset int, size int) bool {
|
||||
return offset+size <= inLen
|
||||
}
|
||||
|
||||
func loadCommon(in []byte, offset int, size int) (uint32, bool) {
|
||||
if !inBounds(len(in), offset, size) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
switch size {
|
||||
case 1:
|
||||
return uint32(in[offset]), true
|
||||
case 2:
|
||||
return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
|
||||
case 4:
|
||||
return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid load size: %d", size))
|
||||
}
|
||||
}
|
||||
|
||||
func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
|
||||
switch ins.Dst {
|
||||
case RegA:
|
||||
regA = regScratch[ins.N]
|
||||
case RegX:
|
||||
regX = regScratch[ins.N]
|
||||
}
|
||||
|
||||
return regA, regX
|
||||
}
|
||||
|
||||
func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
|
||||
switch ins.Src {
|
||||
case RegA:
|
||||
regScratch[ins.N] = regA
|
||||
case RegX:
|
||||
regScratch[ins.N] = regX
|
||||
}
|
||||
|
||||
return regScratch
|
||||
}
|
||||
50
vendor/golang.org/x/net/http/httpguts/guts.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package httpguts provides functions implementing various details
|
||||
// of the HTTP specification.
|
||||
//
|
||||
// This package is shared by the standard library (which vendors it)
|
||||
// and x/net/http2. It comes with no API stability promise.
|
||||
package httpguts
|
||||
|
||||
import (
|
||||
"net/textproto"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ValidTrailerHeader reports whether name is a valid header field name to appear
|
||||
// in trailers.
|
||||
// See RFC 7230, Section 4.1.2
|
||||
func ValidTrailerHeader(name string) bool {
|
||||
name = textproto.CanonicalMIMEHeaderKey(name)
|
||||
if strings.HasPrefix(name, "If-") || badTrailer[name] {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var badTrailer = map[string]bool{
|
||||
"Authorization": true,
|
||||
"Cache-Control": true,
|
||||
"Connection": true,
|
||||
"Content-Encoding": true,
|
||||
"Content-Length": true,
|
||||
"Content-Range": true,
|
||||
"Content-Type": true,
|
||||
"Expect": true,
|
||||
"Host": true,
|
||||
"Keep-Alive": true,
|
||||
"Max-Forwards": true,
|
||||
"Pragma": true,
|
||||
"Proxy-Authenticate": true,
|
||||
"Proxy-Authorization": true,
|
||||
"Proxy-Connection": true,
|
||||
"Range": true,
|
||||
"Realm": true,
|
||||
"Te": true,
|
||||
"Trailer": true,
|
||||
"Transfer-Encoding": true,
|
||||
"Www-Authenticate": true,
|
||||
}
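A small usage sketch (not part of the vendored file); the header names are arbitrary examples:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))     // true
	fmt.Println(httpguts.ValidTrailerHeader("Content-Length")) // false: listed in badTrailer
	fmt.Println(httpguts.ValidTrailerHeader("If-Match"))       // false: "If-" prefix
}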
|
||||
347
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
Normal file
@@ -0,0 +1,347 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package httpguts
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
var isTokenTable = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'V': true,
|
||||
'W': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
func IsTokenRune(r rune) bool {
|
||||
return r < utf8.RuneSelf && isTokenTable[byte(r)]
|
||||
}
|
||||
|
||||
// HeaderValuesContainsToken reports whether any string in values
|
||||
// contains the provided token, ASCII case-insensitively.
|
||||
func HeaderValuesContainsToken(values []string, token string) bool {
|
||||
for _, v := range values {
|
||||
if headerValueContainsToken(v, token) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isOWS reports whether b is an optional whitespace byte, as defined
|
||||
// by RFC 7230 section 3.2.3.
|
||||
func isOWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||
|
||||
// trimOWS returns x with all optional whitespace removed from the
|
||||
// beginning and end.
|
||||
func trimOWS(x string) string {
|
||||
// TODO: consider using strings.Trim(x, " \t") instead,
|
||||
// if and when it's fast enough. See issue 10292.
|
||||
// But this ASCII-only code will probably always beat UTF-8
|
||||
// aware code.
|
||||
for len(x) > 0 && isOWS(x[0]) {
|
||||
x = x[1:]
|
||||
}
|
||||
for len(x) > 0 && isOWS(x[len(x)-1]) {
|
||||
x = x[:len(x)-1]
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
// headerValueContainsToken reports whether v (assumed to be a
|
||||
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
||||
// contains token amongst its comma-separated tokens, ASCII
|
||||
// case-insensitively.
|
||||
func headerValueContainsToken(v string, token string) bool {
|
||||
for comma := strings.IndexByte(v, ','); comma != -1; comma = strings.IndexByte(v, ',') {
|
||||
if tokenEqual(trimOWS(v[:comma]), token) {
|
||||
return true
|
||||
}
|
||||
v = v[comma+1:]
|
||||
}
|
||||
return tokenEqual(trimOWS(v), token)
|
||||
}
|
||||
|
||||
// lowerASCII returns the ASCII lowercase version of b.
|
||||
func lowerASCII(b byte) byte {
|
||||
if 'A' <= b && b <= 'Z' {
|
||||
return b + ('a' - 'A')
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
|
||||
func tokenEqual(t1, t2 string) bool {
|
||||
if len(t1) != len(t2) {
|
||||
return false
|
||||
}
|
||||
for i, b := range t1 {
|
||||
if b >= utf8.RuneSelf {
|
||||
// No UTF-8 or non-ASCII allowed in tokens.
|
||||
return false
|
||||
}
|
||||
if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isLWS reports whether b is linear white space, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
//
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
func isLWS(b byte) bool { return b == ' ' || b == '\t' }
|
||||
|
||||
// isCTL reports whether b is a control byte, according
|
||||
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
|
||||
//
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
func isCTL(b byte) bool {
|
||||
const del = 0x7f // a CTL
|
||||
return b < ' ' || b == del
|
||||
}
|
||||
|
||||
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
||||
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
||||
// letters are not allowed.
|
||||
//
|
||||
// RFC 7230 says:
|
||||
//
|
||||
// header-field = field-name ":" OWS field-value OWS
|
||||
// field-name = token
|
||||
// token = 1*tchar
|
||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
||||
func ValidHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(v); i++ {
|
||||
if !isTokenTable[v[i]] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
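// For instance (illustrative, not part of the upstream file):
//
//	ValidHeaderFieldName("Content-Type") // true
//	ValidHeaderFieldName("Content Type") // false: SP is not a tchar
//	ValidHeaderFieldName("")             // false: a field-name needs at least one tchar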
|
||||
|
||||
// ValidHostHeader reports whether h is a valid host header.
|
||||
func ValidHostHeader(h string) bool {
|
||||
// The latest spec is actually this:
|
||||
//
|
||||
// http://tools.ietf.org/html/rfc7230#section-5.4
|
||||
// Host = uri-host [ ":" port ]
|
||||
//
|
||||
// Where uri-host is:
|
||||
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
||||
//
|
||||
// But we're going to be much more lenient for now and just
|
||||
// search for any byte that's not a valid byte in any of those
|
||||
// expressions.
|
||||
for i := 0; i < len(h); i++ {
|
||||
if !validHostByte[h[i]] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// See the validHostHeader comment.
|
||||
var validHostByte = [256]bool{
|
||||
'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
|
||||
'8': true, '9': true,
|
||||
|
||||
'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
|
||||
'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
|
||||
'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
|
||||
'y': true, 'z': true,
|
||||
|
||||
'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
|
||||
'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
|
||||
'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
|
||||
'Y': true, 'Z': true,
|
||||
|
||||
'!': true, // sub-delims
|
||||
'$': true, // sub-delims
|
||||
'%': true, // pct-encoded (and used in IPv6 zones)
|
||||
'&': true, // sub-delims
|
||||
'(': true, // sub-delims
|
||||
')': true, // sub-delims
|
||||
'*': true, // sub-delims
|
||||
'+': true, // sub-delims
|
||||
',': true, // sub-delims
|
||||
'-': true, // unreserved
|
||||
'.': true, // unreserved
|
||||
':': true, // IPv6address + Host expression's optional port
|
||||
';': true, // sub-delims
|
||||
'=': true, // sub-delims
|
||||
'[': true,
|
||||
'\'': true, // sub-delims
|
||||
']': true,
|
||||
'_': true, // unreserved
|
||||
'~': true, // unreserved
|
||||
}
|
||||
|
||||
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
|
||||
//
|
||||
// message-header = field-name ":" [ field-value ]
|
||||
// field-value = *( field-content | LWS )
|
||||
// field-content = <the OCTETs making up the field-value
|
||||
// and consisting of either *TEXT or combinations
|
||||
// of token, separators, and quoted-string>
|
||||
//
|
||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
|
||||
//
|
||||
// TEXT = <any OCTET except CTLs,
|
||||
// but including LWS>
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// CTL = <any US-ASCII control character
|
||||
// (octets 0 - 31) and DEL (127)>
|
||||
//
|
||||
// RFC 7230 says:
|
||||
//
|
||||
// field-value = *( field-content / obs-fold )
|
||||
// obs-fold = N/A to http2, and deprecated
|
||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
||||
// field-vchar = VCHAR / obs-text
|
||||
// obs-text = %x80-FF
|
||||
// VCHAR = "any visible [USASCII] character"
|
||||
//
|
||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
||||
// that are not valid. While most of the values that can be encoded
|
||||
// will not alter header field parsing, carriage return (CR, ASCII
|
||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
||||
// 0x0) might be exploited by an attacker if they are translated
|
||||
// verbatim. Any request or response that contains a character not
|
||||
// permitted in a header field value MUST be treated as malformed
|
||||
// (Section 8.1.2.6). Valid characters are defined by the
|
||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
||||
//
|
||||
// This function does not (yet?) properly handle the rejection of
|
||||
// strings that begin or end with SP or HTAB.
|
||||
func ValidHeaderFieldValue(v string) bool {
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if isCTL(b) && !isLWS(b) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// PunycodeHostPort returns the IDNA Punycode version
|
||||
// of the provided "host" or "host:port" string.
|
||||
func PunycodeHostPort(v string) (string, error) {
|
||||
if isASCII(v) {
|
||||
return v, nil
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(v)
|
||||
if err != nil {
|
||||
// The input 'v' argument was just a "host" argument,
|
||||
// without a port. This error should not be returned
|
||||
// to the caller.
|
||||
host = v
|
||||
port = ""
|
||||
}
|
||||
host, err = idna.ToASCII(host)
|
||||
if err != nil {
|
||||
// Non-UTF-8? Not representable in Punycode, in any
|
||||
// case.
|
||||
return "", err
|
||||
}
|
||||
if port == "" {
|
||||
return host, nil
|
||||
}
|
||||
return net.JoinHostPort(host, port), nil
|
||||
}
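A brief usage sketch (not part of the vendored file); the hostnames are arbitrary examples:

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// ASCII input is returned unchanged.
	fmt.Println(httpguts.PunycodeHostPort("example.com:8080"))

	// A non-ASCII host is converted to its IDNA Punycode form, keeping the port.
	fmt.Println(httpguts.PunycodeHostPort("bücher.example:443")) // xn--bcher-kva.example:443 <nil>
}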
|
||||
2
vendor/golang.org/x/net/http2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
*~
|
||||
h2i/h2i
|
||||
53
vendor/golang.org/x/net/http2/ascii.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import "strings"

// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.

// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] < ' ' || s[i] > '~' {
			return false
		}
	}
	return true
}

// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
	if !isASCIIPrint(s) {
		return "", false
	}
	return strings.ToLower(s), true
}
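To make the motivation concrete, here is a hypothetical, self-contained sketch (not part of the vendored file) contrasting full Unicode case folding with the ASCII-only fold implemented above; the names with the Sketch suffix are invented for the example.

package main

import (
	"fmt"
	"strings"
)

// lowerSketch mirrors lower() above: ASCII-only lowercasing.
func lowerSketch(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

// asciiEqualFoldSketch mirrors asciiEqualFold() above: byte-wise,
// ASCII-case-insensitive comparison.
func asciiEqualFoldSketch(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lowerSketch(s[i]) != lowerSketch(t[i]) {
			return false
		}
	}
	return true
}

func main() {
	// strings.EqualFold applies full Unicode folding, so the Kelvin sign
	// U+212A matches a plain 'k' -- not what HTTP token matching intends.
	fmt.Println(strings.EqualFold("\u212Aeep-alive", "keep-alive"))    // true
	fmt.Println(asciiEqualFoldSketch("\u212Aeep-alive", "keep-alive")) // false: ASCII-only, bytes differ
	fmt.Println(asciiEqualFoldSketch("Keep-Alive", "keep-alive"))      // true
}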
641
vendor/golang.org/x/net/http2/ciphers.go
generated
vendored
Normal file
@@ -0,0 +1,641 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
// A list of the possible cipher suite ids. Taken from
|
||||
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
||||
|
||||
const (
|
||||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
|
||||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
|
||||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
|
||||
// Reserved uint16 = 0x001C-1D
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
|
||||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
|
||||
// Reserved uint16 = 0x0047-4F
|
||||
// Reserved uint16 = 0x0050-58
|
||||
// Reserved uint16 = 0x0059-5C
|
||||
// Unassigned uint16 = 0x005D-5F
|
||||
// Reserved uint16 = 0x0060-66
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
|
||||
// Unassigned uint16 = 0x006E-83
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
|
||||
// Unassigned uint16 = 0x00C6-FE
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
|
||||
// Unassigned uint16 = 0x01-55,*
|
||||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
|
||||
// Unassigned uint16 = 0x5601 - 0xC000
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
|
||||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
|
||||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
|
||||
// Unassigned uint16 = 0xC0B0-FF
|
||||
// Unassigned uint16 = 0xC1-CB,*
|
||||
// Unassigned uint16 = 0xCC00-A7
|
||||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
|
||||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
|
||||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
|
||||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
|
||||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
|
||||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
|
||||
)
|
||||
|
||||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
|
||||
// References:
|
||||
// https://tools.ietf.org/html/rfc7540#appendix-A
|
||||
// Reject cipher suites from Appendix A.
|
||||
// "This list includes those cipher suites that do not
|
||||
// offer an ephemeral key exchange and those that are
|
||||
// based on the TLS null, stream or block cipher type"
|
||||
func isBadCipher(cipher uint16) bool {
|
||||
switch cipher {
|
||||
case cipher_TLS_NULL_WITH_NULL_NULL,
|
||||
cipher_TLS_RSA_WITH_NULL_MD5,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_MD5,
|
||||
cipher_TLS_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5,
|
||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_SHA,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
|
||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
|
||||
cipher_TLS_KRB5_WITH_RC4_128_MD5,
|
||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
|
||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
|
||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM,
|
||||
cipher_TLS_RSA_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_RSA_WITH_AES_256_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM,
|
||||
cipher_TLS_PSK_WITH_AES_128_CCM_8,
|
||||
cipher_TLS_PSK_WITH_AES_256_CCM_8:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
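For orientation, a hypothetical standalone sketch (not part of the vendored file) of the same idea expressed against crypto/tls: an HTTP/2 endpoint checking the negotiated suite against a small allow-list of forward-secret AEAD suites. The allow-list and checkConnState helper are illustrative, not the package's actual policy.

package main

import (
	"crypto/tls"
	"fmt"
)

// allowedForHTTP2 lists a few suites that satisfy the spirit of RFC 7540
// Appendix A (ephemeral key exchange plus an AEAD cipher); anything else
// is treated as prohibited in this sketch.
var allowedForHTTP2 = map[uint16]bool{
	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   true,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: true,
	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    true,
	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  true,
}

func checkConnState(cs tls.ConnectionState) error {
	if !allowedForHTTP2[cs.CipherSuite] {
		return fmt.Errorf("cipher suite 0x%04x is prohibited for HTTP/2", cs.CipherSuite)
	}
	return nil
}

func main() {
	// A static-RSA CBC suite is rejected; an ECDHE GCM suite is accepted.
	fmt.Println(checkConnState(tls.ConnectionState{CipherSuite: tls.TLS_RSA_WITH_AES_128_CBC_SHA}))
	fmt.Println(checkConnState(tls.ConnectionState{CipherSuite: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}))
}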
|
||||
311
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
Normal file
@@ -0,0 +1,311 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Transport code's client connection pooling.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ClientConnPool manages a pool of HTTP/2 client connections.
|
||||
type ClientConnPool interface {
|
||||
// GetClientConn returns a specific HTTP/2 connection (usually
|
||||
// a TLS-TCP connection) to an HTTP/2 server. On success, the
|
||||
// returned ClientConn accounts for the upcoming RoundTrip
|
||||
// call, so the caller should not omit it. If the caller needs
|
||||
// to, ClientConn.RoundTrip can be called with a bogus
|
||||
// new(http.Request) to release the stream reservation.
|
||||
GetClientConn(req *http.Request, addr string) (*ClientConn, error)
|
||||
MarkDead(*ClientConn)
|
||||
}
|
||||
|
||||
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
|
||||
// implementations which can close their idle connections.
|
||||
type clientConnPoolIdleCloser interface {
|
||||
ClientConnPool
|
||||
closeIdleConnections()
|
||||
}
|
||||
|
||||
var (
|
||||
_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
|
||||
_ clientConnPoolIdleCloser = noDialClientConnPool{}
|
||||
)
|
||||
|
||||
// TODO: use singleflight for dialing and addConnCalls?
|
||||
type clientConnPool struct {
|
||||
t *Transport
|
||||
|
||||
mu sync.Mutex // TODO: maybe switch to RWMutex
|
||||
// TODO: add support for sharing conns based on cert names
|
||||
// (e.g. share conn for googleapis.com and appspot.com)
|
||||
conns map[string][]*ClientConn // key is host:port
|
||||
dialing map[string]*dialCall // currently in-flight dials
|
||||
keys map[*ClientConn][]string
|
||||
addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
|
||||
}
|
||||
|
||||
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, dialOnMiss)
|
||||
}
|
||||
|
||||
const (
|
||||
dialOnMiss = true
|
||||
noDialOnMiss = false
|
||||
)
|
||||
|
||||
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
|
||||
// TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
|
||||
if isConnectionCloseRequest(req) && dialOnMiss {
|
||||
// It gets its own connection.
|
||||
traceGetConn(req, addr)
|
||||
const singleUse = true
|
||||
cc, err := p.t.dialClientConn(req.Context(), addr, singleUse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
for {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[addr] {
|
||||
if cc.ReserveNewRequest() {
|
||||
// When a connection is presented to us by the net/http package,
|
||||
// the GetConn hook has already been called.
|
||||
// Don't call it a second time here.
|
||||
if !cc.getConnCalled {
|
||||
traceGetConn(req, addr)
|
||||
}
|
||||
cc.getConnCalled = false
|
||||
p.mu.Unlock()
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
if !dialOnMiss {
|
||||
p.mu.Unlock()
|
||||
return nil, ErrNoCachedConn
|
||||
}
|
||||
traceGetConn(req, addr)
|
||||
call := p.getStartDialLocked(req.Context(), addr)
|
||||
p.mu.Unlock()
|
||||
<-call.done
|
||||
if shouldRetryDial(call, req) {
|
||||
continue
|
||||
}
|
||||
cc, err := call.res, call.err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cc.ReserveNewRequest() {
|
||||
return cc, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dialCall is an in-flight Transport dial call to a host.
|
||||
type dialCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
// the context associated with the request
|
||||
// that created this dialCall
|
||||
ctx context.Context
|
||||
done chan struct{} // closed when done
|
||||
res *ClientConn // valid after done is closed
|
||||
err error // valid after done is closed
|
||||
}
|
||||
|
||||
// requires p.mu is held.
|
||||
func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *dialCall {
|
||||
if call, ok := p.dialing[addr]; ok {
|
||||
// A dial is already in-flight. Don't start another.
|
||||
return call
|
||||
}
|
||||
call := &dialCall{p: p, done: make(chan struct{}), ctx: ctx}
|
||||
if p.dialing == nil {
|
||||
p.dialing = make(map[string]*dialCall)
|
||||
}
|
||||
p.dialing[addr] = call
|
||||
go call.dial(call.ctx, addr)
|
||||
return call
|
||||
}
|
||||
|
||||
// run in its own goroutine.
|
||||
func (c *dialCall) dial(ctx context.Context, addr string) {
|
||||
const singleUse = false // shared conn
|
||||
c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse)
|
||||
|
||||
c.p.mu.Lock()
|
||||
delete(c.p.dialing, addr)
|
||||
if c.err == nil {
|
||||
c.p.addConnLocked(addr, c.res)
|
||||
}
|
||||
c.p.mu.Unlock()
|
||||
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
|
||||
// already exist. It coalesces concurrent calls with the same key.
|
||||
// This is used by the http1 Transport code when it creates a new connection. Because
|
||||
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
|
||||
// the protocol), it can get into a situation where it has multiple TLS connections.
|
||||
// This code decides which ones live or die.
|
||||
// The return value used is whether c was used.
|
||||
// c is never closed.
|
||||
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c net.Conn) (used bool, err error) {
|
||||
p.mu.Lock()
|
||||
for _, cc := range p.conns[key] {
|
||||
if cc.CanTakeNewRequest() {
|
||||
p.mu.Unlock()
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
call, dup := p.addConnCalls[key]
|
||||
if !dup {
|
||||
if p.addConnCalls == nil {
|
||||
p.addConnCalls = make(map[string]*addConnCall)
|
||||
}
|
||||
call = &addConnCall{
|
||||
p: p,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
p.addConnCalls[key] = call
|
||||
go call.run(t, key, c)
|
||||
}
|
||||
p.mu.Unlock()
|
||||
|
||||
<-call.done
|
||||
if call.err != nil {
|
||||
return false, call.err
|
||||
}
|
||||
return !dup, nil
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *addConnCall) run(t *Transport, key string, nc net.Conn) {
|
||||
cc, err := t.NewClientConn(nc)
|
||||
|
||||
p := c.p
|
||||
p.mu.Lock()
|
||||
if err != nil {
|
||||
c.err = err
|
||||
} else {
|
||||
cc.getConnCalled = true // already called by the net/http package
|
||||
p.addConnLocked(key, cc)
|
||||
}
|
||||
delete(p.addConnCalls, key)
|
||||
p.mu.Unlock()
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
if v == cc {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.conns == nil {
|
||||
p.conns = make(map[string][]*ClientConn)
|
||||
}
|
||||
if p.keys == nil {
|
||||
p.keys = make(map[*ClientConn][]string)
|
||||
}
|
||||
p.conns[key] = append(p.conns[key], cc)
|
||||
p.keys[cc] = append(p.keys[cc], key)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
for _, key := range p.keys[cc] {
|
||||
vv, ok := p.conns[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
newList := filterOutClientConn(vv, cc)
|
||||
if len(newList) > 0 {
|
||||
p.conns[key] = newList
|
||||
} else {
|
||||
delete(p.conns, key)
|
||||
}
|
||||
}
|
||||
delete(p.keys, cc)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) closeIdleConnections() {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
// TODO: don't close a cc if it was just added to the pool
|
||||
// milliseconds ago and has never been used. There's currently
|
||||
// a small race window with the HTTP/1 Transport's integration
|
||||
// where it can add an idle conn just before using it, and
|
||||
// somebody else can concurrently call CloseIdleConns and
|
||||
// break some caller's RoundTrip.
|
||||
for _, vv := range p.conns {
|
||||
for _, cc := range vv {
|
||||
cc.closeIfIdle()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
||||
out := in[:0]
|
||||
for _, v := range in {
|
||||
if v != exclude {
|
||||
out = append(out, v)
|
||||
}
|
||||
}
|
||||
// If we filtered it out, zero out the last item to prevent
|
||||
// the GC from seeing it.
|
||||
if len(in) != len(out) {
|
||||
in[len(in)-1] = nil
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
|
||||
// which never dials. We let the HTTP/1.1 client dial and use its TLS
|
||||
// connection instead.
|
||||
type noDialClientConnPool struct{ *clientConnPool }
|
||||
|
||||
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
|
||||
return p.getClientConn(req, addr, noDialOnMiss)
|
||||
}
|
||||
|
||||
// shouldRetryDial reports whether the current request should
|
||||
// retry dialing after the call finished unsuccessfully, for example
|
||||
// if the dial was canceled because of a context cancellation or
|
||||
// deadline expiry.
|
||||
func shouldRetryDial(call *dialCall, req *http.Request) bool {
|
||||
if call.err == nil {
|
||||
// No error, no need to retry
|
||||
return false
|
||||
}
|
||||
if call.ctx == req.Context() {
|
||||
// If the call has the same context as the request, the dial
|
||||
// should not be retried, since any cancellation will have come
|
||||
// from this request.
|
||||
return false
|
||||
}
|
||||
if !errors.Is(call.err, context.Canceled) && !errors.Is(call.err, context.DeadlineExceeded) {
|
||||
// If the call error is not because of a context cancellation or a deadline expiry,
|
||||
// the dial should not be retried.
|
||||
return false
|
||||
}
|
||||
// Only retry if the error is a context cancellation error or deadline expiry
|
||||
// and the context associated with the call was canceled or expired.
|
||||
return call.ctx.Err() != nil
|
||||
}
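As a usage sketch (not part of the vendored file), this is roughly how the no-dial pool described above comes into play for callers: http2.ConfigureTransport wires HTTP/2 onto an existing net/http.Transport, so the HTTP/1.1 client owns the dial and the resulting TLS connection is handed to this pool. The URL is a placeholder.

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{}
	if err := http2.ConfigureTransport(t1); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}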
|
||||
169
vendor/golang.org/x/net/http2/config.go
generated
vendored
Normal file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// http2Config is a package-internal version of net/http.HTTP2Config.
|
||||
//
|
||||
// http.HTTP2Config was added in Go 1.24.
|
||||
// When running with a version of net/http that includes HTTP2Config,
|
||||
// we merge the configuration with the fields in Transport or Server
|
||||
// to produce an http2Config.
|
||||
//
|
||||
// Zero valued fields in http2Config are interpreted as in the
|
||||
// net/http.HTTPConfig documentation.
|
||||
//
|
||||
// Precedence order for reconciling configurations is:
|
||||
//
|
||||
// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
|
||||
// - Otherwise use the http2.{Server.Transport} value.
|
||||
// - If the resulting value is zero or out of range, use a default.
|
||||
type http2Config struct {
|
||||
MaxConcurrentStreams uint32
|
||||
StrictMaxConcurrentRequests bool
|
||||
MaxDecoderHeaderTableSize uint32
|
||||
MaxEncoderHeaderTableSize uint32
|
||||
MaxReadFrameSize uint32
|
||||
MaxUploadBufferPerConnection int32
|
||||
MaxUploadBufferPerStream int32
|
||||
SendPingTimeout time.Duration
|
||||
PingTimeout time.Duration
|
||||
WriteByteTimeout time.Duration
|
||||
PermitProhibitedCipherSuites bool
|
||||
CountError func(errType string)
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from
|
||||
// net/http.Server.HTTP2Config and http2.Server.
|
||||
func configFromServer(h1 *http.Server, h2 *Server) http2Config {
|
||||
conf := http2Config{
|
||||
MaxConcurrentStreams: h2.MaxConcurrentStreams,
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
|
||||
MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
|
||||
CountError: h2.CountError,
|
||||
}
|
||||
fillNetHTTPConfig(&conf, h1.HTTP2)
|
||||
setConfigDefaults(&conf, true)
|
||||
return conf
|
||||
}
|
||||
|
||||
// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
|
||||
// (the net/http Transport).
|
||||
func configFromTransport(h2 *Transport) http2Config {
|
||||
conf := http2Config{
|
||||
StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
}
|
||||
|
||||
// Unlike most config fields, where out-of-range values revert to the default,
|
||||
// Transport.MaxReadFrameSize clips.
|
||||
if conf.MaxReadFrameSize < minMaxFrameSize {
|
||||
conf.MaxReadFrameSize = minMaxFrameSize
|
||||
} else if conf.MaxReadFrameSize > maxFrameSize {
|
||||
conf.MaxReadFrameSize = maxFrameSize
|
||||
}
|
||||
|
||||
if h2.t1 != nil {
|
||||
fillNetHTTPConfig(&conf, h2.t1.HTTP2)
|
||||
}
|
||||
setConfigDefaults(&conf, false)
|
||||
return conf
|
||||
}
|
||||
|
||||
func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
|
||||
if *v < minval || *v > maxval {
|
||||
*v = defval
|
||||
}
|
||||
}
|
||||
|
||||
func setConfigDefaults(conf *http2Config, server bool) {
|
||||
setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
|
||||
setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
|
||||
}
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
|
||||
}
|
||||
setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
|
||||
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
|
||||
}
|
||||
|
||||
// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
|
||||
// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
|
||||
func adjustHTTP1MaxHeaderSize(n int64) int64 {
|
||||
// http2's count is in a slightly different unit and includes 32 bytes per pair.
|
||||
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
|
||||
const perFieldOverhead = 32 // per http2 spec
|
||||
const typicalHeaders = 10 // conservative
|
||||
return n + typicalHeaders*perFieldOverhead
|
||||
}
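A small worked example of the padding above (standalone sketch, not part of the vendored file); the 32-byte per-field overhead matches the HTTP/2 spec's accounting for SETTINGS_MAX_HEADER_LIST_SIZE.

package main

import "fmt"

// adjust mirrors adjustHTTP1MaxHeaderSize above.
func adjust(n int64) int64 {
	const perFieldOverhead = 32 // per-field overhead, per the HTTP/2 spec
	const typicalHeaders = 10   // the conservative estimate used above
	return n + typicalHeaders*perFieldOverhead
}

func main() {
	// net/http's DefaultMaxHeaderBytes is 1 MiB; the advertised
	// MAX_HEADER_LIST_SIZE becomes 1 MiB plus 320 bytes of padding.
	fmt.Println(adjust(1 << 20)) // 1048896
}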
|
||||
|
||||
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
|
||||
if h2 == nil {
|
||||
return
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if http2ConfigStrictMaxConcurrentRequests(h2) {
|
||||
conf.StrictMaxConcurrentRequests = true
|
||||
}
|
||||
if h2.MaxEncoderHeaderTableSize != 0 {
|
||||
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxDecoderHeaderTableSize != 0 {
|
||||
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if h2.MaxReadFrameSize != 0 {
|
||||
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerConnection != 0 {
|
||||
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerStream != 0 {
|
||||
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
|
||||
}
|
||||
if h2.SendPingTimeout != 0 {
|
||||
conf.SendPingTimeout = h2.SendPingTimeout
|
||||
}
|
||||
if h2.PingTimeout != 0 {
|
||||
conf.PingTimeout = h2.PingTimeout
|
||||
}
|
||||
if h2.WriteByteTimeout != 0 {
|
||||
conf.WriteByteTimeout = h2.WriteByteTimeout
|
||||
}
|
||||
if h2.PermitProhibitedCipherSuites {
|
||||
conf.PermitProhibitedCipherSuites = true
|
||||
}
|
||||
if h2.CountError != nil {
|
||||
conf.CountError = h2.CountError
|
||||
}
|
||||
}
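A hypothetical end-to-end sketch (not part of the vendored file) of the precedence rules implemented above, assuming Go 1.24+ where net/http.Server gained the HTTP2 field; the specific numbers are invented.

package main

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 500,              // non-zero, so it takes precedence
			PingTimeout:          10 * time.Second, // likewise overrides the http2.Server value
		},
	}
	h2 := &http2.Server{
		MaxConcurrentStreams: 100, // overridden by srv.HTTP2 above
	}
	_ = http2.ConfigureServer(srv, h2)
	// srv.ListenAndServeTLS("cert.pem", "key.pem") would now use the merged config.
	_ = srv
}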
|
||||
15
vendor/golang.org/x/net/http2/config_go125.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.26

package http2

import (
	"net/http"
)

func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
	return false
}
15
vendor/golang.org/x/net/http2/config_go126.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.26

package http2

import (
	"net/http"
)

func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
	return h2.StrictMaxConcurrentRequests
}
149
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
Normal file
@@ -0,0 +1,149 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
	"sync"
)

// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var dataChunkPools = [...]sync.Pool{
	{New: func() interface{} { return new([1 << 10]byte) }},
	{New: func() interface{} { return new([2 << 10]byte) }},
	{New: func() interface{} { return new([4 << 10]byte) }},
	{New: func() interface{} { return new([8 << 10]byte) }},
	{New: func() interface{} { return new([16 << 10]byte) }},
}

func getDataBufferChunk(size int64) []byte {
	switch {
	case size <= 1<<10:
		return dataChunkPools[0].Get().(*[1 << 10]byte)[:]
	case size <= 2<<10:
		return dataChunkPools[1].Get().(*[2 << 10]byte)[:]
	case size <= 4<<10:
		return dataChunkPools[2].Get().(*[4 << 10]byte)[:]
	case size <= 8<<10:
		return dataChunkPools[3].Get().(*[8 << 10]byte)[:]
	default:
		return dataChunkPools[4].Get().(*[16 << 10]byte)[:]
	}
}

func putDataBufferChunk(p []byte) {
	switch len(p) {
	case 1 << 10:
		dataChunkPools[0].Put((*[1 << 10]byte)(p))
	case 2 << 10:
		dataChunkPools[1].Put((*[2 << 10]byte)(p))
	case 4 << 10:
		dataChunkPools[2].Put((*[4 << 10]byte)(p))
	case 8 << 10:
		dataChunkPools[3].Put((*[8 << 10]byte)(p))
	case 16 << 10:
		dataChunkPools[4].Put((*[16 << 10]byte)(p))
	default:
		panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
	}
}

// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

var errReadEmpty = errors.New("read from empty dataBuffer")

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}

func (b *dataBuffer) bytesFromFirstChunk() []byte {
	if len(b.chunks) == 1 {
		return b.chunks[0][b.r:b.w]
	}
	return b.chunks[0][b.r:]
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}

// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}

func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
	if len(b.chunks) != 0 {
		last := b.chunks[len(b.chunks)-1]
		if b.w < len(last) {
			return last
		}
	}
	chunk := getDataBufferChunk(want)
	b.chunks = append(b.chunks, chunk)
	b.w = 0
	return chunk
}
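To make the chunking behaviour above concrete, a small in-package sketch (hypothetical, test-style code; dataBuffer is unexported, so this only compiles inside package http2):

package http2

import "fmt"

func dataBufferDemo() {
	b := &dataBuffer{expected: 16 << 10} // hint: roughly a 16 KiB body is expected
	b.Write([]byte("hello "))            // first Write allocates one 16 KiB chunk from the pool
	b.Write([]byte("world"))             // appended into the same chunk, no new allocation
	p := make([]byte, b.Len())
	n, err := b.Read(p)             // drains all buffered bytes in one call
	fmt.Println(string(p[:n]), err) // "hello world" <nil>
}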
145
vendor/golang.org/x/net/http2/errors.go
generated
vendored
Normal file
145
vendor/golang.org/x/net/http2/errors.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
)

// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

const (
	ErrCodeNo                 ErrCode = 0x0
	ErrCodeProtocol           ErrCode = 0x1
	ErrCodeInternal           ErrCode = 0x2
	ErrCodeFlowControl        ErrCode = 0x3
	ErrCodeSettingsTimeout    ErrCode = 0x4
	ErrCodeStreamClosed       ErrCode = 0x5
	ErrCodeFrameSize          ErrCode = 0x6
	ErrCodeRefusedStream      ErrCode = 0x7
	ErrCodeCancel             ErrCode = 0x8
	ErrCodeCompression        ErrCode = 0x9
	ErrCodeConnect            ErrCode = 0xa
	ErrCodeEnhanceYourCalm    ErrCode = 0xb
	ErrCodeInadequateSecurity ErrCode = 0xc
	ErrCodeHTTP11Required     ErrCode = 0xd
)

var errCodeName = map[ErrCode]string{
	ErrCodeNo:                 "NO_ERROR",
	ErrCodeProtocol:           "PROTOCOL_ERROR",
	ErrCodeInternal:           "INTERNAL_ERROR",
	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
	ErrCodeStreamClosed:       "STREAM_CLOSED",
	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
	ErrCodeRefusedStream:      "REFUSED_STREAM",
	ErrCodeCancel:             "CANCEL",
	ErrCodeCompression:        "COMPRESSION_ERROR",
	ErrCodeConnect:            "CONNECT_ERROR",
	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}

func (e ErrCode) String() string {
	if s, ok := errCodeName[e]; ok {
		return s
	}
	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}

func (e ErrCode) stringToken() string {
	if s, ok := errCodeName[e]; ok {
		return s
	}
	return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
}

// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }

// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
	StreamID uint32
	Code     ErrCode
	Cause    error // optional additional detail
}

// errFromPeer is a sentinel error value for StreamError.Cause to
// indicate that the StreamError was sent from the peer over the wire
// and wasn't locally generated in the Transport.
var errFromPeer = errors.New("received from peer")

func streamError(id uint32, code ErrCode) StreamError {
	return StreamError{StreamID: id, Code: code}
}

func (e StreamError) Error() string {
	if e.Cause != nil {
		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
	}
	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}

// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
	Code   ErrCode // the ConnectionError error code
	Reason string  // additional reason
}

func (e connError) Error() string {
	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}

type pseudoHeaderError string

func (e pseudoHeaderError) Error() string {
	return fmt.Sprintf("invalid pseudo-header %q", string(e))
}

type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
	return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}

type headerFieldNameError string

func (e headerFieldNameError) Error() string {
	return fmt.Sprintf("invalid header field name %q", string(e))
}

type headerFieldValueError string

func (e headerFieldValueError) Error() string {
	return fmt.Sprintf("invalid header field value for %q", string(e))
}

var (
	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
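As a rough illustration of how the two error kinds above are meant to be handled, a sketch of the usual split, with hypothetical helper names (the real Framer and connection plumbing is more involved):

package http2

func processFrameError(err error) {
	switch e := err.(type) {
	case StreamError:
		// Only stream e.StreamID is affected: answer with RST_STREAM carrying e.Code.
		sendRSTStream(e.StreamID, e.Code)
	case ConnectionError:
		// The whole connection is poisoned: send GOAWAY with the code and close.
		sendGoAway(ErrCode(e))
	default:
		// Anything else is treated as an internal connection error.
		sendGoAway(ErrCodeInternal)
	}
}

func sendRSTStream(streamID uint32, code ErrCode) { /* elided */ }
func sendGoAway(code ErrCode)                     { /* elided */ }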
120
vendor/golang.org/x/net/http2/flow.go
generated
vendored
Normal file
120
vendor/golang.org/x/net/http2/flow.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Flow control

package http2

// inflowMinRefresh is the minimum number of bytes we'll send for a
// flow control window update.
const inflowMinRefresh = 4 << 10

// inflow accounts for an inbound flow control window.
// It tracks both the latest window sent to the peer (used for enforcement)
// and the accumulated unsent window.
type inflow struct {
	avail  int32
	unsent int32
}

// init sets the initial window.
func (f *inflow) init(n int32) {
	f.avail = n
}

// add adds n bytes to the window, with a maximum window size of max,
// indicating that the peer can now send us more data.
// For example, the user read from a {Request,Response} body and consumed
// some of the buffered data, so the peer can now send more.
// It returns the number of bytes to send in a WINDOW_UPDATE frame to the peer.
// Window updates are accumulated and sent when the unsent capacity
// is at least inflowMinRefresh or will at least double the peer's available window.
func (f *inflow) add(n int) (connAdd int32) {
	if n < 0 {
		panic("negative update")
	}
	unsent := int64(f.unsent) + int64(n)
	// "A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets."
	// RFC 7540 Section 6.9.1.
	const maxWindow = 1<<31 - 1
	if unsent+int64(f.avail) > maxWindow {
		panic("flow control update exceeds maximum window size")
	}
	f.unsent = int32(unsent)
	if f.unsent < inflowMinRefresh && f.unsent < f.avail {
		// If there aren't at least inflowMinRefresh bytes of window to send,
		// and this update won't at least double the window, buffer the update for later.
		return 0
	}
	f.avail += f.unsent
	f.unsent = 0
	return int32(unsent)
}

// take attempts to take n bytes from the peer's flow control window.
// It reports whether the window has available capacity.
func (f *inflow) take(n uint32) bool {
	if n > uint32(f.avail) {
		return false
	}
	f.avail -= int32(n)
	return true
}

// takeInflows attempts to take n bytes from two inflows,
// typically connection-level and stream-level flows.
// It reports whether both windows have available capacity.
func takeInflows(f1, f2 *inflow, n uint32) bool {
	if n > uint32(f1.avail) || n > uint32(f2.avail) {
		return false
	}
	f1.avail -= int32(n)
	f2.avail -= int32(n)
	return true
}

// outflow is the outbound flow control window's size.
type outflow struct {
	_ incomparable

	// n is the number of DATA bytes we're allowed to send.
	// An outflow is kept both on a conn and a per-stream.
	n int32

	// conn points to the shared connection-level outflow that is
	// shared by all streams on that conn. It is nil for the outflow
	// that's on the conn directly.
	conn *outflow
}

func (f *outflow) setConnFlow(cf *outflow) { f.conn = cf }

func (f *outflow) available() int32 {
	n := f.n
	if f.conn != nil && f.conn.n < n {
		n = f.conn.n
	}
	return n
}

func (f *outflow) take(n int32) {
	if n > f.available() {
		panic("internal error: took too much")
	}
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *outflow) add(n int32) bool {
	sum := f.n + n
	if (sum > n) == (f.n > 0) {
		f.n = sum
		return true
	}
	return false
}
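A small worked example of the inbound-window bookkeeping above, as an in-package sketch with arbitrary numbers; it shows how small updates are coalesced until a WINDOW_UPDATE is worth sending:

package http2

import "fmt"

func inflowDemo() {
	var f inflow
	f.init(64 << 10)         // advertise a 64 KiB initial window to the peer
	f.take(10 << 10)         // peer sent 10 KiB of DATA; available window drops to 54 KiB
	fmt.Println(f.add(1024)) // 0: under inflowMinRefresh (4 KiB), the update is buffered
	fmt.Println(f.add(4096)) // 5120: accumulated 5 KiB >= 4 KiB, so send a WINDOW_UPDATE now
}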
1753
vendor/golang.org/x/net/http2/frame.go
generated
vendored
Normal file
1753
vendor/golang.org/x/net/http2/frame.go
generated
vendored
Normal file
File diff suppressed because it is too large
181
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
Normal file
181
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Defensive debug-only utility to track that functions run on the
// goroutine that they're supposed to.

package http2

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync"
	"sync/atomic"
)

var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

// Setting DebugGoroutines to false during a test to disable goroutine debugging
// results in race detector complaints when a test leaves goroutines running before
// returning. Tests shouldn't do this, of course, but when they do it generally shows
// up as infrequent, hard-to-debug flakes. (See #66519.)
//
// Disable goroutine debugging during individual tests with an atomic bool.
// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
// here is harmless.)
var disableDebugGoroutines atomic.Bool

type goroutineLock uint64

func newGoroutineLock() goroutineLock {
	if !DebugGoroutines || disableDebugGoroutines.Load() {
		return 0
	}
	return goroutineLock(curGoroutineID())
}

func (g goroutineLock) check() {
	if !DebugGoroutines || disableDebugGoroutines.Load() {
		return
	}
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}

func (g goroutineLock) checkNotOn() {
	if !DebugGoroutines || disableDebugGoroutines.Load() {
		return
	}
	if curGoroutineID() == uint64(g) {
		panic("running on the wrong goroutine")
	}
}

var goroutineSpace = []byte("goroutine ")

func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}

var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}

// parseUintBytes is like strconv.ParseUint, but using a []byte.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		if int(v) >= base {
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}

// Return the first number n such that n*base >= 1<<64.
func cutoff64(base int) uint64 {
	if base < 2 {
		return 0
	}
	return (1<<64-1)/uint64(base) + 1
}
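A brief in-package sketch of how this debug lock is intended to be used; serveLoop and its methods are hypothetical names, and the checks are no-ops unless DEBUG_HTTP2_GOROUTINES=1 is set:

package http2

type serveLoop struct {
	goroutine goroutineLock // records the goroutine that owns this loop's state
}

func newServeLoop() *serveLoop {
	return &serveLoop{goroutine: newGoroutineLock()}
}

func (l *serveLoop) loopWork() {
	l.goroutine.check() // panics (in debug mode) if called from a different goroutine
	// ... per-connection state may be touched here without further locking ...
}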
234
vendor/golang.org/x/net/http2/h2c/h2c.go
generated
vendored
Normal file
234
vendor/golang.org/x/net/http2/h2c/h2c.go
generated
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
|
||||
//
|
||||
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
|
||||
// net/http or golang.org/x/net/http2.
|
||||
package h2c
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
var (
|
||||
http2VerboseLogs bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
|
||||
http2VerboseLogs = true
|
||||
}
|
||||
}
|
||||
|
||||
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
|
||||
// that should be h2c traffic. There are two ways to begin a h2c connection
|
||||
// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
|
||||
// works by starting an h2c connection with a string of bytes that is valid
|
||||
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
|
||||
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
|
||||
// h2c. When either of those situations occur we hijack the HTTP/1 connection,
|
||||
// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
|
||||
type h2cHandler struct {
|
||||
Handler http.Handler
|
||||
s *http2.Server
|
||||
}
|
||||
|
||||
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
|
||||
// traffic. If a request is an h2c connection, it's hijacked and redirected to
|
||||
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
|
||||
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
|
||||
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
|
||||
// compatible parts of the Go http library to parse and recognize h2c requests.
|
||||
// Once a request is recognized as h2c, we hijack the connection and convert it
|
||||
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
|
||||
// understands HTTP/2 except for the h2c part of it.)
|
||||
//
|
||||
// The first request on an h2c connection is read entirely into memory before
|
||||
// the Handler is called. To limit the memory consumed by this request, wrap
|
||||
// the result of NewHandler in an http.MaxBytesHandler.
|
||||
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
|
||||
return &h2cHandler{
|
||||
Handler: h,
|
||||
s: s,
|
||||
}
|
||||
}
|
||||
|
||||
// extractServer extracts an existing http.Server instance from http.Request or creates an empty http.Server
|
||||
func extractServer(r *http.Request) *http.Server {
|
||||
server, ok := r.Context().Value(http.ServerContextKey).(*http.Server)
|
||||
if ok {
|
||||
return server
|
||||
}
|
||||
return new(http.Server)
|
||||
}
|
||||
|
||||
// ServeHTTP implements the h2c support that is enabled by h2c.GetH2CHandler.
|
||||
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
|
||||
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
|
||||
if http2VerboseLogs {
|
||||
log.Print("h2c: attempting h2c with prior knowledge.")
|
||||
}
|
||||
conn, err := initH2CWithPriorKnowledge(w)
|
||||
if err != nil {
|
||||
if http2VerboseLogs {
|
||||
log.Printf("h2c: error h2c with prior knowledge: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
s.s.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Context: r.Context(),
|
||||
BaseConfig: extractServer(r),
|
||||
Handler: s.Handler,
|
||||
SawClientPreface: true,
|
||||
})
|
||||
return
|
||||
}
|
||||
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
|
||||
if isH2CUpgrade(r.Header) {
|
||||
conn, settings, err := h2cUpgrade(w, r)
|
||||
if err != nil {
|
||||
if http2VerboseLogs {
|
||||
log.Printf("h2c: error h2c upgrade: %v", err)
|
||||
}
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
s.s.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Context: r.Context(),
|
||||
BaseConfig: extractServer(r),
|
||||
Handler: s.Handler,
|
||||
UpgradeRequest: r,
|
||||
Settings: settings,
|
||||
})
|
||||
return
|
||||
}
|
||||
s.Handler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// initH2CWithPriorKnowledge implements creating a h2c connection with prior
|
||||
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
|
||||
// All we have to do is look for the client preface that is supposed to be part
|
||||
// of the body, and reforward the client preface on the net.Conn this function
|
||||
// creates.
|
||||
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
|
||||
rc := http.NewResponseController(w)
|
||||
conn, rw, err := rc.Hijack()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
const expectedBody = "SM\r\n\r\n"
|
||||
|
||||
buf := make([]byte, len(expectedBody))
|
||||
n, err := io.ReadFull(rw, buf)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("h2c: error reading client preface: %s", err)
|
||||
}
|
||||
|
||||
if string(buf[:n]) == expectedBody {
|
||||
return newBufConn(conn, rw), nil
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
return nil, errors.New("h2c: invalid client preface")
|
||||
}
|
||||
|
||||
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
|
||||
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (_ net.Conn, settings []byte, err error) {
|
||||
settings, err = getH2Settings(r.Header)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
r.Body = io.NopCloser(bytes.NewBuffer(body))
|
||||
|
||||
rc := http.NewResponseController(w)
|
||||
conn, rw, err := rc.Hijack()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
|
||||
"Connection: Upgrade\r\n" +
|
||||
"Upgrade: h2c\r\n\r\n"))
|
||||
return newBufConn(conn, rw), settings, nil
|
||||
}
|
||||
|
||||
// isH2CUpgrade returns true if the header properly requests an upgrade to h2c
|
||||
// as specified by Section 3.2.
|
||||
func isH2CUpgrade(h http.Header) bool {
|
||||
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
|
||||
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
|
||||
}
|
||||
|
||||
// getH2Settings returns the settings in the HTTP2-Settings header.
|
||||
func getH2Settings(h http.Header) ([]byte, error) {
|
||||
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
|
||||
if !ok {
|
||||
return nil, errors.New("missing HTTP2-Settings header")
|
||||
}
|
||||
if len(vals) != 1 {
|
||||
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
|
||||
}
|
||||
settings, err := base64.RawURLEncoding.DecodeString(vals[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return settings, nil
|
||||
}
|
||||
|
||||
func newBufConn(conn net.Conn, rw *bufio.ReadWriter) net.Conn {
|
||||
rw.Flush()
|
||||
if rw.Reader.Buffered() == 0 {
|
||||
// If there's no buffered data to be read,
|
||||
// we can just discard the bufio.ReadWriter.
|
||||
return conn
|
||||
}
|
||||
return &bufConn{conn, rw.Reader}
|
||||
}
|
||||
|
||||
// bufConn wraps a net.Conn, but reads drain the bufio.Reader first.
|
||||
type bufConn struct {
|
||||
net.Conn
|
||||
*bufio.Reader
|
||||
}
|
||||
|
||||
func (c *bufConn) Read(p []byte) (int, error) {
|
||||
if c.Reader == nil {
|
||||
return c.Conn.Read(p)
|
||||
}
|
||||
n := c.Reader.Buffered()
|
||||
if n == 0 {
|
||||
c.Reader = nil
|
||||
return c.Conn.Read(p)
|
||||
}
|
||||
if n < len(p) {
|
||||
p = p[:n]
|
||||
}
|
||||
return c.Reader.Read(p)
|
||||
}
|
||||
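To show the package above end to end, a minimal sketch of an h2c server plus a prior-knowledge client. h2c.NewHandler and http2.Server appear in this diff; the http2.Transport fields (AllowHTTP, DialTLSContext) are assumed from the x/net/http2 public API, and the address and handler body are arbitrary.

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "hello over %s\n", r.Proto)
	})
	// Wrap the plain handler so prior-knowledge and Upgrade requests become HTTP/2.
	srv := &http.Server{Handler: h2c.NewHandler(h, &http2.Server{})}
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}
	go srv.Serve(ln)

	// Client with prior knowledge: dial plain TCP but speak HTTP/2 immediately.
	client := &http.Client{Transport: &http2.Transport{
		AllowHTTP: true,
		DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, network, addr)
		},
	}}
	resp, err := client.Get("http://127.0.0.1:8080/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Proto) // HTTP/2.0
}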
245
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
245
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
Normal file
@@ -0,0 +1,245 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
uint32Max = ^uint32(0)
|
||||
initialHeaderTableSize = 4096
|
||||
)
|
||||
|
||||
type Encoder struct {
|
||||
dynTab dynamicTable
|
||||
// minSize is the minimum table size set by
|
||||
// SetMaxDynamicTableSize after the previous Header Table Size
|
||||
// Update.
|
||||
minSize uint32
|
||||
// maxSizeLimit is the maximum table size this encoder
|
||||
// supports. This will protect the encoder from too large
|
||||
// size.
|
||||
maxSizeLimit uint32
|
||||
// tableSizeUpdate indicates whether "Header Table Size
|
||||
// Update" is required.
|
||||
tableSizeUpdate bool
|
||||
w io.Writer
|
||||
buf []byte
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder which performs HPACK encoding. An
|
||||
// encoded data is written to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
e := &Encoder{
|
||||
minSize: uint32Max,
|
||||
maxSizeLimit: initialHeaderTableSize,
|
||||
tableSizeUpdate: false,
|
||||
w: w,
|
||||
}
|
||||
e.dynTab.table.init()
|
||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||
return e
|
||||
}
|
||||
|
||||
// WriteField encodes f into a single Write to e's underlying Writer.
|
||||
// This function may also produce bytes for "Header Table Size Update"
|
||||
// if necessary. If produced, it is done before encoding f.
|
||||
func (e *Encoder) WriteField(f HeaderField) error {
|
||||
e.buf = e.buf[:0]
|
||||
|
||||
if e.tableSizeUpdate {
|
||||
e.tableSizeUpdate = false
|
||||
if e.minSize < e.dynTab.maxSize {
|
||||
e.buf = appendTableSize(e.buf, e.minSize)
|
||||
}
|
||||
e.minSize = uint32Max
|
||||
e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
|
||||
}
|
||||
|
||||
idx, nameValueMatch := e.searchTable(f)
|
||||
if nameValueMatch {
|
||||
e.buf = appendIndexed(e.buf, idx)
|
||||
} else {
|
||||
indexing := e.shouldIndex(f)
|
||||
if indexing {
|
||||
e.dynTab.add(f)
|
||||
}
|
||||
|
||||
if idx == 0 {
|
||||
e.buf = appendNewName(e.buf, f, indexing)
|
||||
} else {
|
||||
e.buf = appendIndexedName(e.buf, f, idx, indexing)
|
||||
}
|
||||
}
|
||||
n, err := e.w.Write(e.buf)
|
||||
if err == nil && n != len(e.buf) {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// searchTable searches f in both static and dynamic header tables.
|
||||
// The static header table is searched first. Only when there is no
|
||||
// exact match for both name and value, the dynamic header table is
|
||||
// then searched. If there is no match, i is 0. If both name and value
|
||||
// match, i is the matched index and nameValueMatch becomes true. If
|
||||
// only name matches, i points to that index and nameValueMatch
|
||||
// becomes false.
|
||||
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
i, nameValueMatch = staticTable.search(f)
|
||||
if nameValueMatch {
|
||||
return i, true
|
||||
}
|
||||
|
||||
j, nameValueMatch := e.dynTab.table.search(f)
|
||||
if nameValueMatch || (i == 0 && j != 0) {
|
||||
return j + uint64(staticTable.len()), nameValueMatch
|
||||
}
|
||||
|
||||
return i, false
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSize changes the dynamic header table size to v.
|
||||
// The actual size is bounded by the value passed to
|
||||
// SetMaxDynamicTableSizeLimit.
|
||||
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
|
||||
if v > e.maxSizeLimit {
|
||||
v = e.maxSizeLimit
|
||||
}
|
||||
if v < e.minSize {
|
||||
e.minSize = v
|
||||
}
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// MaxDynamicTableSize returns the current dynamic header table size.
|
||||
func (e *Encoder) MaxDynamicTableSize() (v uint32) {
|
||||
return e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
|
||||
// specified in SetMaxDynamicTableSize to v. By default, it is set to
|
||||
// 4096, which is the same size of the default dynamic header table
|
||||
// size described in HPACK specification. If the current maximum
|
||||
// dynamic header table size is strictly greater than v, "Header Table
|
||||
// Size Update" will be done in the next WriteField call and the
|
||||
// maximum dynamic header table size is truncated to v.
|
||||
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
|
||||
e.maxSizeLimit = v
|
||||
if e.dynTab.maxSize > v {
|
||||
e.tableSizeUpdate = true
|
||||
e.dynTab.setMaxSize(v)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldIndex reports whether f should be indexed.
|
||||
func (e *Encoder) shouldIndex(f HeaderField) bool {
|
||||
return !f.Sensitive && f.Size() <= e.dynTab.maxSize
|
||||
}
|
||||
|
||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendIndexed(dst []byte, i uint64) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, i)
|
||||
dst[first] |= 0x80
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
||||
// - New Name" representation variants, to dst and returns the
|
||||
// extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
||||
dst = appendHpackString(dst, f.Name)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendIndexedName appends f and index i referring indexed name
|
||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
||||
// representation variants, to dst and returns the extended buffer.
|
||||
//
|
||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
||||
// representation is used.
|
||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
||||
first := len(dst)
|
||||
var n byte
|
||||
if indexing {
|
||||
n = 6
|
||||
} else {
|
||||
n = 4
|
||||
}
|
||||
dst = appendVarInt(dst, n, i)
|
||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
||||
return appendHpackString(dst, f.Value)
|
||||
}
|
||||
|
||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
func appendTableSize(dst []byte, v uint32) []byte {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 5, uint64(v))
|
||||
dst[first] |= 0x20
|
||||
return dst
|
||||
}
|
||||
|
||||
// appendVarInt appends i, as encoded in variable integer form using n
|
||||
// bit prefix, to dst and returns the extended buffer.
|
||||
//
|
||||
// See
|
||||
// https://httpwg.org/specs/rfc7541.html#integer.representation
|
||||
func appendVarInt(dst []byte, n byte, i uint64) []byte {
|
||||
k := uint64((1 << n) - 1)
|
||||
if i < k {
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
dst = append(dst, byte(k))
|
||||
i -= k
|
||||
for ; i >= 128; i >>= 7 {
|
||||
dst = append(dst, byte(0x80|(i&0x7f)))
|
||||
}
|
||||
return append(dst, byte(i))
|
||||
}
|
||||
|
||||
// appendHpackString appends s, as encoded in "String Literal"
|
||||
// representation, to dst and returns the extended buffer.
|
||||
//
|
||||
// s will be encoded in Huffman codes only when it produces strictly
|
||||
// shorter byte string.
|
||||
func appendHpackString(dst []byte, s string) []byte {
|
||||
huffmanLength := HuffmanEncodeLength(s)
|
||||
if huffmanLength < uint64(len(s)) {
|
||||
first := len(dst)
|
||||
dst = appendVarInt(dst, 7, huffmanLength)
|
||||
dst = AppendHuffmanString(dst, s)
|
||||
dst[first] |= 0x80
|
||||
} else {
|
||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
||||
dst = append(dst, s...)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeTypeByte returns type byte. If sensitive is true, type byte
|
||||
// for "Never Indexed" representation is returned. If sensitive is
|
||||
// false and indexing is true, type byte for "Incremental Indexing"
|
||||
// representation is returned. Otherwise, type byte for "Without
|
||||
// Indexing" is returned.
|
||||
func encodeTypeByte(indexing, sensitive bool) byte {
|
||||
if sensitive {
|
||||
return 0x10
|
||||
}
|
||||
if indexing {
|
||||
return 0x40
|
||||
}
|
||||
return 0
|
||||
}
|
||||
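A minimal sketch of the encoder's exported API in use, writing a few fields into a buffer the way a HEADERS frame payload would be built; the header names and values are arbitrary:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/index.html"})
	// Sensitive fields are emitted as "never indexed" and stay out of the dynamic table.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
	fmt.Printf("%d header bytes: %x\n", buf.Len(), buf.Bytes())
}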
523
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
523
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
Normal file
@@ -0,0 +1,523 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hpack implements HPACK, a compression format for
|
||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
||||
//
|
||||
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// A DecodingError is something the spec defines as a decoding error.
|
||||
type DecodingError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (de DecodingError) Error() string {
|
||||
return fmt.Sprintf("decoding error: %v", de.Err)
|
||||
}
|
||||
|
||||
// An InvalidIndexError is returned when an encoder references a table
|
||||
// entry before the static table or after the end of the dynamic table.
|
||||
type InvalidIndexError int
|
||||
|
||||
func (e InvalidIndexError) Error() string {
|
||||
return fmt.Sprintf("invalid indexed representation index %d", int(e))
|
||||
}
|
||||
|
||||
// A HeaderField is a name-value pair. Both the name and value are
|
||||
// treated as opaque sequences of octets.
|
||||
type HeaderField struct {
|
||||
Name, Value string
|
||||
|
||||
// Sensitive means that this header field should never be
|
||||
// indexed.
|
||||
Sensitive bool
|
||||
}
|
||||
|
||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
||||
// That is, it reports whether it starts with a colon.
|
||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
||||
// though.
|
||||
func (hf HeaderField) IsPseudo() bool {
|
||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
||||
}
|
||||
|
||||
func (hf HeaderField) String() string {
|
||||
var suffix string
|
||||
if hf.Sensitive {
|
||||
suffix = " (sensitive)"
|
||||
}
|
||||
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
|
||||
}
|
||||
|
||||
// Size returns the size of an entry per RFC 7541 section 4.1.
|
||||
func (hf HeaderField) Size() uint32 {
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.4.1
|
||||
// "The size of the dynamic table is the sum of the size of
|
||||
// its entries. The size of an entry is the sum of its name's
|
||||
// length in octets (as defined in Section 5.2), its value's
|
||||
// length in octets (see Section 5.2), plus 32. The size of
|
||||
// an entry is calculated using the length of the name and
|
||||
// value without any Huffman encoding applied."
|
||||
|
||||
// This can overflow if somebody makes a large HeaderField
|
||||
// Name and/or Value by hand, but we don't care, because that
|
||||
// won't happen on the wire because the encoding doesn't allow
|
||||
// it.
|
||||
return uint32(len(hf.Name) + len(hf.Value) + 32)
|
||||
}
|
||||
|
||||
// A Decoder is the decoding context for incremental processing of
|
||||
// header blocks.
|
||||
type Decoder struct {
|
||||
dynTab dynamicTable
|
||||
emit func(f HeaderField)
|
||||
|
||||
emitEnabled bool // whether calls to emit are enabled
|
||||
maxStrLen int // 0 means unlimited
|
||||
|
||||
// buf is the unparsed buffer. It's only written to
|
||||
// saveBuf if it was truncated in the middle of a header
|
||||
// block. Because it's usually not owned, we can only
|
||||
// process it under Write.
|
||||
buf []byte // not owned; only valid during Write
|
||||
|
||||
// saveBuf is previous data passed to Write which we weren't able
|
||||
// to fully parse before. Unlike buf, we own this data.
|
||||
saveBuf bytes.Buffer
|
||||
|
||||
firstField bool // processing the first field of the header block
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder with the provided maximum dynamic
|
||||
// table size. The emitFunc will be called for each valid field
|
||||
// parsed, in the same goroutine as calls to Write, before Write returns.
|
||||
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
||||
d := &Decoder{
|
||||
emit: emitFunc,
|
||||
emitEnabled: true,
|
||||
firstField: true,
|
||||
}
|
||||
d.dynTab.table.init()
|
||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
||||
return d
|
||||
}
|
||||
|
||||
// ErrStringLength is returned by Decoder.Write when the max string length
|
||||
// (as configured by Decoder.SetMaxStringLength) would be violated.
|
||||
var ErrStringLength = errors.New("hpack: string too long")
|
||||
|
||||
// SetMaxStringLength sets the maximum size of a HeaderField name or
|
||||
// value string. If a string exceeds this length (even after any
|
||||
// decompression), Write will return ErrStringLength.
|
||||
// A value of 0 means unlimited and is the default from NewDecoder.
|
||||
func (d *Decoder) SetMaxStringLength(n int) {
|
||||
d.maxStrLen = n
|
||||
}
|
||||
|
||||
// SetEmitFunc changes the callback used when new header fields
|
||||
// are decoded.
|
||||
// It must be non-nil. It does not affect EmitEnabled.
|
||||
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
|
||||
d.emit = emitFunc
|
||||
}
|
||||
|
||||
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
|
||||
// should be called. The default is true.
|
||||
//
|
||||
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
|
||||
// while still decoding and keeping in-sync with decoder state, but
|
||||
// without doing unnecessary decompression or generating unnecessary
|
||||
// garbage for header fields past the limit.
|
||||
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
|
||||
|
||||
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
|
||||
// are currently enabled. The default is true.
|
||||
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
|
||||
|
||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
||||
// underlying buffers for garbage reasons.
|
||||
|
||||
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.setMaxSize(v)
|
||||
}
|
||||
|
||||
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
|
||||
// stream (via dynamic table size updates) may set the maximum size
|
||||
// to.
|
||||
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
|
||||
d.dynTab.allowedMaxSize = v
|
||||
}
|
||||
|
||||
type dynamicTable struct {
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.2.3.2
|
||||
table headerFieldTable
|
||||
size uint32 // in bytes
|
||||
maxSize uint32 // current maxSize
|
||||
allowedMaxSize uint32 // maxSize may go up to this, inclusive
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) setMaxSize(v uint32) {
|
||||
dt.maxSize = v
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
func (dt *dynamicTable) add(f HeaderField) {
|
||||
dt.table.addEntry(f)
|
||||
dt.size += f.Size()
|
||||
dt.evict()
|
||||
}
|
||||
|
||||
// If we're too big, evict old stuff.
|
||||
func (dt *dynamicTable) evict() {
|
||||
var n int
|
||||
for dt.size > dt.maxSize && n < dt.table.len() {
|
||||
dt.size -= dt.table.ents[n].Size()
|
||||
n++
|
||||
}
|
||||
dt.table.evictOldest(n)
|
||||
}
|
||||
|
||||
func (d *Decoder) maxTableIndex() int {
|
||||
// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
|
||||
// the dynamic table to 2^32 bytes, where each entry will occupy more than
|
||||
// one byte. Further, the staticTable has a fixed, small length.
|
||||
return d.dynTab.table.len() + staticTable.len()
|
||||
}
|
||||
|
||||
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
|
||||
// See Section 2.3.3.
|
||||
if i == 0 {
|
||||
return
|
||||
}
|
||||
if i <= uint64(staticTable.len()) {
|
||||
return staticTable.ents[i-1], true
|
||||
}
|
||||
if i > uint64(d.maxTableIndex()) {
|
||||
return
|
||||
}
|
||||
// In the dynamic table, newer entries have lower indices.
|
||||
// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
|
||||
// the reversed dynamic table.
|
||||
dt := d.dynTab.table
|
||||
return dt.ents[dt.len()-(int(i)-staticTable.len())], true
|
||||
}
|
||||
|
||||
// DecodeFull decodes an entire block.
|
||||
//
|
||||
// TODO: remove this method and make it incremental later? This is
|
||||
// easier for debugging now.
|
||||
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
|
||||
var hf []HeaderField
|
||||
saveFunc := d.emit
|
||||
defer func() { d.emit = saveFunc }()
|
||||
d.emit = func(f HeaderField) { hf = append(hf, f) }
|
||||
if _, err := d.Write(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := d.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return hf, nil
|
||||
}
|
||||
|
||||
// Close declares that the decoding is complete and resets the Decoder
|
||||
// to be reused again for a new header block. If there is any remaining
|
||||
// data in the decoder's buffer, Close returns an error.
|
||||
func (d *Decoder) Close() error {
|
||||
if d.saveBuf.Len() > 0 {
|
||||
d.saveBuf.Reset()
|
||||
return DecodingError{errors.New("truncated headers")}
|
||||
}
|
||||
d.firstField = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Decoder) Write(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
// Prevent state machine CPU attacks (making us redo
|
||||
// work up to the point of finding out we don't have
|
||||
// enough data)
|
||||
return
|
||||
}
|
||||
// Only copy the data if we have to. Optimistically assume
|
||||
// that p will contain a complete header block.
|
||||
if d.saveBuf.Len() == 0 {
|
||||
d.buf = p
|
||||
} else {
|
||||
d.saveBuf.Write(p)
|
||||
d.buf = d.saveBuf.Bytes()
|
||||
d.saveBuf.Reset()
|
||||
}
|
||||
|
||||
for len(d.buf) > 0 {
|
||||
err = d.parseHeaderFieldRepr()
|
||||
if err == errNeedMore {
|
||||
// Extra paranoia, making sure saveBuf won't
|
||||
// get too large. All the varint and string
|
||||
// reading code earlier should already catch
|
||||
// overlong things and return ErrStringLength,
|
||||
// but keep this as a last resort.
|
||||
const varIntOverhead = 8 // conservative
|
||||
if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
|
||||
return 0, ErrStringLength
|
||||
}
|
||||
d.saveBuf.Write(d.buf)
|
||||
return len(p), nil
|
||||
}
|
||||
d.firstField = false
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return len(p), err
|
||||
}
|
||||
|
||||
// errNeedMore is an internal sentinel error value that means the
|
||||
// buffer is truncated and we need to read more data before we can
|
||||
// continue parsing.
|
||||
var errNeedMore = errors.New("need more data")
|
||||
|
||||
type indexType int
|
||||
|
||||
const (
|
||||
indexedTrue indexType = iota
|
||||
indexedFalse
|
||||
indexedNever
|
||||
)
|
||||
|
||||
func (v indexType) indexed() bool { return v == indexedTrue }
|
||||
func (v indexType) sensitive() bool { return v == indexedNever }
|
||||
|
||||
// returns errNeedMore if there isn't enough data available.
|
||||
// any other error is fatal.
|
||||
// consumes d.buf iff it returns nil.
|
||||
// precondition: must be called with len(d.buf) > 0
|
||||
func (d *Decoder) parseHeaderFieldRepr() error {
|
||||
b := d.buf[0]
|
||||
switch {
|
||||
case b&128 != 0:
|
||||
// Indexed representation.
|
||||
// High bit set?
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.1
|
||||
return d.parseFieldIndexed()
|
||||
case b&192 == 64:
|
||||
// 6.2.1 Literal Header Field with Incremental Indexing
|
||||
// 0b01xxxxxx: top two bits are 01
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.1
|
||||
return d.parseFieldLiteral(6, indexedTrue)
|
||||
case b&240 == 0:
|
||||
// 6.2.2 Literal Header Field without Indexing
|
||||
// 0b0000xxxx: top four bits are 0000
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.2
|
||||
return d.parseFieldLiteral(4, indexedFalse)
|
||||
case b&240 == 16:
|
||||
// 6.2.3 Literal Header Field never Indexed
|
||||
// 0b0001xxxx: top four bits are 0001
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.2.3
|
||||
return d.parseFieldLiteral(4, indexedNever)
|
||||
case b&224 == 32:
|
||||
// 6.3 Dynamic Table Size Update
|
||||
// Top three bits are '001'.
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.6.3
|
||||
return d.parseDynamicTableSizeUpdate()
|
||||
}
|
||||
|
||||
return DecodingError{errors.New("invalid encoding")}
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldIndexed() error {
|
||||
buf := d.buf
|
||||
idx, buf, err := readVarInt(7, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hf, ok := d.at(idx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(idx)}
|
||||
}
|
||||
d.buf = buf
|
||||
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
|
||||
buf := d.buf
|
||||
nameIdx, buf, err := readVarInt(n, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var hf HeaderField
|
||||
wantStr := d.emitEnabled || it.indexed()
|
||||
var undecodedName undecodedString
|
||||
if nameIdx > 0 {
|
||||
ihf, ok := d.at(nameIdx)
|
||||
if !ok {
|
||||
return DecodingError{InvalidIndexError(nameIdx)}
|
||||
}
|
||||
hf.Name = ihf.Name
|
||||
} else {
|
||||
undecodedName, buf, err = d.readString(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
undecodedValue, buf, err := d.readString(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if wantStr {
|
||||
if nameIdx <= 0 {
|
||||
hf.Name, err = d.decodeString(undecodedName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
hf.Value, err = d.decodeString(undecodedValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
d.buf = buf
|
||||
if it.indexed() {
|
||||
d.dynTab.add(hf)
|
||||
}
|
||||
hf.Sensitive = it.sensitive()
|
||||
return d.callEmit(hf)
|
||||
}
|
||||
|
||||
func (d *Decoder) callEmit(hf HeaderField) error {
|
||||
if d.maxStrLen != 0 {
|
||||
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
}
|
||||
if d.emitEnabled {
|
||||
d.emit(hf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
||||
func (d *Decoder) parseDynamicTableSizeUpdate() error {
|
||||
// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
|
||||
// beginning of the first header block following the change to the dynamic table size.
|
||||
if !d.firstField && d.dynTab.size > 0 {
|
||||
return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
|
||||
}
|
||||
|
||||
buf := d.buf
|
||||
size, buf, err := readVarInt(5, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size > uint64(d.dynTab.allowedMaxSize) {
|
||||
return DecodingError{errors.New("dynamic table size update too large")}
|
||||
}
|
||||
d.dynTab.setMaxSize(uint32(size))
|
||||
d.buf = buf
|
||||
return nil
|
||||
}
|
||||
|
||||
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
||||
|
||||
// readVarInt reads an unsigned variable length integer off the
|
||||
// beginning of p. n is the parameter as described in
|
||||
// https://httpwg.org/specs/rfc7541.html#rfc.section.5.1.
|
||||
//
|
||||
// n must always be between 1 and 8.
|
||||
//
|
||||
// The returned remain buffer is either a smaller suffix of p, or err != nil.
|
||||
// The error is errNeedMore if p doesn't contain a complete integer.
|
||||
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
|
||||
if n < 1 || n > 8 {
|
||||
panic("bad n")
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, p, errNeedMore
|
||||
}
|
||||
i = uint64(p[0])
|
||||
if n < 8 {
|
||||
i &= (1 << uint64(n)) - 1
|
||||
}
|
||||
if i < (1<<uint64(n))-1 {
|
||||
return i, p[1:], nil
|
||||
}
|
||||
|
||||
origP := p
|
||||
p = p[1:]
|
||||
var m uint64
|
||||
for len(p) > 0 {
|
||||
b := p[0]
|
||||
p = p[1:]
|
||||
i += uint64(b&127) << m
|
||||
if b&128 == 0 {
|
||||
return i, p, nil
|
||||
}
|
||||
m += 7
|
||||
if m >= 63 { // TODO: proper overflow check. making this up.
|
||||
return 0, origP, errVarintOverflow
|
||||
}
|
||||
}
|
||||
return 0, origP, errNeedMore
|
||||
}
|
||||
|
||||
// readString reads an hpack string from p.
|
||||
//
|
||||
// It returns a reference to the encoded string data to permit deferring decode costs
|
||||
// until after the caller verifies all data is present.
|
||||
func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
|
||||
if len(p) == 0 {
|
||||
return u, p, errNeedMore
|
||||
}
|
||||
isHuff := p[0]&128 != 0
|
||||
strLen, p, err := readVarInt(7, p)
|
||||
if err != nil {
|
||||
return u, p, err
|
||||
}
|
||||
if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
|
||||
// Returning an error here means Huffman decoding errors
|
||||
// for non-indexed strings past the maximum string length
|
||||
// are ignored, but the server is returning an error anyway
|
||||
// and because the string is not indexed the error will not
|
||||
// affect the decoding state.
|
||||
return u, nil, ErrStringLength
|
||||
}
|
||||
if uint64(len(p)) < strLen {
|
||||
return u, p, errNeedMore
|
||||
}
|
||||
u.isHuff = isHuff
|
||||
u.b = p[:strLen]
|
||||
return u, p[strLen:], nil
|
||||
}
|
||||
|
||||
type undecodedString struct {
|
||||
isHuff bool
|
||||
b []byte
|
||||
}
|
||||
|
||||
func (d *Decoder) decodeString(u undecodedString) (string, error) {
|
||||
if !u.isHuff {
|
||||
return string(u.b), nil
|
||||
}
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset() // don't trust others
|
||||
var s string
|
||||
err := huffmanDecode(buf, d.maxStrLen, u.b)
|
||||
if err == nil {
|
||||
s = buf.String()
|
||||
}
|
||||
buf.Reset() // be nice to GC
|
||||
bufPool.Put(buf)
|
||||
return s, err
|
||||
}
|
||||
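And the matching decode path through the exported API: encode a small block, then feed it to a Decoder whose emit callback collects the fields; the values are arbitrary:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "text/html"})

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		fmt.Println("decode error:", err)
	}
	dec.Close() // end of the header block; also reports truncated input
}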
226
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
226
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
Normal file
@@ -0,0 +1,226 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} { return new(bytes.Buffer) },
|
||||
}
|
||||
|
||||
// HuffmanDecode decodes the string in v and writes the expanded
|
||||
// result to w, returning the number of bytes written to w and the
|
||||
// Write call's return value. At most one Write call is made.
|
||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// HuffmanDecodeToString decodes the string in v.
|
||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// ErrInvalidHuffman is returned for errors found decoding
|
||||
// Huffman-encoded strings.
|
||||
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
||||
|
||||
// huffmanDecode decodes v to buf.
|
||||
// If maxLen is greater than 0, attempts to write more to buf than
|
||||
// maxLen bytes will return ErrStringLength.
|
||||
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
rootHuffmanNode := getRootHuffmanNode()
|
||||
n := rootHuffmanNode
|
||||
// cur is the bit buffer that has not been fed into n.
|
||||
// cbits is the number of low order bits in cur that are valid.
|
||||
// sbits is the number of bits of the symbol prefix being decoded.
|
||||
cur, cbits, sbits := uint(0), uint8(0), uint8(0)
|
||||
for _, b := range v {
|
||||
cur = cur<<8 | uint(b)
|
||||
cbits += 8
|
||||
sbits += 8
|
||||
for cbits >= 8 {
|
||||
idx := byte(cur >> (cbits - 8))
|
||||
n = n.children[idx]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children == nil {
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
} else {
|
||||
cbits -= 8
|
||||
}
|
||||
}
|
||||
}
|
||||
for cbits > 0 {
|
||||
n = n.children[byte(cur<<(8-cbits))]
|
||||
if n == nil {
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if n.children != nil || n.codeLen > cbits {
|
||||
break
|
||||
}
|
||||
if maxLen != 0 && buf.Len() == maxLen {
|
||||
return ErrStringLength
|
||||
}
|
||||
buf.WriteByte(n.sym)
|
||||
cbits -= n.codeLen
|
||||
n = rootHuffmanNode
|
||||
sbits = cbits
|
||||
}
|
||||
if sbits > 7 {
|
||||
// Either there was an incomplete symbol, or overlong padding.
|
||||
// Both are decoding errors per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
if mask := uint(1<<cbits - 1); cur&mask != mask {
|
||||
// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
|
||||
return ErrInvalidHuffman
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
||||
|
||||
type node struct {
|
||||
_ incomparable
|
||||
|
||||
// children is non-nil for internal nodes
|
||||
children *[256]*node
|
||||
|
||||
// The following are only valid if children is nil:
|
||||
codeLen uint8 // number of bits that led to the output of sym
|
||||
sym byte // output symbol
|
||||
}
|
||||
|
||||
func newInternalNode() *node {
|
||||
return &node{children: new([256]*node)}
|
||||
}
|
||||
|
||||
var (
|
||||
buildRootOnce sync.Once
|
||||
lazyRootHuffmanNode *node
|
||||
)
|
||||
|
||||
func getRootHuffmanNode() *node {
|
||||
buildRootOnce.Do(buildRootHuffmanNode)
|
||||
return lazyRootHuffmanNode
|
||||
}
|
||||
|
||||
func buildRootHuffmanNode() {
|
||||
if len(huffmanCodes) != 256 {
|
||||
panic("unexpected size")
|
||||
}
|
||||
lazyRootHuffmanNode = newInternalNode()
|
||||
// allocate a leaf node for each of the 256 symbols
|
||||
leaves := new([256]node)
|
||||
|
||||
for sym, code := range huffmanCodes {
|
||||
codeLen := huffmanCodeLen[sym]
|
||||
|
||||
cur := lazyRootHuffmanNode
|
||||
for codeLen > 8 {
|
||||
codeLen -= 8
|
||||
i := uint8(code >> codeLen)
|
||||
if cur.children[i] == nil {
|
||||
cur.children[i] = newInternalNode()
|
||||
}
|
||||
cur = cur.children[i]
|
||||
}
|
||||
shift := 8 - codeLen
|
||||
start, end := int(uint8(code<<shift)), int(1<<shift)
|
||||
|
||||
leaves[sym].sym = byte(sym)
|
||||
leaves[sym].codeLen = codeLen
|
||||
for i := start; i < start+end; i++ {
|
||||
cur.children[i] = &leaves[sym]
|
||||
}
|
||||
}
|
||||
}
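// Editor's note, not part of the vendored diff: because decoding consumes a
// full byte of the lookup table at a time, a symbol whose final code chunk is
// shorter than 8 bits is installed into 2^(8-codeLen) consecutive child slots
// by the loop above (e.g. a 5-bit tail fills 8 slots). Any trailing bits after
// the symbol therefore still land on the same leaf, and codeLen tells the
// decoder how many bits were actually consumed.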
|
||||
|
||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
|
||||
// and returns the extended buffer.
|
||||
func AppendHuffmanString(dst []byte, s string) []byte {
|
||||
// This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array)
|
||||
// So if a uint64 buffer has fewer than 32 valid bits, it can always accommodate another huffmanCode.
|
||||
var (
|
||||
x uint64 // buffer
|
||||
n uint // number of valid bits present in x
|
||||
)
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
n += uint(huffmanCodeLen[c])
|
||||
x <<= huffmanCodeLen[c] % 64
|
||||
x |= uint64(huffmanCodes[c])
|
||||
if n >= 32 {
|
||||
n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift
|
||||
y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32
|
||||
dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
|
||||
}
|
||||
}
|
||||
// Add padding bits if necessary
|
||||
if over := n % 8; over > 0 {
|
||||
const (
|
||||
eosCode = 0x3fffffff
|
||||
eosNBits = 30
|
||||
eosPadByte = eosCode >> (eosNBits - 8)
|
||||
)
|
||||
pad := 8 - over
|
||||
x = (x << pad) | (eosPadByte >> over)
|
||||
n += pad // 8 now divides into n exactly
|
||||
}
|
||||
// n in (0, 8, 16, 24, 32)
|
||||
switch n / 8 {
|
||||
case 0:
|
||||
return dst
|
||||
case 1:
|
||||
return append(dst, byte(x))
|
||||
case 2:
|
||||
y := uint16(x)
|
||||
return append(dst, byte(y>>8), byte(y))
|
||||
case 3:
|
||||
y := uint16(x >> 8)
|
||||
return append(dst, byte(y>>8), byte(y), byte(x))
|
||||
}
|
||||
// case 4:
|
||||
y := uint32(x)
|
||||
return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y))
|
||||
}
|
||||
|
||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
||||
// s in Huffman codes. The result is rounded up to a byte boundary.
|
||||
func HuffmanEncodeLength(s string) uint64 {
|
||||
n := uint64(0)
|
||||
for i := 0; i < len(s); i++ {
|
||||
n += uint64(huffmanCodeLen[s[i]])
|
||||
}
|
||||
return (n + 7) / 8
|
||||
}
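// Editor's illustrative sketch, not part of the vendored diff: round-tripping
// a header value through the exported Huffman helpers defined in this file.
//
//	const s = "www.example.com"
//	enc := hpack.AppendHuffmanString(nil, s)
//	fmt.Println(uint64(len(enc)) == hpack.HuffmanEncodeLength(s)) // true
//	dec, err := hpack.HuffmanDecodeToString(enc)
//	fmt.Println(dec == s, err) // true <nil>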
|
||||
188
vendor/golang.org/x/net/http2/hpack/static_table.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
// go generate gen.go
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
package hpack
|
||||
|
||||
var staticTable = &headerFieldTable{
|
||||
evictCount: 0,
|
||||
byName: map[string]uint64{
|
||||
":authority": 1,
|
||||
":method": 3,
|
||||
":path": 5,
|
||||
":scheme": 7,
|
||||
":status": 14,
|
||||
"accept-charset": 15,
|
||||
"accept-encoding": 16,
|
||||
"accept-language": 17,
|
||||
"accept-ranges": 18,
|
||||
"accept": 19,
|
||||
"access-control-allow-origin": 20,
|
||||
"age": 21,
|
||||
"allow": 22,
|
||||
"authorization": 23,
|
||||
"cache-control": 24,
|
||||
"content-disposition": 25,
|
||||
"content-encoding": 26,
|
||||
"content-language": 27,
|
||||
"content-length": 28,
|
||||
"content-location": 29,
|
||||
"content-range": 30,
|
||||
"content-type": 31,
|
||||
"cookie": 32,
|
||||
"date": 33,
|
||||
"etag": 34,
|
||||
"expect": 35,
|
||||
"expires": 36,
|
||||
"from": 37,
|
||||
"host": 38,
|
||||
"if-match": 39,
|
||||
"if-modified-since": 40,
|
||||
"if-none-match": 41,
|
||||
"if-range": 42,
|
||||
"if-unmodified-since": 43,
|
||||
"last-modified": 44,
|
||||
"link": 45,
|
||||
"location": 46,
|
||||
"max-forwards": 47,
|
||||
"proxy-authenticate": 48,
|
||||
"proxy-authorization": 49,
|
||||
"range": 50,
|
||||
"referer": 51,
|
||||
"refresh": 52,
|
||||
"retry-after": 53,
|
||||
"server": 54,
|
||||
"set-cookie": 55,
|
||||
"strict-transport-security": 56,
|
||||
"transfer-encoding": 57,
|
||||
"user-agent": 58,
|
||||
"vary": 59,
|
||||
"via": 60,
|
||||
"www-authenticate": 61,
|
||||
},
|
||||
byNameValue: map[pairNameValue]uint64{
|
||||
{name: ":authority", value: ""}: 1,
|
||||
{name: ":method", value: "GET"}: 2,
|
||||
{name: ":method", value: "POST"}: 3,
|
||||
{name: ":path", value: "/"}: 4,
|
||||
{name: ":path", value: "/index.html"}: 5,
|
||||
{name: ":scheme", value: "http"}: 6,
|
||||
{name: ":scheme", value: "https"}: 7,
|
||||
{name: ":status", value: "200"}: 8,
|
||||
{name: ":status", value: "204"}: 9,
|
||||
{name: ":status", value: "206"}: 10,
|
||||
{name: ":status", value: "304"}: 11,
|
||||
{name: ":status", value: "400"}: 12,
|
||||
{name: ":status", value: "404"}: 13,
|
||||
{name: ":status", value: "500"}: 14,
|
||||
{name: "accept-charset", value: ""}: 15,
|
||||
{name: "accept-encoding", value: "gzip, deflate"}: 16,
|
||||
{name: "accept-language", value: ""}: 17,
|
||||
{name: "accept-ranges", value: ""}: 18,
|
||||
{name: "accept", value: ""}: 19,
|
||||
{name: "access-control-allow-origin", value: ""}: 20,
|
||||
{name: "age", value: ""}: 21,
|
||||
{name: "allow", value: ""}: 22,
|
||||
{name: "authorization", value: ""}: 23,
|
||||
{name: "cache-control", value: ""}: 24,
|
||||
{name: "content-disposition", value: ""}: 25,
|
||||
{name: "content-encoding", value: ""}: 26,
|
||||
{name: "content-language", value: ""}: 27,
|
||||
{name: "content-length", value: ""}: 28,
|
||||
{name: "content-location", value: ""}: 29,
|
||||
{name: "content-range", value: ""}: 30,
|
||||
{name: "content-type", value: ""}: 31,
|
||||
{name: "cookie", value: ""}: 32,
|
||||
{name: "date", value: ""}: 33,
|
||||
{name: "etag", value: ""}: 34,
|
||||
{name: "expect", value: ""}: 35,
|
||||
{name: "expires", value: ""}: 36,
|
||||
{name: "from", value: ""}: 37,
|
||||
{name: "host", value: ""}: 38,
|
||||
{name: "if-match", value: ""}: 39,
|
||||
{name: "if-modified-since", value: ""}: 40,
|
||||
{name: "if-none-match", value: ""}: 41,
|
||||
{name: "if-range", value: ""}: 42,
|
||||
{name: "if-unmodified-since", value: ""}: 43,
|
||||
{name: "last-modified", value: ""}: 44,
|
||||
{name: "link", value: ""}: 45,
|
||||
{name: "location", value: ""}: 46,
|
||||
{name: "max-forwards", value: ""}: 47,
|
||||
{name: "proxy-authenticate", value: ""}: 48,
|
||||
{name: "proxy-authorization", value: ""}: 49,
|
||||
{name: "range", value: ""}: 50,
|
||||
{name: "referer", value: ""}: 51,
|
||||
{name: "refresh", value: ""}: 52,
|
||||
{name: "retry-after", value: ""}: 53,
|
||||
{name: "server", value: ""}: 54,
|
||||
{name: "set-cookie", value: ""}: 55,
|
||||
{name: "strict-transport-security", value: ""}: 56,
|
||||
{name: "transfer-encoding", value: ""}: 57,
|
||||
{name: "user-agent", value: ""}: 58,
|
||||
{name: "vary", value: ""}: 59,
|
||||
{name: "via", value: ""}: 60,
|
||||
{name: "www-authenticate", value: ""}: 61,
|
||||
},
|
||||
ents: []HeaderField{
|
||||
{Name: ":authority", Value: "", Sensitive: false},
|
||||
{Name: ":method", Value: "GET", Sensitive: false},
|
||||
{Name: ":method", Value: "POST", Sensitive: false},
|
||||
{Name: ":path", Value: "/", Sensitive: false},
|
||||
{Name: ":path", Value: "/index.html", Sensitive: false},
|
||||
{Name: ":scheme", Value: "http", Sensitive: false},
|
||||
{Name: ":scheme", Value: "https", Sensitive: false},
|
||||
{Name: ":status", Value: "200", Sensitive: false},
|
||||
{Name: ":status", Value: "204", Sensitive: false},
|
||||
{Name: ":status", Value: "206", Sensitive: false},
|
||||
{Name: ":status", Value: "304", Sensitive: false},
|
||||
{Name: ":status", Value: "400", Sensitive: false},
|
||||
{Name: ":status", Value: "404", Sensitive: false},
|
||||
{Name: ":status", Value: "500", Sensitive: false},
|
||||
{Name: "accept-charset", Value: "", Sensitive: false},
|
||||
{Name: "accept-encoding", Value: "gzip, deflate", Sensitive: false},
|
||||
{Name: "accept-language", Value: "", Sensitive: false},
|
||||
{Name: "accept-ranges", Value: "", Sensitive: false},
|
||||
{Name: "accept", Value: "", Sensitive: false},
|
||||
{Name: "access-control-allow-origin", Value: "", Sensitive: false},
|
||||
{Name: "age", Value: "", Sensitive: false},
|
||||
{Name: "allow", Value: "", Sensitive: false},
|
||||
{Name: "authorization", Value: "", Sensitive: false},
|
||||
{Name: "cache-control", Value: "", Sensitive: false},
|
||||
{Name: "content-disposition", Value: "", Sensitive: false},
|
||||
{Name: "content-encoding", Value: "", Sensitive: false},
|
||||
{Name: "content-language", Value: "", Sensitive: false},
|
||||
{Name: "content-length", Value: "", Sensitive: false},
|
||||
{Name: "content-location", Value: "", Sensitive: false},
|
||||
{Name: "content-range", Value: "", Sensitive: false},
|
||||
{Name: "content-type", Value: "", Sensitive: false},
|
||||
{Name: "cookie", Value: "", Sensitive: false},
|
||||
{Name: "date", Value: "", Sensitive: false},
|
||||
{Name: "etag", Value: "", Sensitive: false},
|
||||
{Name: "expect", Value: "", Sensitive: false},
|
||||
{Name: "expires", Value: "", Sensitive: false},
|
||||
{Name: "from", Value: "", Sensitive: false},
|
||||
{Name: "host", Value: "", Sensitive: false},
|
||||
{Name: "if-match", Value: "", Sensitive: false},
|
||||
{Name: "if-modified-since", Value: "", Sensitive: false},
|
||||
{Name: "if-none-match", Value: "", Sensitive: false},
|
||||
{Name: "if-range", Value: "", Sensitive: false},
|
||||
{Name: "if-unmodified-since", Value: "", Sensitive: false},
|
||||
{Name: "last-modified", Value: "", Sensitive: false},
|
||||
{Name: "link", Value: "", Sensitive: false},
|
||||
{Name: "location", Value: "", Sensitive: false},
|
||||
{Name: "max-forwards", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authenticate", Value: "", Sensitive: false},
|
||||
{Name: "proxy-authorization", Value: "", Sensitive: false},
|
||||
{Name: "range", Value: "", Sensitive: false},
|
||||
{Name: "referer", Value: "", Sensitive: false},
|
||||
{Name: "refresh", Value: "", Sensitive: false},
|
||||
{Name: "retry-after", Value: "", Sensitive: false},
|
||||
{Name: "server", Value: "", Sensitive: false},
|
||||
{Name: "set-cookie", Value: "", Sensitive: false},
|
||||
{Name: "strict-transport-security", Value: "", Sensitive: false},
|
||||
{Name: "transfer-encoding", Value: "", Sensitive: false},
|
||||
{Name: "user-agent", Value: "", Sensitive: false},
|
||||
{Name: "vary", Value: "", Sensitive: false},
|
||||
{Name: "via", Value: "", Sensitive: false},
|
||||
{Name: "www-authenticate", Value: "", Sensitive: false},
|
||||
},
|
||||
}
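// Editor's illustrative sketch, not part of the vendored diff: a field that
// fully matches a static table entry is encoded as a single indexed byte.
// ":method: GET" is static index 2, so its indexed representation is
// 0x80|2 = 0x82:
//
//	var buf bytes.Buffer
//	enc := hpack.NewEncoder(&buf)
//	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
//	fmt.Printf("% x\n", buf.Bytes()) // 82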
|
||||
403
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
Normal file
@@ -0,0 +1,403 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package hpack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// headerFieldTable implements a list of HeaderFields.
|
||||
// This is used to implement the static and dynamic tables.
|
||||
type headerFieldTable struct {
|
||||
// For static tables, entries are never evicted.
|
||||
//
|
||||
// For dynamic tables, entries are evicted from ents[0] and added to the end.
|
||||
// Each entry has a unique id that starts at one and increments for each
|
||||
// entry that is added. This unique id is stable across evictions, meaning
|
||||
// it can be used as a pointer to a specific entry. As in hpack, unique ids
|
||||
// are 1-based. The unique id for ents[k] is k + evictCount + 1.
|
||||
//
|
||||
// Zero is not a valid unique id.
|
||||
//
|
||||
// evictCount should not overflow in any remotely practical situation. In
|
||||
// practice, we will have one dynamic table per HTTP/2 connection. If we
|
||||
// assume a very powerful server that handles 1M QPS per connection and each
|
||||
// request adds (then evicts) 100 entries from the table, it would still take
|
||||
// 2M years for evictCount to overflow.
|
||||
ents []HeaderField
|
||||
evictCount uint64
|
||||
|
||||
// byName maps a HeaderField name to the unique id of the newest entry with
|
||||
// the same name. See above for a definition of "unique id".
|
||||
byName map[string]uint64
|
||||
|
||||
// byNameValue maps a HeaderField name/value pair to the unique id of the newest
|
||||
// entry with the same name and value. See above for a definition of "unique id".
|
||||
byNameValue map[pairNameValue]uint64
|
||||
}
|
||||
|
||||
type pairNameValue struct {
|
||||
name, value string
|
||||
}
|
||||
|
||||
func (t *headerFieldTable) init() {
|
||||
t.byName = make(map[string]uint64)
|
||||
t.byNameValue = make(map[pairNameValue]uint64)
|
||||
}
|
||||
|
||||
// len reports the number of entries in the table.
|
||||
func (t *headerFieldTable) len() int {
|
||||
return len(t.ents)
|
||||
}
|
||||
|
||||
// addEntry adds a new entry.
|
||||
func (t *headerFieldTable) addEntry(f HeaderField) {
|
||||
id := uint64(t.len()) + t.evictCount + 1
|
||||
t.byName[f.Name] = id
|
||||
t.byNameValue[pairNameValue{f.Name, f.Value}] = id
|
||||
t.ents = append(t.ents, f)
|
||||
}
|
||||
|
||||
// evictOldest evicts the n oldest entries in the table.
|
||||
func (t *headerFieldTable) evictOldest(n int) {
|
||||
if n > t.len() {
|
||||
panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
|
||||
}
|
||||
for k := 0; k < n; k++ {
|
||||
f := t.ents[k]
|
||||
id := t.evictCount + uint64(k) + 1
|
||||
if t.byName[f.Name] == id {
|
||||
delete(t.byName, f.Name)
|
||||
}
|
||||
if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
|
||||
delete(t.byNameValue, p)
|
||||
}
|
||||
}
|
||||
copy(t.ents, t.ents[n:])
|
||||
for k := t.len() - n; k < t.len(); k++ {
|
||||
t.ents[k] = HeaderField{} // so strings can be garbage collected
|
||||
}
|
||||
t.ents = t.ents[:t.len()-n]
|
||||
if t.evictCount+uint64(n) < t.evictCount {
|
||||
panic("evictCount overflow")
|
||||
}
|
||||
t.evictCount += uint64(n)
|
||||
}
|
||||
|
||||
// search finds f in the table. If there is no match, i is 0.
|
||||
// If both name and value match, i is the matched index and nameValueMatch
|
||||
// becomes true. If only name matches, i points to that index and
|
||||
// nameValueMatch becomes false.
|
||||
//
|
||||
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
|
||||
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
|
||||
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
|
||||
// table, the return value i actually refers to the entry t.ents[t.len()-i].
|
||||
//
|
||||
// All tables are assumed to be dynamic tables except for the global staticTable.
|
||||
//
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
|
||||
if !f.Sensitive {
|
||||
if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
|
||||
return t.idToIndex(id), true
|
||||
}
|
||||
}
|
||||
if id := t.byName[f.Name]; id != 0 {
|
||||
return t.idToIndex(id), false
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// idToIndex converts a unique id to an HPACK index.
|
||||
// See Section 2.3.3.
|
||||
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
|
||||
if id <= t.evictCount {
|
||||
panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
|
||||
}
|
||||
k := id - t.evictCount - 1 // convert id to an index t.ents[k]
|
||||
if t != staticTable {
|
||||
return uint64(t.len()) - k // dynamic table
|
||||
}
|
||||
return k + 1
|
||||
}
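// Editor's worked example, not part of the vendored diff: with evictCount=3
// and four live entries, the unique ids of ents[0..3] are 4, 5, 6, 7
// (id = k + evictCount + 1). For a dynamic table idToIndex maps those to
// HPACK indices 4, 3, 2, 1 (index = len - (id - evictCount - 1)), so index 1
// is always the newest entry; for the static table the index is simply k+1.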
|
||||
|
||||
var huffmanCodes = [256]uint32{
|
||||
0x1ff8,
|
||||
0x7fffd8,
|
||||
0xfffffe2,
|
||||
0xfffffe3,
|
||||
0xfffffe4,
|
||||
0xfffffe5,
|
||||
0xfffffe6,
|
||||
0xfffffe7,
|
||||
0xfffffe8,
|
||||
0xffffea,
|
||||
0x3ffffffc,
|
||||
0xfffffe9,
|
||||
0xfffffea,
|
||||
0x3ffffffd,
|
||||
0xfffffeb,
|
||||
0xfffffec,
|
||||
0xfffffed,
|
||||
0xfffffee,
|
||||
0xfffffef,
|
||||
0xffffff0,
|
||||
0xffffff1,
|
||||
0xffffff2,
|
||||
0x3ffffffe,
|
||||
0xffffff3,
|
||||
0xffffff4,
|
||||
0xffffff5,
|
||||
0xffffff6,
|
||||
0xffffff7,
|
||||
0xffffff8,
|
||||
0xffffff9,
|
||||
0xffffffa,
|
||||
0xffffffb,
|
||||
0x14,
|
||||
0x3f8,
|
||||
0x3f9,
|
||||
0xffa,
|
||||
0x1ff9,
|
||||
0x15,
|
||||
0xf8,
|
||||
0x7fa,
|
||||
0x3fa,
|
||||
0x3fb,
|
||||
0xf9,
|
||||
0x7fb,
|
||||
0xfa,
|
||||
0x16,
|
||||
0x17,
|
||||
0x18,
|
||||
0x0,
|
||||
0x1,
|
||||
0x2,
|
||||
0x19,
|
||||
0x1a,
|
||||
0x1b,
|
||||
0x1c,
|
||||
0x1d,
|
||||
0x1e,
|
||||
0x1f,
|
||||
0x5c,
|
||||
0xfb,
|
||||
0x7ffc,
|
||||
0x20,
|
||||
0xffb,
|
||||
0x3fc,
|
||||
0x1ffa,
|
||||
0x21,
|
||||
0x5d,
|
||||
0x5e,
|
||||
0x5f,
|
||||
0x60,
|
||||
0x61,
|
||||
0x62,
|
||||
0x63,
|
||||
0x64,
|
||||
0x65,
|
||||
0x66,
|
||||
0x67,
|
||||
0x68,
|
||||
0x69,
|
||||
0x6a,
|
||||
0x6b,
|
||||
0x6c,
|
||||
0x6d,
|
||||
0x6e,
|
||||
0x6f,
|
||||
0x70,
|
||||
0x71,
|
||||
0x72,
|
||||
0xfc,
|
||||
0x73,
|
||||
0xfd,
|
||||
0x1ffb,
|
||||
0x7fff0,
|
||||
0x1ffc,
|
||||
0x3ffc,
|
||||
0x22,
|
||||
0x7ffd,
|
||||
0x3,
|
||||
0x23,
|
||||
0x4,
|
||||
0x24,
|
||||
0x5,
|
||||
0x25,
|
||||
0x26,
|
||||
0x27,
|
||||
0x6,
|
||||
0x74,
|
||||
0x75,
|
||||
0x28,
|
||||
0x29,
|
||||
0x2a,
|
||||
0x7,
|
||||
0x2b,
|
||||
0x76,
|
||||
0x2c,
|
||||
0x8,
|
||||
0x9,
|
||||
0x2d,
|
||||
0x77,
|
||||
0x78,
|
||||
0x79,
|
||||
0x7a,
|
||||
0x7b,
|
||||
0x7ffe,
|
||||
0x7fc,
|
||||
0x3ffd,
|
||||
0x1ffd,
|
||||
0xffffffc,
|
||||
0xfffe6,
|
||||
0x3fffd2,
|
||||
0xfffe7,
|
||||
0xfffe8,
|
||||
0x3fffd3,
|
||||
0x3fffd4,
|
||||
0x3fffd5,
|
||||
0x7fffd9,
|
||||
0x3fffd6,
|
||||
0x7fffda,
|
||||
0x7fffdb,
|
||||
0x7fffdc,
|
||||
0x7fffdd,
|
||||
0x7fffde,
|
||||
0xffffeb,
|
||||
0x7fffdf,
|
||||
0xffffec,
|
||||
0xffffed,
|
||||
0x3fffd7,
|
||||
0x7fffe0,
|
||||
0xffffee,
|
||||
0x7fffe1,
|
||||
0x7fffe2,
|
||||
0x7fffe3,
|
||||
0x7fffe4,
|
||||
0x1fffdc,
|
||||
0x3fffd8,
|
||||
0x7fffe5,
|
||||
0x3fffd9,
|
||||
0x7fffe6,
|
||||
0x7fffe7,
|
||||
0xffffef,
|
||||
0x3fffda,
|
||||
0x1fffdd,
|
||||
0xfffe9,
|
||||
0x3fffdb,
|
||||
0x3fffdc,
|
||||
0x7fffe8,
|
||||
0x7fffe9,
|
||||
0x1fffde,
|
||||
0x7fffea,
|
||||
0x3fffdd,
|
||||
0x3fffde,
|
||||
0xfffff0,
|
||||
0x1fffdf,
|
||||
0x3fffdf,
|
||||
0x7fffeb,
|
||||
0x7fffec,
|
||||
0x1fffe0,
|
||||
0x1fffe1,
|
||||
0x3fffe0,
|
||||
0x1fffe2,
|
||||
0x7fffed,
|
||||
0x3fffe1,
|
||||
0x7fffee,
|
||||
0x7fffef,
|
||||
0xfffea,
|
||||
0x3fffe2,
|
||||
0x3fffe3,
|
||||
0x3fffe4,
|
||||
0x7ffff0,
|
||||
0x3fffe5,
|
||||
0x3fffe6,
|
||||
0x7ffff1,
|
||||
0x3ffffe0,
|
||||
0x3ffffe1,
|
||||
0xfffeb,
|
||||
0x7fff1,
|
||||
0x3fffe7,
|
||||
0x7ffff2,
|
||||
0x3fffe8,
|
||||
0x1ffffec,
|
||||
0x3ffffe2,
|
||||
0x3ffffe3,
|
||||
0x3ffffe4,
|
||||
0x7ffffde,
|
||||
0x7ffffdf,
|
||||
0x3ffffe5,
|
||||
0xfffff1,
|
||||
0x1ffffed,
|
||||
0x7fff2,
|
||||
0x1fffe3,
|
||||
0x3ffffe6,
|
||||
0x7ffffe0,
|
||||
0x7ffffe1,
|
||||
0x3ffffe7,
|
||||
0x7ffffe2,
|
||||
0xfffff2,
|
||||
0x1fffe4,
|
||||
0x1fffe5,
|
||||
0x3ffffe8,
|
||||
0x3ffffe9,
|
||||
0xffffffd,
|
||||
0x7ffffe3,
|
||||
0x7ffffe4,
|
||||
0x7ffffe5,
|
||||
0xfffec,
|
||||
0xfffff3,
|
||||
0xfffed,
|
||||
0x1fffe6,
|
||||
0x3fffe9,
|
||||
0x1fffe7,
|
||||
0x1fffe8,
|
||||
0x7ffff3,
|
||||
0x3fffea,
|
||||
0x3fffeb,
|
||||
0x1ffffee,
|
||||
0x1ffffef,
|
||||
0xfffff4,
|
||||
0xfffff5,
|
||||
0x3ffffea,
|
||||
0x7ffff4,
|
||||
0x3ffffeb,
|
||||
0x7ffffe6,
|
||||
0x3ffffec,
|
||||
0x3ffffed,
|
||||
0x7ffffe7,
|
||||
0x7ffffe8,
|
||||
0x7ffffe9,
|
||||
0x7ffffea,
|
||||
0x7ffffeb,
|
||||
0xffffffe,
|
||||
0x7ffffec,
|
||||
0x7ffffed,
|
||||
0x7ffffee,
|
||||
0x7ffffef,
|
||||
0x7fffff0,
|
||||
0x3ffffee,
|
||||
}
|
||||
|
||||
var huffmanCodeLen = [256]uint8{
|
||||
13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
|
||||
28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
|
||||
6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
|
||||
5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
|
||||
13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
|
||||
7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
|
||||
15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
|
||||
6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
|
||||
20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
|
||||
24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
|
||||
22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
|
||||
21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
|
||||
26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
|
||||
19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
|
||||
20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
|
||||
26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
|
||||
}
|
||||
409
vendor/golang.org/x/net/http2/http2.go
generated
vendored
Normal file
@@ -0,0 +1,409 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package http2 implements the HTTP/2 protocol.
|
||||
//
|
||||
// This package is low-level and intended to be used directly by very
|
||||
// few people. Most users will use it indirectly through the automatic
|
||||
// use by the net/http package (from Go 1.6 and later).
|
||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
||||
// requires Go 1.6 or later)
|
||||
//
|
||||
// See https://http2.github.io/ for more information on HTTP/2.
|
||||
package http2 // import "golang.org/x/net/http2"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
)
|
||||
|
||||
var (
|
||||
VerboseLogs bool
|
||||
logFrameWrites bool
|
||||
logFrameReads bool
|
||||
|
||||
// Enabling extended CONNECT causes browsers to attempt to use
|
||||
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
|
||||
// package doesn't support extended CONNECT.
|
||||
//
|
||||
// Disable extended CONNECT by default for now.
|
||||
//
|
||||
// Issue #71128.
|
||||
disableExtendedConnectProtocol = true
|
||||
)
|
||||
|
||||
func init() {
|
||||
e := os.Getenv("GODEBUG")
|
||||
if strings.Contains(e, "http2debug=1") {
|
||||
VerboseLogs = true
|
||||
}
|
||||
if strings.Contains(e, "http2debug=2") {
|
||||
VerboseLogs = true
|
||||
logFrameWrites = true
|
||||
logFrameReads = true
|
||||
}
|
||||
if strings.Contains(e, "http2xconnect=1") {
|
||||
disableExtendedConnectProtocol = false
|
||||
}
|
||||
}
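// Editor's note, not part of the vendored diff: these are ordinary GODEBUG
// settings read once at startup, e.g.
//
//	GODEBUG=http2debug=2,http2xconnect=1 ./server
//
// http2debug=1 enables VerboseLogs, http2debug=2 additionally logs every
// frame read and write, and http2xconnect=1 re-enables the extended CONNECT
// protocol that this package disables by default.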
|
||||
|
||||
const (
|
||||
// ClientPreface is the string that must be sent by new
|
||||
// connections from clients.
|
||||
ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
|
||||
|
||||
// SETTINGS_MAX_FRAME_SIZE default
|
||||
// https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.2
|
||||
initialMaxFrameSize = 16384
|
||||
|
||||
// NextProtoTLS is the NPN/ALPN protocol negotiated during
|
||||
// HTTP/2's TLS setup.
|
||||
NextProtoTLS = "h2"
|
||||
|
||||
// https://httpwg.org/specs/rfc7540.html#SettingValues
|
||||
initialHeaderTableSize = 4096
|
||||
|
||||
initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
|
||||
|
||||
defaultMaxReadFrameSize = 1 << 20
|
||||
)
|
||||
|
||||
var (
|
||||
clientPreface = []byte(ClientPreface)
|
||||
)
|
||||
|
||||
type streamState int
|
||||
|
||||
// HTTP/2 stream states.
|
||||
//
|
||||
// See http://tools.ietf.org/html/rfc7540#section-5.1.
|
||||
//
|
||||
// For simplicity, the server code merges "reserved (local)" into
|
||||
// "half-closed (remote)". This is one less state transition to track.
|
||||
// The only downside is that we send PUSH_PROMISEs slightly less
|
||||
// liberally than allowable. More discussion here:
|
||||
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
|
||||
//
|
||||
// "reserved (remote)" is omitted since the client code does not
|
||||
// support server push.
|
||||
const (
|
||||
stateIdle streamState = iota
|
||||
stateOpen
|
||||
stateHalfClosedLocal
|
||||
stateHalfClosedRemote
|
||||
stateClosed
|
||||
)
|
||||
|
||||
var stateName = [...]string{
|
||||
stateIdle: "Idle",
|
||||
stateOpen: "Open",
|
||||
stateHalfClosedLocal: "HalfClosedLocal",
|
||||
stateHalfClosedRemote: "HalfClosedRemote",
|
||||
stateClosed: "Closed",
|
||||
}
|
||||
|
||||
func (st streamState) String() string {
|
||||
return stateName[st]
|
||||
}
|
||||
|
||||
// Setting is a setting parameter: which setting it is, and its value.
|
||||
type Setting struct {
|
||||
// ID is which setting is being set.
|
||||
// See https://httpwg.org/specs/rfc7540.html#SettingFormat
|
||||
ID SettingID
|
||||
|
||||
// Val is the value.
|
||||
Val uint32
|
||||
}
|
||||
|
||||
func (s Setting) String() string {
|
||||
return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
|
||||
}
|
||||
|
||||
// Valid reports whether the setting is valid.
|
||||
func (s Setting) Valid() error {
|
||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
||||
switch s.ID {
|
||||
case SettingEnablePush:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingInitialWindowSize:
|
||||
if s.Val > 1<<31-1 {
|
||||
return ConnectionError(ErrCodeFlowControl)
|
||||
}
|
||||
case SettingMaxFrameSize:
|
||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
case SettingEnableConnectProtocol:
|
||||
if s.Val != 1 && s.Val != 0 {
|
||||
return ConnectionError(ErrCodeProtocol)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
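// Editor's illustrative sketch, not part of the vendored diff: Valid rejects
// values outside the limits above, e.g. a MAX_FRAME_SIZE below the protocol
// minimum of 16384:
//
//	s := http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1024}
//	fmt.Println(s)         // [MAX_FRAME_SIZE = 1024]
//	fmt.Println(s.Valid()) // non-nil connection error (PROTOCOL_ERROR)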
|
||||
|
||||
// A SettingID is an HTTP/2 setting as defined in
|
||||
// https://httpwg.org/specs/rfc7540.html#iana-settings
|
||||
type SettingID uint16
|
||||
|
||||
const (
|
||||
SettingHeaderTableSize SettingID = 0x1
|
||||
SettingEnablePush SettingID = 0x2
|
||||
SettingMaxConcurrentStreams SettingID = 0x3
|
||||
SettingInitialWindowSize SettingID = 0x4
|
||||
SettingMaxFrameSize SettingID = 0x5
|
||||
SettingMaxHeaderListSize SettingID = 0x6
|
||||
SettingEnableConnectProtocol SettingID = 0x8
|
||||
)
|
||||
|
||||
var settingName = map[SettingID]string{
|
||||
SettingHeaderTableSize: "HEADER_TABLE_SIZE",
|
||||
SettingEnablePush: "ENABLE_PUSH",
|
||||
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
|
||||
SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
|
||||
SettingMaxFrameSize: "MAX_FRAME_SIZE",
|
||||
SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
|
||||
SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL",
|
||||
}
|
||||
|
||||
func (s SettingID) String() string {
|
||||
if v, ok := settingName[s]; ok {
|
||||
return v
|
||||
}
|
||||
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
|
||||
}
|
||||
|
||||
// validWireHeaderFieldName reports whether v is a valid header field
|
||||
// name (key). See httpguts.ValidHeaderName for the base rules.
|
||||
//
|
||||
// Further, http2 says:
|
||||
//
|
||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
||||
// characters that are compared in a case-insensitive
|
||||
// fashion. However, header field names MUST be converted to
|
||||
// lowercase prior to their encoding in HTTP/2. "
|
||||
func validWireHeaderFieldName(v string) bool {
|
||||
if len(v) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, r := range v {
|
||||
if !httpguts.IsTokenRune(r) {
|
||||
return false
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
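// Editor's illustrative examples, not part of the vendored diff:
//
//	validWireHeaderFieldName("content-type") // true: lowercase token
//	validWireHeaderFieldName("Content-Type") // false: uppercase is rejected on the wire
//	validWireHeaderFieldName("")             // false: empty name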
|
||||
|
||||
func httpCodeString(code int) string {
|
||||
switch code {
|
||||
case 200:
|
||||
return "200"
|
||||
case 404:
|
||||
return "404"
|
||||
}
|
||||
return strconv.Itoa(code)
|
||||
}
|
||||
|
||||
// from pkg io
|
||||
type stringWriter interface {
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
|
||||
type closeWaiter chan struct{}
|
||||
|
||||
// Init makes a closeWaiter usable.
|
||||
// It exists so that a closeWaiter value can be placed inside a
|
||||
// larger struct and have the Mutex and Cond's memory in the same
|
||||
// allocation.
|
||||
func (cw *closeWaiter) Init() {
|
||||
*cw = make(chan struct{})
|
||||
}
|
||||
|
||||
// Close marks the closeWaiter as closed and unblocks any waiters.
|
||||
func (cw closeWaiter) Close() {
|
||||
close(cw)
|
||||
}
|
||||
|
||||
// Wait waits for the closeWaiter to become closed.
|
||||
func (cw closeWaiter) Wait() {
|
||||
<-cw
|
||||
}
|
||||
|
||||
// bufferedWriter is a buffered writer that writes to w.
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
_ incomparable
|
||||
conn net.Conn // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
byteTimeout time.Duration // immutable, WriteByteTimeout
|
||||
}
|
||||
|
||||
func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
|
||||
return &bufferedWriter{
|
||||
conn: conn,
|
||||
byteTimeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// bufWriterPoolBufferSize is the size of bufio.Writer's
|
||||
// buffers created using bufWriterPool.
|
||||
//
|
||||
// TODO: pick a less arbitrary value? this is a bit under
|
||||
// (3 x typical 1500 byte MTU) at least. Other than that,
|
||||
// not much thought went into it.
|
||||
const bufWriterPoolBufferSize = 4 << 10
|
||||
|
||||
var bufWriterPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
|
||||
},
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Available() int {
|
||||
if w.bw == nil {
|
||||
return bufWriterPoolBufferSize
|
||||
}
|
||||
return w.bw.Available()
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset((*bufferedWriterTimeoutWriter)(w))
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Flush() error {
|
||||
bw := w.bw
|
||||
if bw == nil {
|
||||
return nil
|
||||
}
|
||||
err := bw.Flush()
|
||||
bw.Reset(nil)
|
||||
bufWriterPool.Put(bw)
|
||||
w.bw = nil
|
||||
return err
|
||||
}
|
||||
|
||||
type bufferedWriterTimeoutWriter bufferedWriter
|
||||
|
||||
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
|
||||
return writeWithByteTimeout(w.conn, w.byteTimeout, p)
|
||||
}
|
||||
|
||||
// writeWithByteTimeout writes to conn.
|
||||
// If more than timeout passes without any bytes being written to the connection,
|
||||
// the write fails.
|
||||
func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
|
||||
if timeout <= 0 {
|
||||
return conn.Write(p)
|
||||
}
|
||||
for {
|
||||
conn.SetWriteDeadline(time.Now().Add(timeout))
|
||||
nn, err := conn.Write(p[n:])
|
||||
n += nn
|
||||
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// Either we finished the write, made no progress, or hit the deadline.
|
||||
// Whichever it is, we're done now.
|
||||
conn.SetWriteDeadline(time.Time{})
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
}
|
||||
return uint32(v)
|
||||
}
|
||||
|
||||
// bodyAllowedForStatus reports whether a given response status code
|
||||
// permits a body. See RFC 7230, section 3.3.
|
||||
func bodyAllowedForStatus(status int) bool {
|
||||
switch {
|
||||
case status >= 100 && status <= 199:
|
||||
return false
|
||||
case status == 204:
|
||||
return false
|
||||
case status == 304:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
_ incomparable
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
|
||||
func (e *httpError) Error() string { return e.msg }
|
||||
func (e *httpError) Timeout() bool { return e.timeout }
|
||||
func (e *httpError) Temporary() bool { return true }
|
||||
|
||||
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
||||
|
||||
type connectionStater interface {
|
||||
ConnectionState() tls.ConnectionState
|
||||
}
|
||||
|
||||
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
|
||||
|
||||
type sorter struct {
|
||||
v []string // owned by sorter
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int { return len(s.v) }
|
||||
func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
|
||||
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
|
||||
|
||||
// Keys returns the sorted keys of h.
|
||||
//
|
||||
// The returned slice is only valid until s is used again or returned to
|
||||
// its pool.
|
||||
func (s *sorter) Keys(h http.Header) []string {
|
||||
keys := s.v[:0]
|
||||
for k := range h {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
s.v = keys
|
||||
sort.Sort(s)
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *sorter) SortStrings(ss []string) {
|
||||
// Our sorter works on s.v, which sorter owns, so
|
||||
// stash it away while we sort the user's buffer.
|
||||
save := s.v
|
||||
s.v = ss
|
||||
sort.Sort(s)
|
||||
s.v = save
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
||||
184
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
|
||||
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
|
||||
// underlying buffer is an interface. (io.Pipe is always unbuffered)
|
||||
type pipe struct {
|
||||
mu sync.Mutex
|
||||
c sync.Cond // c.L lazily initialized to &p.mu
|
||||
b pipeBuffer // nil when done reading
|
||||
unread int // bytes unread when done
|
||||
err error // read error once empty. non-nil means closed.
|
||||
breakErr error // immediate read error (caller doesn't see rest of b)
|
||||
donec chan struct{} // closed on error
|
||||
readFn func() // optional code to run in Read before error
|
||||
}
|
||||
|
||||
type pipeBuffer interface {
|
||||
Len() int
|
||||
io.Writer
|
||||
io.Reader
|
||||
}
|
||||
|
||||
// setBuffer initializes the pipe buffer.
|
||||
// It has no effect if the pipe is already closed.
|
||||
func (p *pipe) setBuffer(b pipeBuffer) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
return
|
||||
}
|
||||
p.b = b
|
||||
}
|
||||
|
||||
func (p *pipe) Len() int {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.b == nil {
|
||||
return p.unread
|
||||
}
|
||||
return p.b.Len()
|
||||
}
|
||||
|
||||
// Read waits until data is available and copies bytes
|
||||
// from the buffer into p.
|
||||
func (p *pipe) Read(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
for {
|
||||
if p.breakErr != nil {
|
||||
return 0, p.breakErr
|
||||
}
|
||||
if p.b != nil && p.b.Len() > 0 {
|
||||
return p.b.Read(d)
|
||||
}
|
||||
if p.err != nil {
|
||||
if p.readFn != nil {
|
||||
p.readFn() // e.g. copy trailers
|
||||
p.readFn = nil // not sticky like p.err
|
||||
}
|
||||
p.b = nil
|
||||
return 0, p.err
|
||||
}
|
||||
p.c.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errClosedPipeWrite = errors.New("write on closed buffer")
|
||||
errUninitializedPipeWrite = errors.New("write on uninitialized buffer")
|
||||
)
|
||||
|
||||
// Write copies bytes from p into the buffer and wakes a reader.
|
||||
// It is an error to write more data than the buffer can hold.
|
||||
func (p *pipe) Write(d []byte) (n int, err error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
return 0, errClosedPipeWrite
|
||||
}
|
||||
// pipe.setBuffer is never invoked, leaving the buffer uninitialized.
|
||||
// We shouldn't try to write to an uninitialized pipe,
|
||||
// but returning an error is better than panicking.
|
||||
if p.b == nil {
|
||||
return 0, errUninitializedPipeWrite
|
||||
}
|
||||
return p.b.Write(d)
|
||||
}
|
||||
|
||||
// CloseWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err after all data has been
|
||||
// read.
|
||||
//
|
||||
// The error must be non-nil.
|
||||
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
|
||||
|
||||
// BreakWithError causes the next Read (waking up a current blocked
|
||||
// Read if needed) to return the provided err immediately, without
|
||||
// waiting for unread data.
|
||||
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
|
||||
|
||||
// closeWithErrorAndCode is like CloseWithError but also sets some code to run
|
||||
// in the caller's goroutine before returning the error.
|
||||
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
|
||||
|
||||
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
|
||||
if err == nil {
|
||||
panic("err must be non-nil")
|
||||
}
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.c.L == nil {
|
||||
p.c.L = &p.mu
|
||||
}
|
||||
defer p.c.Signal()
|
||||
if *dst != nil {
|
||||
// Already been done.
|
||||
return
|
||||
}
|
||||
p.readFn = fn
|
||||
if dst == &p.breakErr {
|
||||
if p.b != nil {
|
||||
p.unread += p.b.Len()
|
||||
}
|
||||
p.b = nil
|
||||
}
|
||||
*dst = err
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
|
||||
// requires p.mu be held.
|
||||
func (p *pipe) closeDoneLocked() {
|
||||
if p.donec == nil {
|
||||
return
|
||||
}
|
||||
// Close if unclosed. This isn't racy since we always
|
||||
// hold p.mu while closing.
|
||||
select {
|
||||
case <-p.donec:
|
||||
default:
|
||||
close(p.donec)
|
||||
}
|
||||
}
|
||||
|
||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
||||
func (p *pipe) Err() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.breakErr != nil {
|
||||
return p.breakErr
|
||||
}
|
||||
return p.err
|
||||
}
|
||||
|
||||
// Done returns a channel which is closed if and when this pipe is closed
|
||||
// with CloseWithError.
|
||||
func (p *pipe) Done() <-chan struct{} {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.donec == nil {
|
||||
p.donec = make(chan struct{})
|
||||
if p.err != nil || p.breakErr != nil {
|
||||
// Already hit an error.
|
||||
p.closeDoneLocked()
|
||||
}
|
||||
}
|
||||
return p.donec
|
||||
}
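// Editor's illustrative sketch, not part of the vendored diff: typical
// in-package use of pipe (a *bytes.Buffer satisfies pipeBuffer). Data written
// before CloseWithError is still readable; once the buffer drains, Read
// returns the stored error.
//
//	var p pipe
//	p.setBuffer(new(bytes.Buffer))
//	p.Write([]byte("hello"))
//	p.CloseWithError(io.EOF)
//	buf := make([]byte, 16)
//	n, _ := p.Read(buf)   // n == 5, reads "hello"
//	_, err := p.Read(buf) // err == io.EOF once the buffer is empty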
|
||||
3341
vendor/golang.org/x/net/http2/server.go
generated
vendored
Normal file
File diff suppressed because it is too large
3439
vendor/golang.org/x/net/http2/transport.go
generated
vendored
Normal file
File diff suppressed because it is too large
32
vendor/golang.org/x/net/http2/unencrypted.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
)
|
||||
|
||||
const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
|
||||
|
||||
// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
|
||||
//
|
||||
// TLSNextProto functions accept a *tls.Conn.
|
||||
//
|
||||
// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
|
||||
// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
|
||||
// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
|
||||
// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
|
||||
// that returns the actual connection we want to use.
|
||||
func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
|
||||
conner, ok := tc.NetConn().(interface {
|
||||
UnencryptedNetConn() net.Conn
|
||||
})
|
||||
if !ok {
|
||||
return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
|
||||
}
|
||||
return conner.UnencryptedNetConn(), nil
|
||||
}
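// Editor's illustrative sketch, not part of the vendored diff: a minimal
// net.Conn wrapper with the UnencryptedNetConn method that the type assertion
// above looks for; per the doc comment, the plaintext connection is wrapped
// like this before being handed to a TLSNextProto function inside a
// *tls.Conn. The type name is hypothetical.
//
//	type unencryptedConnSketch struct {
//		net.Conn
//	}
//
//	func (c unencryptedConnSketch) UnencryptedNetConn() net.Conn { return c.Conn }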
|
||||
381
vendor/golang.org/x/net/http2/write.go
generated
vendored
Normal file
@@ -0,0 +1,381 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"golang.org/x/net/internal/httpcommon"
|
||||
)
|
||||
|
||||
// writeFramer is implemented by any type that is used to write frames.
|
||||
type writeFramer interface {
|
||||
writeFrame(writeContext) error
|
||||
|
||||
// staysWithinBuffer reports whether this writer promises that
|
||||
// it will only write less than or equal to size bytes, and it
|
||||
// won't Flush the write context.
|
||||
staysWithinBuffer(size int) bool
|
||||
}
|
||||
|
||||
// writeContext is the interface needed by the various frame writer
|
||||
// types below. All the writeFrame methods below are scheduled via the
|
||||
// frame writing scheduler (see writeScheduler in writesched.go).
|
||||
//
|
||||
// This interface is implemented by *serverConn.
|
||||
//
|
||||
// TODO: decide whether to a) use this in the client code (which didn't
|
||||
// end up using this yet, because it has a simpler design, not
|
||||
// currently implementing priorities), or b) delete this and
|
||||
// make the server code a bit more concrete.
|
||||
type writeContext interface {
|
||||
Framer() *Framer
|
||||
Flush() error
|
||||
CloseConn() error
|
||||
// HeaderEncoder returns an HPACK encoder that writes to the
|
||||
// returned buffer.
|
||||
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
|
||||
}
|
||||
|
||||
// writeEndsStream reports whether w writes a frame that will transition
|
||||
// the stream to a half-closed local state. This returns false for RST_STREAM,
|
||||
// which closes the entire stream (not just the local half).
|
||||
func writeEndsStream(w writeFramer) bool {
|
||||
switch v := w.(type) {
|
||||
case *writeData:
|
||||
return v.endStream
|
||||
case *writeResHeaders:
|
||||
return v.endStream
|
||||
case nil:
|
||||
// This can only happen if the caller reuses w after it's
|
||||
// been intentionally nil'ed out to prevent use. Keep this
|
||||
// here to catch future refactoring breaking it.
|
||||
panic("writeEndsStream called on nil writeFramer")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type flushFrameWriter struct{}
|
||||
|
||||
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
||||
return ctx.Flush()
|
||||
}
|
||||
|
||||
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
|
||||
|
||||
type writeSettings []Setting
|
||||
|
||||
func (s writeSettings) staysWithinBuffer(max int) bool {
|
||||
const settingSize = 6 // uint16 + uint32
|
||||
return frameHeaderLen+settingSize*len(s) <= max
|
||||
|
||||
}
|
||||
|
||||
func (s writeSettings) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettings([]Setting(s)...)
|
||||
}
|
||||
|
||||
type writeGoAway struct {
|
||||
maxStreamID uint32
|
||||
code ErrCode
|
||||
}
|
||||
|
||||
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
||||
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
||||
ctx.Flush() // ignore error: we're hanging up on them anyway
|
||||
return err
|
||||
}
|
||||
|
||||
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
|
||||
|
||||
type writeData struct {
|
||||
streamID uint32
|
||||
p []byte
|
||||
endStream bool
|
||||
}
|
||||
|
||||
func (w *writeData) String() string {
|
||||
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
||||
}
|
||||
|
||||
func (w *writeData) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
||||
}
|
||||
|
||||
func (w *writeData) staysWithinBuffer(max int) bool {
|
||||
return frameHeaderLen+len(w.p) <= max
|
||||
}
|
||||
|
||||
// handlerPanicRST is the message sent from handler goroutines when
|
||||
// the handler panics.
|
||||
type handlerPanicRST struct {
|
||||
StreamID uint32
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
||||
}
|
||||
|
||||
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
||||
}
|
||||
|
||||
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
type writePing struct {
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (w writePing) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(false, w.data)
|
||||
}
|
||||
|
||||
func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(true, w.pf.Data)
|
||||
}
|
||||
|
||||
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
|
||||
|
||||
type writeSettingsAck struct{}
|
||||
|
||||
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
|
||||
|
||||
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
|
||||
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
|
||||
// for the first/last fragment, respectively.
|
||||
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
|
||||
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
||||
// that all peers must support (16KB). Later we could care
|
||||
// more and send larger frames if the peer advertised it, but
|
||||
// there's little point. Most headers are small anyway (so we
|
||||
// generally won't have CONTINUATION frames), and extra frames
|
||||
// only waste 9 bytes anyway.
|
||||
const maxFrameSize = 16384
|
||||
|
||||
first := true
|
||||
for len(headerBlock) > 0 {
|
||||
frag := headerBlock
|
||||
if len(frag) > maxFrameSize {
|
||||
frag = frag[:maxFrameSize]
|
||||
}
|
||||
headerBlock = headerBlock[len(frag):]
|
||||
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
|
||||
return err
|
||||
}
|
||||
first = false
|
||||
}
|
||||
return nil
|
||||
}
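// Editor's worked example, not part of the vendored diff: a 40,000-byte
// header block splits into fragments of 16384, 16384 and 7232 bytes, i.e. one
// HEADERS frame followed by two CONTINUATION frames, with firstFrag/lastFrag
// marking the first and third calls to fn.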
|
||||
|
||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
||||
// for HTTP response headers or trailers from a server handler.
|
||||
type writeResHeaders struct {
|
||||
streamID uint32
|
||||
httpResCode int // 0 means no ":status" line
|
||||
h http.Header // may be nil
|
||||
trailers []string // if non-nil, which keys of h to write. nil means all.
|
||||
endStream bool
|
||||
|
||||
date string
|
||||
contentType string
|
||||
contentLength string
|
||||
}
|
||||
|
||||
func encKV(enc *hpack.Encoder, k, v string) {
|
||||
if VerboseLogs {
|
||||
log.Printf("http2: server encoding header %q = %q", k, v)
|
||||
}
|
||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
|
||||
// TODO: this is a common one. It'd be nice to return true
|
||||
// here and get into the fast path if we could be clever and
|
||||
// calculate the size fast enough, or at least a conservative
|
||||
// upper bound that usually fires. (Maybe if w.h and
|
||||
// w.trailers are nil, so we don't need to enumerate it.)
|
||||
// Otherwise I'm afraid that just calculating the length to
|
||||
// answer this question would be slower than the ~2µs benefit.
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
|
||||
if w.httpResCode != 0 {
|
||||
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
||||
}
|
||||
|
||||
encodeHeaders(enc, w.h, w.trailers)
|
||||
|
||||
if w.contentType != "" {
|
||||
encKV(enc, "content-type", w.contentType)
|
||||
}
|
||||
if w.contentLength != "" {
|
||||
encKV(enc, "content-length", w.contentLength)
|
||||
}
|
||||
if w.date != "" {
|
||||
encKV(enc, "date", w.date)
|
||||
}
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 && w.trailers == nil {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||
}
|
||||
|
||||
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||
if firstFrag {
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: frag,
|
||||
EndStream: w.endStream,
|
||||
EndHeaders: lastFrag,
|
||||
})
|
||||
} else {
|
||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||
}
|
||||
}
|
||||
|
||||
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
|
||||
type writePushPromise struct {
|
||||
streamID uint32 // pusher stream
|
||||
method string // for :method
|
||||
url *url.URL // for :scheme, :authority, :path
|
||||
h http.Header
|
||||
|
||||
// Creates an ID for a pushed stream. This runs on serveG just before
|
||||
// the frame is written. The returned ID is copied to promisedID.
|
||||
allocatePromisedID func() (uint32, error)
|
||||
promisedID uint32
|
||||
}
|
||||
|
||||
func (w *writePushPromise) staysWithinBuffer(max int) bool {
|
||||
// TODO: see writeResHeaders.staysWithinBuffer
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *writePushPromise) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
|
||||
encKV(enc, ":method", w.method)
|
||||
encKV(enc, ":scheme", w.url.Scheme)
|
||||
encKV(enc, ":authority", w.url.Host)
|
||||
encKV(enc, ":path", w.url.RequestURI())
|
||||
encodeHeaders(enc, w.h, nil)
|
||||
|
||||
headerBlock := buf.Bytes()
|
||||
if len(headerBlock) == 0 {
|
||||
panic("unexpected empty hpack")
|
||||
}
|
||||
|
||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
||||
}
|
||||
|
||||
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
||||
if firstFrag {
|
||||
return ctx.Framer().WritePushPromise(PushPromiseParam{
|
||||
StreamID: w.streamID,
|
||||
PromiseID: w.promisedID,
|
||||
BlockFragment: frag,
|
||||
EndHeaders: lastFrag,
|
||||
})
|
||||
} else {
|
||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
||||
}
|
||||
}
|
||||
|
||||
type write100ContinueHeadersFrame struct {
|
||||
streamID uint32
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
||||
enc, buf := ctx.HeaderEncoder()
|
||||
buf.Reset()
|
||||
encKV(enc, ":status", "100")
|
||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
||||
StreamID: w.streamID,
|
||||
BlockFragment: buf.Bytes(),
|
||||
EndStream: false,
|
||||
EndHeaders: true,
|
||||
})
|
||||
}
|
||||
|
||||
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
|
||||
// Sloppy but conservative:
|
||||
return 9+2*(len(":status")+len("100")) <= max
|
||||
}
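// For reference, that conservative estimate works out to 9+2*(7+3) = 29
// bytes: a 9-byte frame header plus twice the literal lengths of ":status"
// and "100", which comfortably over-approximates the actual HPACK encoding.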
|
||||
|
||||
type writeWindowUpdate struct {
|
||||
streamID uint32 // or 0 for conn-level
|
||||
n uint32
|
||||
}
|
||||
|
||||
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
||||
}
|
||||
|
||||
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
|
||||
// is encoded only if k is in keys.
|
||||
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
||||
if keys == nil {
|
||||
sorter := sorterPool.Get().(*sorter)
|
||||
// Using defer here, since the returned keys from the
|
||||
// sorter.Keys method are only valid until the sorter
|
||||
// is returned:
|
||||
defer sorterPool.Put(sorter)
|
||||
keys = sorter.Keys(h)
|
||||
}
|
||||
for _, k := range keys {
|
||||
vv := h[k]
|
||||
k, ascii := httpcommon.LowerHeader(k)
|
||||
if !ascii {
|
||||
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
|
||||
// field names have to be ASCII characters (just as in HTTP/1.x).
|
||||
continue
|
||||
}
|
||||
if !validWireHeaderFieldName(k) {
|
||||
// Skip it as backup paranoia. Per
|
||||
// golang.org/issue/14048, these should
|
||||
// already be rejected at a higher level.
|
||||
continue
|
||||
}
|
||||
isTE := k == "transfer-encoding"
|
||||
for _, v := range vv {
|
||||
if !httpguts.ValidHeaderFieldValue(v) {
|
||||
// TODO: return an error? golang.org/issue/14048
|
||||
// For now just omit it.
|
||||
continue
|
||||
}
|
||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
||||
if isTE && v != "trailers" {
|
||||
continue
|
||||
}
|
||||
encKV(enc, k, v)
|
||||
}
|
||||
}
|
||||
}
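// A minimal usage sketch of the keys filter above (illustrative only; the
// header names are made up and this snippet is not part of the package):
//
//	var buf bytes.Buffer
//	enc := hpack.NewEncoder(&buf)
//	h := http.Header{"Foo": {"a"}, "X-Trailer": {"b"}}
//	encodeHeaders(enc, h, []string{"X-Trailer"}) // only "x-trailer: b" is emitted
//	encodeHeaders(enc, h, nil)                   // both headers, lowercased, in sorted order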
|
||||
288
vendor/golang.org/x/net/http2/writesched.go
generated
vendored
Normal file
@@ -0,0 +1,288 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "fmt"
|
||||
|
||||
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
|
||||
// Methods are never called concurrently.
|
||||
type WriteScheduler interface {
|
||||
// OpenStream opens a new stream in the write scheduler.
|
||||
// It is illegal to call this with streamID=0 or with a streamID that is
|
||||
// already open -- the call may panic.
|
||||
OpenStream(streamID uint32, options OpenStreamOptions)
|
||||
|
||||
// CloseStream closes a stream in the write scheduler. Any frames queued on
|
||||
// this stream should be discarded. It is illegal to call this on a stream
|
||||
// that is not open -- the call may panic.
|
||||
CloseStream(streamID uint32)
|
||||
|
||||
// AdjustStream adjusts the priority of the given stream. This may be called
|
||||
// on a stream that has not yet been opened or has been closed. Note that
|
||||
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
|
||||
// https://tools.ietf.org/html/rfc7540#section-5.1
|
||||
AdjustStream(streamID uint32, priority PriorityParam)
|
||||
|
||||
// Push queues a frame in the scheduler. In most cases, this will not be
|
||||
// called with wr.StreamID()!=0 unless that stream is currently open. The one
|
||||
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
|
||||
Push(wr FrameWriteRequest)
|
||||
|
||||
// Pop dequeues the next frame to write. Returns false if no frames can
|
||||
// be written. Frames with a given wr.StreamID() are Pop'd in the same
|
||||
// order they are Push'd, except RST_STREAM frames. No frames should be
|
||||
// discarded except by CloseStream.
|
||||
Pop() (wr FrameWriteRequest, ok bool)
|
||||
}
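// fifoWriteScheduler is a minimal sketch of the contract above: a single FIFO
// that ignores stream boundaries and priorities, and (unlike a conforming
// implementation) does not discard queued frames on CloseStream. It is
// illustrative only and not used by this package; the real schedulers live in
// the writesched_*.go files.
type fifoWriteScheduler struct {
	q writeQueue
}

func (ws *fifoWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {}
func (ws *fifoWriteScheduler) CloseStream(streamID uint32)                           {}
func (ws *fifoWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam)  {}
func (ws *fifoWriteScheduler) Push(wr FrameWriteRequest)                             { ws.q.push(wr) }

func (ws *fifoWriteScheduler) Pop() (FrameWriteRequest, bool) {
	if ws.q.empty() {
		return FrameWriteRequest{}, false
	}
	return ws.q.shift(), true
}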
|
||||
|
||||
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
|
||||
type OpenStreamOptions struct {
|
||||
// PusherID is zero if the stream was initiated by the client. Otherwise,
|
||||
// PusherID names the stream that pushed the newly opened stream.
|
||||
PusherID uint32
|
||||
// priority is used to set the priority of the newly opened stream.
|
||||
priority PriorityParam
|
||||
}
|
||||
|
||||
// FrameWriteRequest is a request to write a frame.
|
||||
type FrameWriteRequest struct {
|
||||
// write is the interface value that does the writing, once the
|
||||
// WriteScheduler has selected this frame to write. The write
|
||||
// functions are all defined in write.go.
|
||||
write writeFramer
|
||||
|
||||
// stream is the stream on which this frame will be written.
|
||||
// nil for non-stream frames like PING and SETTINGS.
|
||||
// nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
|
||||
stream *stream
|
||||
|
||||
// done, if non-nil, must be a buffered channel with space for
|
||||
// 1 message and is sent the return value from write (or an
|
||||
// earlier error) when the frame has been written.
|
||||
done chan error
|
||||
}
|
||||
|
||||
// StreamID returns the id of the stream this frame will be written to.
|
||||
// 0 is used for non-stream frames such as PING and SETTINGS.
|
||||
func (wr FrameWriteRequest) StreamID() uint32 {
|
||||
if wr.stream == nil {
|
||||
if se, ok := wr.write.(StreamError); ok {
|
||||
// (*serverConn).resetStream doesn't set
|
||||
// stream because it doesn't necessarily have
|
||||
// one. So special case this type of write
|
||||
// message.
|
||||
return se.StreamID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return wr.stream.id
|
||||
}
|
||||
|
||||
// isControl reports whether wr is a control frame for MaxQueuedControlFrames
|
||||
// purposes. That includes non-stream frames and RST_STREAM frames.
|
||||
func (wr FrameWriteRequest) isControl() bool {
|
||||
return wr.stream == nil
|
||||
}
|
||||
|
||||
// DataSize returns the number of flow control bytes that must be consumed
|
||||
// to write this entire frame. This is 0 for non-DATA frames.
|
||||
func (wr FrameWriteRequest) DataSize() int {
|
||||
if wd, ok := wr.write.(*writeData); ok {
|
||||
return len(wd.p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Consume consumes min(n, available) bytes from this frame, where available
|
||||
// is the number of flow control bytes available on the stream. Consume returns
|
||||
// 0, 1, or 2 frames, where the integer return value gives the number of frames
|
||||
// returned.
|
||||
//
|
||||
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
|
||||
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
|
||||
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
|
||||
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
|
||||
// underlying stream's flow control budget.
|
||||
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
|
||||
var empty FrameWriteRequest
|
||||
|
||||
// Non-DATA frames are always consumed whole.
|
||||
wd, ok := wr.write.(*writeData)
|
||||
if !ok || len(wd.p) == 0 {
|
||||
return wr, empty, 1
|
||||
}
|
||||
|
||||
// Might need to split after applying limits.
|
||||
allowed := wr.stream.flow.available()
|
||||
if n < allowed {
|
||||
allowed = n
|
||||
}
|
||||
if wr.stream.sc.maxFrameSize < allowed {
|
||||
allowed = wr.stream.sc.maxFrameSize
|
||||
}
|
||||
if allowed <= 0 {
|
||||
return empty, empty, 0
|
||||
}
|
||||
if len(wd.p) > int(allowed) {
|
||||
wr.stream.flow.take(allowed)
|
||||
consumed := FrameWriteRequest{
|
||||
stream: wr.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: wd.p[:allowed],
|
||||
// Even if the original had endStream set, there
|
||||
// are bytes remaining because len(wd.p) > allowed,
|
||||
// so we know endStream is false.
|
||||
endStream: false,
|
||||
},
|
||||
// Our caller is blocking on the final DATA frame, not
|
||||
// this intermediate frame, so no need to wait.
|
||||
done: nil,
|
||||
}
|
||||
rest := FrameWriteRequest{
|
||||
stream: wr.stream,
|
||||
write: &writeData{
|
||||
streamID: wd.streamID,
|
||||
p: wd.p[allowed:],
|
||||
endStream: wd.endStream,
|
||||
},
|
||||
done: wr.done,
|
||||
}
|
||||
return consumed, rest, 2
|
||||
}
|
||||
|
||||
// The frame is consumed whole.
|
||||
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
|
||||
wr.stream.flow.take(int32(len(wd.p)))
|
||||
return wr, empty, 1
|
||||
}
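// Usage sketch for Consume (illustrative; the variable names are not from
// this package). A scheduler with n bytes of flow-control budget handles the
// three cases exactly as writeQueue.consume does below:
//
//	consumed, rest, numresult := wr.Consume(n)
//	switch numresult {
//	case 0: // no budget: leave wr queued as-is
//	case 1: // whole frame fits: send consumed, drop wr from the queue
//	case 2: // split: send consumed now, requeue rest in wr's place
//	}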
|
||||
|
||||
// String is for debugging only.
|
||||
func (wr FrameWriteRequest) String() string {
|
||||
var des string
|
||||
if s, ok := wr.write.(fmt.Stringer); ok {
|
||||
des = s.String()
|
||||
} else {
|
||||
des = fmt.Sprintf("%T", wr.write)
|
||||
}
|
||||
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
|
||||
}
|
||||
|
||||
// replyToWriter sends err to wr.done and panics if the send must block.
|
||||
// This does nothing if wr.done is nil.
|
||||
func (wr *FrameWriteRequest) replyToWriter(err error) {
|
||||
if wr.done == nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case wr.done <- err:
|
||||
default:
|
||||
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
|
||||
}
|
||||
wr.write = nil // prevent use (assume it's tainted after wr.done send)
|
||||
}
|
||||
|
||||
// writeQueue is used by implementations of WriteScheduler.
|
||||
//
|
||||
// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
|
||||
// FrameWriteRequests associated with a given stream. This is implemented as a
|
||||
// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
|
||||
// by incrementing currPos of currQueue. Adding an item is done by appending it
|
||||
// to the nextQueue. If currQueue is empty when trying to remove an item, we
|
||||
// can swap currQueue and nextQueue to remedy the situation.
|
||||
// This two-stage queue is analogous to the use of two lists in Okasaki's
|
||||
// purely functional queue but without the overhead of reversing the list when
|
||||
// swapping stages.
|
||||
//
|
||||
// writeQueue also contains prev and next, which can be used by implementations
|
||||
// of WriteScheduler to construct data structures that represent the order of
|
||||
// writing between different streams (e.g. circular linked list).
|
||||
type writeQueue struct {
|
||||
currQueue []FrameWriteRequest
|
||||
nextQueue []FrameWriteRequest
|
||||
currPos int
|
||||
|
||||
prev, next *writeQueue
|
||||
}
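// writeQueueOrderSketch is an illustrative helper (not used by this package)
// showing the FIFO behavior of the two-stage queue described above: frames
// pushed onto nextQueue come back out of shift in insertion order, and the
// stages are swapped only once currQueue runs dry.
func writeQueueOrderSketch() []uint32 {
	var q writeQueue
	for _, id := range []uint32{1, 3, 5} {
		// StreamError implements writeFramer (it writes a RST_STREAM frame),
		// so it is a convenient stand-in payload for this sketch.
		q.push(FrameWriteRequest{write: StreamError{StreamID: id}})
	}
	var order []uint32
	for !q.empty() {
		order = append(order, q.shift().StreamID())
	}
	return order // [1 3 5]
}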
|
||||
|
||||
func (q *writeQueue) empty() bool {
|
||||
return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
|
||||
}
|
||||
|
||||
func (q *writeQueue) push(wr FrameWriteRequest) {
|
||||
q.nextQueue = append(q.nextQueue, wr)
|
||||
}
|
||||
|
||||
func (q *writeQueue) shift() FrameWriteRequest {
|
||||
if q.empty() {
|
||||
panic("invalid use of queue")
|
||||
}
|
||||
if q.currPos >= len(q.currQueue) {
|
||||
q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
|
||||
}
|
||||
wr := q.currQueue[q.currPos]
|
||||
q.currQueue[q.currPos] = FrameWriteRequest{}
|
||||
q.currPos++
|
||||
return wr
|
||||
}
|
||||
|
||||
func (q *writeQueue) peek() *FrameWriteRequest {
|
||||
if q.currPos < len(q.currQueue) {
|
||||
return &q.currQueue[q.currPos]
|
||||
}
|
||||
if len(q.nextQueue) > 0 {
|
||||
return &q.nextQueue[0]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consume consumes up to n bytes from the frame at the head of the queue. If the frame is
|
||||
// entirely consumed, it is removed from the queue. If the frame
|
||||
// is partially consumed, the frame is kept with the consumed
|
||||
// bytes removed. Returns true iff any bytes were consumed.
|
||||
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
|
||||
if q.empty() {
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
consumed, rest, numresult := q.peek().Consume(n)
|
||||
switch numresult {
|
||||
case 0:
|
||||
return FrameWriteRequest{}, false
|
||||
case 1:
|
||||
q.shift()
|
||||
case 2:
|
||||
*q.peek() = rest
|
||||
}
|
||||
return consumed, true
|
||||
}
|
||||
|
||||
type writeQueuePool []*writeQueue
|
||||
|
||||
// put inserts an unused writeQueue into the pool.
|
||||
func (p *writeQueuePool) put(q *writeQueue) {
|
||||
for i := range q.currQueue {
|
||||
q.currQueue[i] = FrameWriteRequest{}
|
||||
}
|
||||
for i := range q.nextQueue {
|
||||
q.nextQueue[i] = FrameWriteRequest{}
|
||||
}
|
||||
q.currQueue = q.currQueue[:0]
|
||||
q.nextQueue = q.nextQueue[:0]
|
||||
q.currPos = 0
|
||||
*p = append(*p, q)
|
||||
}
|
||||
|
||||
// get returns an empty writeQueue.
|
||||
func (p *writeQueuePool) get() *writeQueue {
|
||||
ln := len(*p)
|
||||
if ln == 0 {
|
||||
return new(writeQueue)
|
||||
}
|
||||
x := ln - 1
|
||||
q := (*p)[x]
|
||||
(*p)[x] = nil
|
||||
*p = (*p)[:x]
|
||||
return q
|
||||
}
|
||||
450
vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
generated
vendored
Normal file
@@ -0,0 +1,450 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// RFC 7540, Section 5.3.5: the default weight is 16.
|
||||
const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
|
||||
|
||||
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
|
||||
type PriorityWriteSchedulerConfig struct {
|
||||
// MaxClosedNodesInTree controls the maximum number of closed streams to
|
||||
// retain in the priority tree. Setting this to zero saves a small amount
|
||||
// of memory at the cost of performance.
|
||||
//
|
||||
// See RFC 7540, Section 5.3.4:
|
||||
// "It is possible for a stream to become closed while prioritization
|
||||
// information ... is in transit. ... This potentially creates suboptimal
|
||||
// prioritization, since the stream could be given a priority that is
|
||||
// different from what is intended. To avoid these problems, an endpoint
|
||||
// SHOULD retain stream prioritization state for a period after streams
|
||||
// become closed. The longer state is retained, the lower the chance that
|
||||
// streams are assigned incorrect or default priority values."
|
||||
MaxClosedNodesInTree int
|
||||
|
||||
// MaxIdleNodesInTree controls the maximum number of idle streams to
|
||||
// retain in the priority tree. Setting this to zero saves a small amount
|
||||
// of memory at the cost of performance.
|
||||
//
|
||||
// See RFC 7540, Section 5.3.4:
|
||||
// Similarly, streams that are in the "idle" state can be assigned
|
||||
// priority or become a parent of other streams. This allows for the
|
||||
// creation of a grouping node in the dependency tree, which enables
|
||||
// more flexible expressions of priority. Idle streams begin with a
|
||||
// default priority (Section 5.3.5).
|
||||
MaxIdleNodesInTree int
|
||||
|
||||
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
|
||||
// data is delivered in priority order. This works around a race where
|
||||
// stream B depends on stream A and both streams are about to call Write
|
||||
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
|
||||
// write as much data from B as possible, but this is suboptimal because A
|
||||
// is a higher-priority stream. With throttling enabled, we write a small
|
||||
// amount of data from B to minimize the amount of bandwidth that B can
|
||||
// steal from A.
|
||||
ThrottleOutOfOrderWrites bool
|
||||
}
|
||||
|
||||
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
||||
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
|
||||
// If cfg is nil, default options are used.
|
||||
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
||||
if cfg == nil {
|
||||
// For justification of these defaults, see:
|
||||
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
|
||||
cfg = &PriorityWriteSchedulerConfig{
|
||||
MaxClosedNodesInTree: 10,
|
||||
MaxIdleNodesInTree: 10,
|
||||
ThrottleOutOfOrderWrites: false,
|
||||
}
|
||||
}
|
||||
|
||||
ws := &priorityWriteSchedulerRFC7540{
|
||||
nodes: make(map[uint32]*priorityNodeRFC7540),
|
||||
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
|
||||
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
|
||||
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
|
||||
}
|
||||
ws.nodes[0] = &ws.root
|
||||
if cfg.ThrottleOutOfOrderWrites {
|
||||
ws.writeThrottleLimit = 1024
|
||||
} else {
|
||||
ws.writeThrottleLimit = math.MaxInt32
|
||||
}
|
||||
return ws
|
||||
}
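// newPriorityTreeSketch is an illustrative example (not used by this package)
// of overriding the defaults above: a server that wants deeper closed/idle
// history and out-of-order write throttling could construct the scheduler as
// follows and then open streams on it.
func newPriorityTreeSketch() WriteScheduler {
	ws := NewPriorityWriteScheduler(&PriorityWriteSchedulerConfig{
		MaxClosedNodesInTree:     100,
		MaxIdleNodesInTree:       100,
		ThrottleOutOfOrderWrites: true,
	})
	ws.OpenStream(1, OpenStreamOptions{})
	return ws
}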
|
||||
|
||||
type priorityNodeStateRFC7540 int
|
||||
|
||||
const (
|
||||
priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
|
||||
priorityNodeClosedRFC7540
|
||||
priorityNodeIdleRFC7540
|
||||
)
|
||||
|
||||
// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
|
||||
// Each node is associated with a single stream ID.
|
||||
// See RFC 7540, Section 5.3.
|
||||
type priorityNodeRFC7540 struct {
|
||||
q writeQueue // queue of pending frames to write
|
||||
id uint32 // id of the stream, or 0 for the root of the tree
|
||||
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
|
||||
state priorityNodeStateRFC7540 // open | closed | idle
|
||||
bytes int64 // number of bytes written by this node, or 0 if closed
|
||||
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
|
||||
|
||||
// These links form the priority tree.
|
||||
parent *priorityNodeRFC7540
|
||||
kids *priorityNodeRFC7540 // start of the kids list
|
||||
prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
|
||||
}
|
||||
|
||||
func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
|
||||
if n == parent {
|
||||
panic("setParent to self")
|
||||
}
|
||||
if n.parent == parent {
|
||||
return
|
||||
}
|
||||
// Unlink from current parent.
|
||||
if parent := n.parent; parent != nil {
|
||||
if n.prev == nil {
|
||||
parent.kids = n.next
|
||||
} else {
|
||||
n.prev.next = n.next
|
||||
}
|
||||
if n.next != nil {
|
||||
n.next.prev = n.prev
|
||||
}
|
||||
}
|
||||
// Link to new parent.
|
||||
// If parent=nil, remove n from the tree.
|
||||
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
|
||||
n.parent = parent
|
||||
if parent == nil {
|
||||
n.next = nil
|
||||
n.prev = nil
|
||||
} else {
|
||||
n.next = parent.kids
|
||||
n.prev = nil
|
||||
if n.next != nil {
|
||||
n.next.prev = n
|
||||
}
|
||||
parent.kids = n
|
||||
}
|
||||
}
|
||||
|
||||
func (n *priorityNodeRFC7540) addBytes(b int64) {
|
||||
n.bytes += b
|
||||
for ; n != nil; n = n.parent {
|
||||
n.subtreeBytes += b
|
||||
}
|
||||
}
|
||||
|
||||
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
|
||||
// with a non-empty write queue. When f returns true, this function returns true and the
|
||||
// walk halts. tmp is used as scratch space for sorting.
|
||||
//
|
||||
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
|
||||
// if any ancestor p of n is still open (ignoring the root node).
|
||||
func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
|
||||
if !n.q.empty() && f(n, openParent) {
|
||||
return true
|
||||
}
|
||||
if n.kids == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Don't consider the root "open" when updating openParent since
|
||||
// we can't send data frames on the root stream (only control frames).
|
||||
if n.id != 0 {
|
||||
openParent = openParent || (n.state == priorityNodeOpenRFC7540)
|
||||
}
|
||||
|
||||
// Common case: only one kid or all kids have the same weight.
|
||||
// Some clients don't use weights; other clients (like web browsers)
|
||||
// use mostly-linear priority trees.
|
||||
w := n.kids.weight
|
||||
needSort := false
|
||||
for k := n.kids.next; k != nil; k = k.next {
|
||||
if k.weight != w {
|
||||
needSort = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !needSort {
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Uncommon case: sort the child nodes. We remove the kids from the parent,
|
||||
// then re-insert after sorting so we can reuse tmp for future sort calls.
|
||||
*tmp = (*tmp)[:0]
|
||||
for n.kids != nil {
|
||||
*tmp = append(*tmp, n.kids)
|
||||
n.kids.setParent(nil)
|
||||
}
|
||||
sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
|
||||
for i := len(*tmp) - 1; i >= 0; i-- {
|
||||
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
|
||||
}
|
||||
for k := n.kids; k != nil; k = k.next {
|
||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
|
||||
|
||||
func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
|
||||
func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
|
||||
func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
|
||||
// Prefer the subtree that has sent fewer bytes relative to its weight.
|
||||
// See sections 5.3.2 and 5.3.4.
|
||||
wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
|
||||
wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
|
||||
if bi == 0 && bk == 0 {
|
||||
return wi >= wk
|
||||
}
|
||||
if bk == 0 {
|
||||
return false
|
||||
}
|
||||
return bi/bk <= wi/wk
|
||||
}
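// Worked example (illustrative): take siblings i and k with weight fields 15
// and 7 (actual weights 16 and 8) whose subtrees have sent 200 and 50 bytes.
// Then bi/bk = 4 while wi/wk = 2: subtree i has already received double its
// fair share relative to k, so Less(i, k) is false and k sorts first.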
|
||||
|
||||
type priorityWriteSchedulerRFC7540 struct {
|
||||
// root is the root of the priority tree, where root.id = 0.
|
||||
// The root queues control frames that are not associated with any stream.
|
||||
root priorityNodeRFC7540
|
||||
|
||||
// nodes maps stream ids to priority tree nodes.
|
||||
nodes map[uint32]*priorityNodeRFC7540
|
||||
|
||||
// maxID is the maximum stream id in nodes.
|
||||
maxID uint32
|
||||
|
||||
// lists of nodes that have been closed or are idle, but are kept in
|
||||
// the tree for improved prioritization. When the lengths exceed either
|
||||
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
|
||||
closedNodes, idleNodes []*priorityNodeRFC7540
|
||||
|
||||
// From the config.
|
||||
maxClosedNodesInTree int
|
||||
maxIdleNodesInTree int
|
||||
writeThrottleLimit int32
|
||||
enableWriteThrottle bool
|
||||
|
||||
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
|
||||
tmp []*priorityNodeRFC7540
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
// The stream may currently be idle, but it must not already be open or closed.
|
||||
if curr := ws.nodes[streamID]; curr != nil {
|
||||
if curr.state != priorityNodeIdleRFC7540 {
|
||||
panic(fmt.Sprintf("stream %d already opened", streamID))
|
||||
}
|
||||
curr.state = priorityNodeOpenRFC7540
|
||||
return
|
||||
}
|
||||
|
||||
// RFC 7540, Section 5.3.5:
|
||||
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
|
||||
// Pushed streams initially depend on their associated stream. In both cases,
|
||||
// streams are assigned a default weight of 16."
|
||||
parent := ws.nodes[options.PusherID]
|
||||
if parent == nil {
|
||||
parent = &ws.root
|
||||
}
|
||||
n := &priorityNodeRFC7540{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeightRFC7540,
|
||||
state: priorityNodeOpenRFC7540,
|
||||
}
|
||||
n.setParent(parent)
|
||||
ws.nodes[streamID] = n
|
||||
if streamID > ws.maxID {
|
||||
ws.maxID = streamID
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
|
||||
if streamID == 0 {
|
||||
panic("violation of WriteScheduler interface: cannot close stream 0")
|
||||
}
|
||||
if ws.nodes[streamID] == nil {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
|
||||
}
|
||||
if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
|
||||
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
|
||||
}
|
||||
|
||||
n := ws.nodes[streamID]
|
||||
n.state = priorityNodeClosedRFC7540
|
||||
n.addBytes(-n.bytes)
|
||||
|
||||
q := n.q
|
||||
ws.queuePool.put(&q)
|
||||
if ws.maxClosedNodesInTree > 0 {
|
||||
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
|
||||
} else {
|
||||
ws.removeNode(n)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
if streamID == 0 {
|
||||
panic("adjustPriority on root")
|
||||
}
|
||||
|
||||
// If streamID does not exist, there are two cases:
|
||||
// - A closed stream that has been removed (this will have ID <= maxID)
|
||||
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
|
||||
n := ws.nodes[streamID]
|
||||
if n == nil {
|
||||
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
|
||||
return
|
||||
}
|
||||
ws.maxID = streamID
|
||||
n = &priorityNodeRFC7540{
|
||||
q: *ws.queuePool.get(),
|
||||
id: streamID,
|
||||
weight: priorityDefaultWeightRFC7540,
|
||||
state: priorityNodeIdleRFC7540,
|
||||
}
|
||||
n.setParent(&ws.root)
|
||||
ws.nodes[streamID] = n
|
||||
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
|
||||
}
|
||||
|
||||
// Section 5.3.1: A dependency on a stream that is not currently in the tree
|
||||
// results in that stream being given a default priority (Section 5.3.5).
|
||||
parent := ws.nodes[priority.StreamDep]
|
||||
if parent == nil {
|
||||
n.setParent(&ws.root)
|
||||
n.weight = priorityDefaultWeightRFC7540
|
||||
return
|
||||
}
|
||||
|
||||
// Ignore if the client tries to make a node its own parent.
|
||||
if n == parent {
|
||||
return
|
||||
}
|
||||
|
||||
// Section 5.3.3:
|
||||
// "If a stream is made dependent on one of its own dependencies, the
|
||||
// formerly dependent stream is first moved to be dependent on the
|
||||
// reprioritized stream's previous parent. The moved dependency retains
|
||||
// its weight."
|
||||
//
|
||||
// That is: if parent depends on n, move parent to depend on n.parent.
|
||||
for x := parent.parent; x != nil; x = x.parent {
|
||||
if x == n {
|
||||
parent.setParent(n.parent)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Section 5.3.3: The exclusive flag causes the stream to become the sole
|
||||
// dependency of its parent stream, causing other dependencies to become
|
||||
// dependent on the exclusive stream.
|
||||
if priority.Exclusive {
|
||||
k := parent.kids
|
||||
for k != nil {
|
||||
next := k.next
|
||||
if k != n {
|
||||
k.setParent(n)
|
||||
}
|
||||
k = next
|
||||
}
|
||||
}
|
||||
|
||||
n.setParent(parent)
|
||||
n.weight = priority.Weight
|
||||
}
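// Worked example (illustrative): with the tree 0 -> 3 -> 5, a non-exclusive
// AdjustStream(3, PriorityParam{StreamDep: 5}) first moves 5 up to depend on
// 3's former parent (the root), and only then makes 3 a child of 5, yielding
// 0 -> 5 -> 3. The moved stream 5 keeps its old weight, while 3 takes the
// weight carried by the PRIORITY frame.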
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
|
||||
var n *priorityNodeRFC7540
|
||||
if wr.isControl() {
|
||||
n = &ws.root
|
||||
} else {
|
||||
id := wr.StreamID()
|
||||
n = ws.nodes[id]
|
||||
if n == nil {
|
||||
// id is an idle or closed stream. wr should not be a HEADERS or
|
||||
// DATA frame. In either case, we push wr onto the root, rather
|
||||
// than creating a new priorityNode.
|
||||
if wr.DataSize() > 0 {
|
||||
panic("add DATA on non-open stream")
|
||||
}
|
||||
n = &ws.root
|
||||
}
|
||||
}
|
||||
n.q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
|
||||
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
|
||||
limit := int32(math.MaxInt32)
|
||||
if openParent {
|
||||
limit = ws.writeThrottleLimit
|
||||
}
|
||||
wr, ok = n.q.consume(limit)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
n.addBytes(int64(wr.DataSize()))
|
||||
// If B depends on A and B continuously has data available but A
|
||||
// does not, gradually increase the throttling limit to allow B to
|
||||
// steal more and more bandwidth from A.
|
||||
if openParent {
|
||||
ws.writeThrottleLimit += 1024
|
||||
if ws.writeThrottleLimit < 0 {
|
||||
ws.writeThrottleLimit = math.MaxInt32
|
||||
}
|
||||
} else if ws.enableWriteThrottle {
|
||||
ws.writeThrottleLimit = 1024
|
||||
}
|
||||
return true
|
||||
})
|
||||
return wr, ok
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
|
||||
if maxSize == 0 {
|
||||
return
|
||||
}
|
||||
if len(*list) == maxSize {
|
||||
// Remove the oldest node, then shift left.
|
||||
ws.removeNode((*list)[0])
|
||||
x := (*list)[1:]
|
||||
copy(*list, x)
|
||||
*list = (*list)[:len(x)]
|
||||
}
|
||||
*list = append(*list, n)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
|
||||
for n.kids != nil {
|
||||
n.kids.setParent(n.parent)
|
||||
}
|
||||
n.setParent(nil)
|
||||
delete(ws.nodes, n.id)
|
||||
}
|
||||
224
vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
// Copyright 2025 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
type streamMetadata struct {
|
||||
location *writeQueue
|
||||
priority PriorityParam
|
||||
}
|
||||
|
||||
type priorityWriteSchedulerRFC9218 struct {
|
||||
// control contains control frames (SETTINGS, PING, etc.).
|
||||
control writeQueue
|
||||
|
||||
// heads contains the heads of circular lists of streams.
|
||||
// We put these heads within a nested array that represents urgency and
|
||||
// incremental, as defined in
|
||||
// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
|
||||
// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
|
||||
heads [8][2]*writeQueue
|
||||
|
||||
// streams contains a mapping between each stream ID and their metadata, so
|
||||
// we can quickly locate them when needing to, for example, adjust their
|
||||
// priority.
|
||||
streams map[uint32]streamMetadata
|
||||
|
||||
// queuePool are empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
|
||||
// prioritizeIncremental is used to determine whether we should prioritize
|
||||
// incremental streams or not, when urgency is the same in a given Pop()
|
||||
// call.
|
||||
prioritizeIncremental bool
|
||||
|
||||
// priorityUpdateBuf is used to buffer the most recent PRIORITY_UPDATE we
|
||||
// receive per https://www.rfc-editor.org/rfc/rfc9218.html#name-the-priority_update-frame.
|
||||
priorityUpdateBuf struct {
|
||||
// streamID being 0 means that the buffer is empty. This is a safe
|
||||
// assumption as PRIORITY_UPDATE for stream 0 is a PROTOCOL_ERROR.
|
||||
streamID uint32
|
||||
priority PriorityParam
|
||||
}
|
||||
}
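// headsIndexSketch is an illustrative helper (not used by this package)
// showing how a PriorityParam addresses the heads array above: the urgency
// (0 = highest, 7 = lowest) picks the row and the incremental flag picks the
// column, so u=3, i=true lands in heads[3][1].
func headsIndexSketch(p PriorityParam) (u, i int) {
	u = int(p.urgency)
	if p.incremental {
		i = 1
	}
	return u, i
}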
|
||||
|
||||
func newPriorityWriteSchedulerRFC9218() WriteScheduler {
|
||||
ws := &priorityWriteSchedulerRFC9218{
|
||||
streams: make(map[uint32]streamMetadata),
|
||||
}
|
||||
return ws
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
|
||||
if ws.streams[streamID].location != nil {
|
||||
panic(fmt.Errorf("stream %d already opened", streamID))
|
||||
}
|
||||
if streamID == ws.priorityUpdateBuf.streamID {
|
||||
ws.priorityUpdateBuf.streamID = 0
|
||||
opt.priority = ws.priorityUpdateBuf.priority
|
||||
}
|
||||
q := ws.queuePool.get()
|
||||
ws.streams[streamID] = streamMetadata{
|
||||
location: q,
|
||||
priority: opt.priority,
|
||||
}
|
||||
|
||||
u, i := opt.priority.urgency, opt.priority.incremental
|
||||
if ws.heads[u][i] == nil {
|
||||
ws.heads[u][i] = q
|
||||
q.next = q
|
||||
q.prev = q
|
||||
} else {
|
||||
// Queues are stored in a ring.
|
||||
// Insert the new stream before ws.head, putting it at the end of the list.
|
||||
q.prev = ws.heads[u][i].prev
|
||||
q.next = ws.heads[u][i]
|
||||
q.prev.next = q
|
||||
q.next.prev = q
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
|
||||
metadata := ws.streams[streamID]
|
||||
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.next == q {
|
||||
// This was the only open stream.
|
||||
ws.heads[u][i] = nil
|
||||
} else {
|
||||
q.prev.next = q.next
|
||||
q.next.prev = q.prev
|
||||
if ws.heads[u][i] == q {
|
||||
ws.heads[u][i] = q.next
|
||||
}
|
||||
}
|
||||
delete(ws.streams, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
metadata := ws.streams[streamID]
|
||||
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
|
||||
if q == nil {
|
||||
ws.priorityUpdateBuf.streamID = streamID
|
||||
ws.priorityUpdateBuf.priority = priority
|
||||
return
|
||||
}
|
||||
|
||||
// Remove stream from current location.
|
||||
if q.next == q {
|
||||
// This was the only open stream.
|
||||
ws.heads[u][i] = nil
|
||||
} else {
|
||||
q.prev.next = q.next
|
||||
q.next.prev = q.prev
|
||||
if ws.heads[u][i] == q {
|
||||
ws.heads[u][i] = q.next
|
||||
}
|
||||
}
|
||||
|
||||
// Insert stream to the new queue.
|
||||
u, i = priority.urgency, priority.incremental
|
||||
if ws.heads[u][i] == nil {
|
||||
ws.heads[u][i] = q
|
||||
q.next = q
|
||||
q.prev = q
|
||||
} else {
|
||||
// Queues are stored in a ring.
|
||||
// Insert the new stream before ws.head, putting it at the end of the list.
|
||||
q.prev = ws.heads[u][i].prev
|
||||
q.next = ws.heads[u][i]
|
||||
q.prev.next = q
|
||||
q.next.prev = q
|
||||
}
|
||||
|
||||
// Update the metadata.
|
||||
ws.streams[streamID] = streamMetadata{
|
||||
location: q,
|
||||
priority: priority,
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
|
||||
if wr.isControl() {
|
||||
ws.control.push(wr)
|
||||
return
|
||||
}
|
||||
q := ws.streams[wr.StreamID()].location
|
||||
if q == nil {
|
||||
// This is a closed stream.
|
||||
// wr should not be a HEADERS or DATA frame.
|
||||
// We push the request onto the control queue.
|
||||
if wr.DataSize() > 0 {
|
||||
panic("add DATA on non-open stream")
|
||||
}
|
||||
ws.control.push(wr)
|
||||
return
|
||||
}
|
||||
q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
|
||||
// Control and RST_STREAM frames first.
|
||||
if !ws.control.empty() {
|
||||
return ws.control.shift(), true
|
||||
}
|
||||
|
||||
// On the next Pop(), we want to prioritize incremental if we prioritized
|
||||
// non-incremental requests of the same urgency this time, and vice versa.
|
||||
// i.e. when there are incremental and non-incremental requests at the same
|
||||
// priority, we give 50% of our bandwidth to the incremental ones in
|
||||
// aggregate and 50% to the first non-incremental one (since
|
||||
// non-incremental streams do not use round-robin writes).
|
||||
ws.prioritizeIncremental = !ws.prioritizeIncremental
|
||||
|
||||
// Always prioritize lowest u (i.e. highest urgency level).
|
||||
for u := range ws.heads {
|
||||
for i := range ws.heads[u] {
|
||||
// When we want to prioritize incremental, we try to pop i=true
|
||||
// first before i=false when u is the same.
|
||||
if ws.prioritizeIncremental {
|
||||
i = (i + 1) % 2
|
||||
}
|
||||
q := ws.heads[u][i]
|
||||
if q == nil {
|
||||
continue
|
||||
}
|
||||
for {
|
||||
if wr, ok := q.consume(math.MaxInt32); ok {
|
||||
if i == 1 {
|
||||
// For incremental streams, we update head to q.next so
|
||||
// we can round-robin between multiple streams that can
|
||||
// immediately benefit from partial writes.
|
||||
ws.heads[u][i] = q.next
|
||||
} else {
|
||||
// For non-incremental streams, we try to finish one to
|
||||
// completion rather than doing round-robin. However,
|
||||
// we update head here so that if q.consume() is !ok
|
||||
// (e.g. the stream has no more frames to consume), head
|
||||
// is updated to the next q that has frames to consume
|
||||
// on future iterations. This way, we do not prioritize
|
||||
// writing to an unavailable stream on later Pop() calls,
|
||||
// preventing head-of-line blocking.
|
||||
ws.heads[u][i] = q
|
||||
}
|
||||
return wr, true
|
||||
}
|
||||
q = q.next
|
||||
if q == ws.heads[u][i] {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
77
vendor/golang.org/x/net/http2/writesched_random.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import "math"
|
||||
|
||||
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
|
||||
// priorities. Control frames like SETTINGS and PING are written before DATA
|
||||
// frames, but if no control frames are queued and multiple streams have queued
|
||||
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
|
||||
func NewRandomWriteScheduler() WriteScheduler {
|
||||
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
|
||||
}
|
||||
|
||||
type randomWriteScheduler struct {
|
||||
// zero are frames not associated with a specific stream.
|
||||
zero writeQueue
|
||||
|
||||
// sq contains the stream-specific queues, keyed by stream ID.
|
||||
// When a stream is idle, closed, or emptied, it's deleted
|
||||
// from the map.
|
||||
sq map[uint32]*writeQueue
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
// no-op: idle streams are not tracked
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
|
||||
q, ok := ws.sq[streamID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(ws.sq, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
||||
// no-op: priorities are ignored
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
|
||||
if wr.isControl() {
|
||||
ws.zero.push(wr)
|
||||
return
|
||||
}
|
||||
id := wr.StreamID()
|
||||
q, ok := ws.sq[id]
|
||||
if !ok {
|
||||
q = ws.queuePool.get()
|
||||
ws.sq[id] = q
|
||||
}
|
||||
q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
|
||||
// Control and RST_STREAM frames first.
|
||||
if !ws.zero.empty() {
|
||||
return ws.zero.shift(), true
|
||||
}
|
||||
// Iterate over all non-idle streams until finding one that can be consumed.
|
||||
for streamID, q := range ws.sq {
|
||||
if wr, ok := q.consume(math.MaxInt32); ok {
|
||||
if q.empty() {
|
||||
delete(ws.sq, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
return wr, true
|
||||
}
|
||||
}
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
119
vendor/golang.org/x/net/http2/writesched_roundrobin.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
type roundRobinWriteScheduler struct {
|
||||
// control contains control frames (SETTINGS, PING, etc.).
|
||||
control writeQueue
|
||||
|
||||
// streams maps stream ID to a queue.
|
||||
streams map[uint32]*writeQueue
|
||||
|
||||
// stream queues are stored in a circular linked list.
|
||||
// head is the next stream to write, or nil if there are no streams open.
|
||||
head *writeQueue
|
||||
|
||||
// pool of empty queues for reuse.
|
||||
queuePool writeQueuePool
|
||||
}
|
||||
|
||||
// newRoundRobinWriteScheduler constructs a new write scheduler.
|
||||
// The round robin scheduler prioritizes control frames
|
||||
// like SETTINGS and PING over DATA frames.
|
||||
// When there are no control frames to send, it performs a round-robin
|
||||
// selection from the ready streams.
|
||||
func newRoundRobinWriteScheduler() WriteScheduler {
|
||||
ws := &roundRobinWriteScheduler{
|
||||
streams: make(map[uint32]*writeQueue),
|
||||
}
|
||||
return ws
|
||||
}
|
||||
|
||||
func (ws *roundRobinWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
||||
if ws.streams[streamID] != nil {
|
||||
panic(fmt.Errorf("stream %d already opened", streamID))
|
||||
}
|
||||
q := ws.queuePool.get()
|
||||
ws.streams[streamID] = q
|
||||
if ws.head == nil {
|
||||
ws.head = q
|
||||
q.next = q
|
||||
q.prev = q
|
||||
} else {
|
||||
// Queues are stored in a ring.
|
||||
// Insert the new stream before ws.head, putting it at the end of the list.
|
||||
q.prev = ws.head.prev
|
||||
q.next = ws.head
|
||||
q.prev.next = q
|
||||
q.next.prev = q
|
||||
}
|
||||
}
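// roundRobinOrderSketch is an illustrative helper (not used by this package)
// demonstrating the ring built above: three streams opened in order are
// served by Pop in the same round-robin order. The bare &stream{id: ...}
// values exist only to give each FrameWriteRequest a stream identity for
// this sketch; real requests carry fully initialized streams.
func roundRobinOrderSketch() []uint32 {
	ws := newRoundRobinWriteScheduler()
	for _, id := range []uint32{2, 4, 6} {
		ws.OpenStream(id, OpenStreamOptions{})
		ws.Push(FrameWriteRequest{
			stream: &stream{id: id},
			write:  &writeData{streamID: id, endStream: true},
		})
	}
	var order []uint32
	for {
		wr, ok := ws.Pop()
		if !ok {
			break
		}
		order = append(order, wr.StreamID())
	}
	return order // [2 4 6]
}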
|
||||
|
||||
func (ws *roundRobinWriteScheduler) CloseStream(streamID uint32) {
|
||||
q := ws.streams[streamID]
|
||||
if q == nil {
|
||||
return
|
||||
}
|
||||
if q.next == q {
|
||||
// This was the only open stream.
|
||||
ws.head = nil
|
||||
} else {
|
||||
q.prev.next = q.next
|
||||
q.next.prev = q.prev
|
||||
if ws.head == q {
|
||||
ws.head = q.next
|
||||
}
|
||||
}
|
||||
delete(ws.streams, streamID)
|
||||
ws.queuePool.put(q)
|
||||
}
|
||||
|
||||
func (ws *roundRobinWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {}
|
||||
|
||||
func (ws *roundRobinWriteScheduler) Push(wr FrameWriteRequest) {
|
||||
if wr.isControl() {
|
||||
ws.control.push(wr)
|
||||
return
|
||||
}
|
||||
q := ws.streams[wr.StreamID()]
|
||||
if q == nil {
|
||||
// This is a closed stream.
|
||||
// wr should not be a HEADERS or DATA frame.
|
||||
// We push the request onto the control queue.
|
||||
if wr.DataSize() > 0 {
|
||||
panic("add DATA on non-open stream")
|
||||
}
|
||||
ws.control.push(wr)
|
||||
return
|
||||
}
|
||||
q.push(wr)
|
||||
}
|
||||
|
||||
func (ws *roundRobinWriteScheduler) Pop() (FrameWriteRequest, bool) {
|
||||
// Control and RST_STREAM frames first.
|
||||
if !ws.control.empty() {
|
||||
return ws.control.shift(), true
|
||||
}
|
||||
if ws.head == nil {
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
q := ws.head
|
||||
for {
|
||||
if wr, ok := q.consume(math.MaxInt32); ok {
|
||||
ws.head = q.next
|
||||
return wr, true
|
||||
}
|
||||
q = q.next
|
||||
if q == ws.head {
|
||||
break
|
||||
}
|
||||
}
|
||||
return FrameWriteRequest{}, false
|
||||
}
|
||||
13
vendor/golang.org/x/net/idna/go118.go
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.18
|
||||
|
||||
package idna
|
||||
|
||||
// Transitional processing is disabled by default in Go 1.18.
|
||||
// https://golang.org/issue/47510
|
||||
const transitionalLookup = false
|
||||
769
vendor/golang.org/x/net/idna/idna10.0.0.go
generated
vendored
Normal file
@@ -0,0 +1,769 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/bidi"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as the lookup string.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by some browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = transitional }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
//
|
||||
// This option corresponds to the VerifyDnsLength flag in UTS #46.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
|
||||
// in UTS #46.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
o.checkHyphens = enable
|
||||
if enable {
|
||||
o.fromPuny = validateFromPunycode
|
||||
} else {
|
||||
o.fromPuny = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHyphens sets whether to check for correct use of hyphens ('-') in
|
||||
// labels. Most web browsers do not have this option set, since labels such as
|
||||
// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
|
||||
//
|
||||
// This option corresponds to the CheckHyphens flag in UTS #46.
|
||||
func CheckHyphens(enable bool) Option {
|
||||
return func(o *options) { o.checkHyphens = enable }
|
||||
}
|
||||
|
||||
// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
|
||||
// A of RFC 5892, concerning the use of joiner runes.
|
||||
//
|
||||
// This option corresponds to the CheckJoiners flag in UTS #46.
|
||||
func CheckJoiners(enable bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
|
||||
// but is only useful if ValidateLabels is set.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details.
|
||||
//
|
||||
// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) { o.useSTD3Rules = use }
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
//
|
||||
// This option corresponds to the CheckBidi flag in UTS #46.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
}
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
checkHyphens bool
|
||||
checkJoiners bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
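// lookupProfileSketch is an illustrative example (not part of the package
// API) of composing the options above: MapForLookup sets the lookup mapping
// and validation, and BidiRule adds the RFC 5893 check that MapForLookup
// leaves out. Per the ToASCII documentation below, "bücher.example.com"
// converts to "xn--bcher-kva.example.com".
func lookupProfileSketch() (string, error) {
	p := New(MapForLookup(), BidiRule())
	return p.ToASCII("bücher.example.com")
}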
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.checkHyphens {
|
||||
s += ":CheckHyphens"
|
||||
}
|
||||
if p.checkJoiners {
|
||||
s += ":CheckJoiners"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: transitionalLookup,
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
verifyDNSLength: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
|
||||
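// exampleProfiles is a hypothetical in-package sketch, not part of the vendored
// code: it contrasts two of the profiles defined above. Lookup maps its input
// (for example lowercasing 'B') before conversion, while Registration rejects
// input that still requires mapping.
func exampleProfiles() (string, error) {
	a, _ := Lookup.ToASCII("Bücher.example")         // expected "xn--bcher-kva.example"
	_, err := Registration.ToASCII("Bücher.example") // expected non-nil: 'B' must be mapped first
	return a, err
}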
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
var isBidi bool
|
||||
if p.mapping != nil {
|
||||
s, isBidi, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// TODO: allow for a quick check of the tables data.
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
|
||||
labels.set(u)
|
||||
if err == nil && p.fromPuny != nil {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if isBidi && p.bidirule != nil && err == nil {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
if !p.bidirule(labels.label()) {
|
||||
err = &labelError{s, "B"}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
|
||||
// TODO: consider first doing a quick check to see if any of these checks
|
||||
// need to be done. This will make it slower in the general case, but
|
||||
// faster in the common case.
|
||||
mapped = norm.NFC.String(s)
|
||||
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
|
||||
return mapped, isBidi, nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
|
||||
// TODO: filter need for normalization in loop below.
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, false, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return s, bidi, runeError(utf8.RuneError)
|
||||
}
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, bidi, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, bidi, nil
|
||||
}
|
||||
|
||||
func (c info) isBidi(s string) bool {
|
||||
if !c.isMapped() {
|
||||
return c&attributesMask == rtl
|
||||
}
|
||||
// TODO: also store bidi info for mapped data. This is possible, but a bit
|
||||
// cumbersome and not for the common case.
|
||||
p, _ := bidi.LookupString(s)
|
||||
switch p.Class() {
|
||||
case bidi.R, bidi.AL, bidi.AN:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
|
||||
var (
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
// combinedInfoBits contains the or-ed bits of all runes. We use this
|
||||
// to derive the mayNeedNorm bit later. This may trigger normalization
|
||||
// overeagerly, but it will not do so in the common case. The end result
|
||||
// is another 10% saving on BenchmarkProfile for the common case.
|
||||
var combinedInfoBits info
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
b = append(b, s[k:i]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
k = len(s)
|
||||
if err == nil {
|
||||
err = runeError(utf8.RuneError)
|
||||
}
|
||||
break
|
||||
}
|
||||
combinedInfoBits |= info(v)
|
||||
bidi = bidi || info(v).isBidi(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
if combinedInfoBits&mayNeedNorm != 0 {
|
||||
s = norm.NFC.String(s)
|
||||
}
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, bidi, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
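// exampleLabelIter is a hypothetical in-package sketch, not part of the
// vendored code: it walks "a.b." with the iterator above, yielding "a" and
// "b"; the empty label after the trailing dot is never returned.
func exampleLabelIter() []string {
	var out []string
	it := labelIter{orig: "a.b."}
	for ; !it.done(); it.next() {
		out = append(out, it.label())
	}
	return out // ["a", "b"]
}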
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
// TODO: detect whether string may have to be normalized in the following
|
||||
// loop.
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if sz == 0 {
|
||||
return runeError(utf8.RuneError)
|
||||
}
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) (err error) {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if p.checkHyphens {
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
}
|
||||
if !p.checkJoiners {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.checkJoiners is only set if trie is set.
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
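// exampleContextJ is a hypothetical in-package sketch, not part of the
// vendored code: a zero-width joiner that is not preceded by a virama
// violates the ContextJ state machine above, so the Lookup profile's
// validateLabel is expected to report a labelError with code "C".
func exampleContextJ() error {
	return lookup.validateLabel("a\u200db")
}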
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
717
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
Normal file
717
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
Normal file
@@ -0,0 +1,717 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.10
|
||||
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
||||
func ToASCII(s string) (string, error) {
|
||||
return Punycode.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
return Punycode.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
||||
// compatibility. It is used by some browsers when resolving domain names. This
|
||||
// option is only meaningful if combined with MapForLookup.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = transitional }
|
||||
}
|
||||
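// exampleTransitional is a hypothetical in-package sketch, not part of the
// vendored code: under the transitional mapping "ß" is folded to "ss", while
// the non-transitional profile keeps it and punycode-encodes the label.
func exampleTransitional() (transitional, nonTransitional string) {
	transitional, _ = New(MapForLookup(), Transitional(true)).ToASCII("faß.de")     // expected "fass.de"
	nonTransitional, _ = New(MapForLookup(), Transitional(false)).ToASCII("faß.de") // expected "xn--fa-hia.de"
	return transitional, nonTransitional
}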
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
//
|
||||
// This option corresponds to the VerifyDnsLength flag in UTS #46.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
||||
func RemoveLeadingDots(remove bool) Option {
|
||||
return func(o *options) { o.removeLeadingDots = remove }
|
||||
}
|
||||
|
||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
||||
// In particular, ValidateLabels also sets the CheckHyphens and CheckJoiners flags
|
||||
// in UTS #46.
|
||||
func ValidateLabels(enable bool) Option {
|
||||
return func(o *options) {
|
||||
// Don't override existing mappings, but set one that at least checks
|
||||
// normalization if it is not set.
|
||||
if o.mapping == nil && enable {
|
||||
o.mapping = normalize
|
||||
}
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
o.checkHyphens = enable
|
||||
if enable {
|
||||
o.fromPuny = validateFromPunycode
|
||||
} else {
|
||||
o.fromPuny = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CheckHyphens sets whether to check for correct use of hyphens ('-') in
|
||||
// labels. Most web browsers do not have this option set, since labels such as
|
||||
// "r3---sn-apo3qvuoxuxbt-j5pe" are in common use.
|
||||
//
|
||||
// This option corresponds to the CheckHyphens flag in UTS #46.
|
||||
func CheckHyphens(enable bool) Option {
|
||||
return func(o *options) { o.checkHyphens = enable }
|
||||
}
|
||||
|
||||
// CheckJoiners sets whether to check the ContextJ rules as defined in Appendix
|
||||
// A of RFC 5892, concerning the use of joiner runes.
|
||||
//
|
||||
// This option corresponds to the CheckJoiners flag in UTS #46.
|
||||
func CheckJoiners(enable bool) Option {
|
||||
return func(o *options) {
|
||||
o.trie = trie
|
||||
o.checkJoiners = enable
|
||||
}
|
||||
}
|
||||
|
||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration,
|
||||
// but is only useful if ValidateLabels is set.
|
||||
//
|
||||
// This option is useful, for instance, for browsers that allow characters
|
||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
||||
// http://www.rfc-editor.org/std/std3.txt for more details.
|
||||
//
|
||||
// This option corresponds to the UseSTD3ASCIIRules flag in UTS #46.
|
||||
func StrictDomainName(use bool) Option {
|
||||
return func(o *options) { o.useSTD3Rules = use }
|
||||
}
|
||||
|
||||
// NOTE: the following options pull in tables. The tables should not be linked
|
||||
// in as long as the options are not used.
|
||||
|
||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
||||
// that relies on proper validation of labels should include this rule.
|
||||
//
|
||||
// This option corresponds to the CheckBidi flag in UTS #46.
|
||||
func BidiRule() Option {
|
||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
||||
}
|
||||
|
||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
||||
func ValidateForRegistration() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateRegistration
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
VerifyDNSLength(true)(o)
|
||||
BidiRule()(o)
|
||||
}
|
||||
}
|
||||
|
||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
||||
// transformed for domain name lookup according to the requirements set out in
|
||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
||||
// to add this check.
|
||||
//
|
||||
// The mappings include normalization and mapping case, width and other
|
||||
// compatibility mappings.
|
||||
func MapForLookup() Option {
|
||||
return func(o *options) {
|
||||
o.mapping = validateAndMap
|
||||
StrictDomainName(true)(o)
|
||||
ValidateLabels(true)(o)
|
||||
RemoveLeadingDots(true)(o)
|
||||
}
|
||||
}
|
||||
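// exampleRelaxedSTD3 is a hypothetical in-package sketch, not part of the
// vendored code: MapForLookup composes several options, and a later
// StrictDomainName(false) relaxes the STD3 restriction so that '_' (U+005F),
// mentioned in the StrictDomainName comment above, is accepted.
func exampleRelaxedSTD3() (string, error) {
	p := New(MapForLookup(), StrictDomainName(false))
	return p.ToASCII("host_name.example") // expected "host_name.example", nil
}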
|
||||
type options struct {
|
||||
transitional bool
|
||||
useSTD3Rules bool
|
||||
checkHyphens bool
|
||||
checkJoiners bool
|
||||
verifyDNSLength bool
|
||||
removeLeadingDots bool
|
||||
|
||||
trie *idnaTrie
|
||||
|
||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
||||
fromPuny func(p *Profile, s string) error
|
||||
|
||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
||||
mapping func(p *Profile, s string) (string, error)
|
||||
|
||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
||||
// defined in RFC 5893.
|
||||
bidirule func(s string) bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of an IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
//
|
||||
// With no options, the returned Profile is the most permissive and equals the
|
||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
||||
// for lookup and registration purposes respectively, which can be tailored by
|
||||
// adding more fine-grained options, where later options override earlier
|
||||
// options.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.useSTD3Rules {
|
||||
s += ":UseSTD3Rules"
|
||||
}
|
||||
if p.checkHyphens {
|
||||
s += ":CheckHyphens"
|
||||
}
|
||||
if p.checkJoiners {
|
||||
s += ":CheckJoiners"
|
||||
}
|
||||
if p.verifyDNSLength {
|
||||
s += ":VerifyDNSLength"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
||||
// of validation.
|
||||
Punycode *Profile = punycode
|
||||
|
||||
// Lookup is the recommended profile for looking up domain names, according
|
||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
||||
// change over time.
|
||||
Lookup *Profile = lookup
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display *Profile = display
|
||||
|
||||
// Registration is the recommended profile for checking whether a given
|
||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
||||
Registration *Profile = registration
|
||||
|
||||
punycode = &Profile{}
|
||||
lookup = &Profile{options{
|
||||
transitional: true,
|
||||
removeLeadingDots: true,
|
||||
useSTD3Rules: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
display = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
removeLeadingDots: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateAndMap,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
registration = &Profile{options{
|
||||
useSTD3Rules: true,
|
||||
verifyDNSLength: true,
|
||||
checkHyphens: true,
|
||||
checkJoiners: true,
|
||||
trie: trie,
|
||||
fromPuny: validateFromPunycode,
|
||||
mapping: validateRegistration,
|
||||
bidirule: bidirule.ValidString,
|
||||
}}
|
||||
|
||||
// TODO: profiles
|
||||
// Register: recommended for approving domain names: don't do any mappings
|
||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
||||
)
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see https://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var err error
|
||||
if p.mapping != nil {
|
||||
s, err = p.mapping(p, s)
|
||||
}
|
||||
// Remove leading empty labels.
|
||||
if p.removeLeadingDots {
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
}
|
||||
// It seems like we should only create this error on ToASCII, but the
|
||||
// UTS 46 conformance tests suggest we should always check this.
|
||||
if err == nil && p.verifyDNSLength && s == "" {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil && p.verifyDNSLength {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
labels.set(u)
|
||||
if err == nil && p.fromPuny != nil {
|
||||
err = p.fromPuny(p, u)
|
||||
}
|
||||
if err == nil {
|
||||
// This should be called on NonTransitional, according to the
|
||||
// spec, but that currently does not have any effect. Use the
|
||||
// original profile to preserve options.
|
||||
err = p.validateLabel(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validateLabel(label)
|
||||
}
|
||||
}
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
func normalize(p *Profile, s string) (string, error) {
|
||||
return norm.NFC.String(s), nil
|
||||
}
|
||||
|
||||
func validateRegistration(p *Profile, s string) (string, error) {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return s, &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
||||
// for strict conformance to IDNA2008.
|
||||
case valid, deviation:
|
||||
case disallowed, mapped, unknown, ignored:
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
return s, runeError(r)
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func validateAndMap(p *Profile, s string) (string, error) {
|
||||
var (
|
||||
err error
|
||||
b []byte
|
||||
k int
|
||||
)
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
s = norm.NFC.String(s)
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if p.useSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func validateFromPunycode(p *Profile, s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validateLabel(s string) error {
|
||||
if s == "" {
|
||||
if p.verifyDNSLength {
|
||||
return &labelError{s, "A4"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if p.bidirule != nil && !p.bidirule(s) {
|
||||
return &labelError{s, "B"}
|
||||
}
|
||||
if p.checkHyphens {
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
}
|
||||
if !p.checkJoiners {
|
||||
return nil
|
||||
}
|
||||
trie := p.trie // p.checkJoiners is only set if trie is set.
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] >= utf8.RuneSelf {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
11
vendor/golang.org/x/net/idna/pre_go118.go
generated
vendored
Normal file
11
vendor/golang.org/x/net/idna/pre_go118.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2021 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.18
|
||||
|
||||
package idna
|
||||
|
||||
const transitionalLookup = true
|
||||
217
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
Normal file
217
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
Normal file
@@ -0,0 +1,217 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// This file implements the Punycode algorithm from RFC 3492.
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// These parameter values are specified in section 5.
|
||||
//
|
||||
// All computation is done with int32s, so that overflow behavior is identical
|
||||
// regardless of whether int is 32-bit or 64-bit.
|
||||
const (
|
||||
base int32 = 36
|
||||
damp int32 = 700
|
||||
initialBias int32 = 72
|
||||
initialN int32 = 128
|
||||
skew int32 = 38
|
||||
tmax int32 = 26
|
||||
tmin int32 = 1
|
||||
)
|
||||
|
||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
||||
|
||||
// decode decodes a string as specified in section 6.2.
|
||||
func decode(encoded string) (string, error) {
|
||||
if encoded == "" {
|
||||
return "", nil
|
||||
}
|
||||
pos := 1 + strings.LastIndex(encoded, "-")
|
||||
if pos == 1 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
if pos == len(encoded) {
|
||||
return encoded[:len(encoded)-1], nil
|
||||
}
|
||||
output := make([]rune, 0, len(encoded))
|
||||
if pos != 0 {
|
||||
for _, r := range encoded[:pos-1] {
|
||||
output = append(output, r)
|
||||
}
|
||||
}
|
||||
i, n, bias := int32(0), initialN, initialBias
|
||||
overflow := false
|
||||
for pos < len(encoded) {
|
||||
oldI, w := i, int32(1)
|
||||
for k := base; ; k += base {
|
||||
if pos == len(encoded) {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
digit, ok := decodeDigit(encoded[pos])
|
||||
if !ok {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
pos++
|
||||
i, overflow = madd(i, digit, w)
|
||||
if overflow {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
t := k - bias
|
||||
if k <= bias {
|
||||
t = tmin
|
||||
} else if k >= bias+tmax {
|
||||
t = tmax
|
||||
}
|
||||
if digit < t {
|
||||
break
|
||||
}
|
||||
w, overflow = madd(0, w, base-t)
|
||||
if overflow {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
}
|
||||
if len(output) >= 1024 {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
x := int32(len(output) + 1)
|
||||
bias = adapt(i-oldI, x, oldI == 0)
|
||||
n += i / x
|
||||
i %= x
|
||||
if n < 0 || n > utf8.MaxRune {
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
output = append(output, 0)
|
||||
copy(output[i+1:], output[i:])
|
||||
output[i] = n
|
||||
i++
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// encode encodes a string as specified in section 6.3 and prepends prefix to
|
||||
// the result.
|
||||
//
|
||||
// The "while h < length(input)" line in the specification becomes "for
|
||||
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
|
||||
func encode(prefix, s string) (string, error) {
|
||||
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
|
||||
copy(output, prefix)
|
||||
delta, n, bias := int32(0), initialN, initialBias
|
||||
b, remaining := int32(0), int32(0)
|
||||
for _, r := range s {
|
||||
if r < 0x80 {
|
||||
b++
|
||||
output = append(output, byte(r))
|
||||
} else {
|
||||
remaining++
|
||||
}
|
||||
}
|
||||
h := b
|
||||
if b > 0 {
|
||||
output = append(output, '-')
|
||||
}
|
||||
overflow := false
|
||||
for remaining != 0 {
|
||||
m := int32(0x7fffffff)
|
||||
for _, r := range s {
|
||||
if m > r && r >= n {
|
||||
m = r
|
||||
}
|
||||
}
|
||||
delta, overflow = madd(delta, m-n, h+1)
|
||||
if overflow {
|
||||
return "", punyError(s)
|
||||
}
|
||||
n = m
|
||||
for _, r := range s {
|
||||
if r < n {
|
||||
delta++
|
||||
if delta < 0 {
|
||||
return "", punyError(s)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if r > n {
|
||||
continue
|
||||
}
|
||||
q := delta
|
||||
for k := base; ; k += base {
|
||||
t := k - bias
|
||||
if k <= bias {
|
||||
t = tmin
|
||||
} else if k >= bias+tmax {
|
||||
t = tmax
|
||||
}
|
||||
if q < t {
|
||||
break
|
||||
}
|
||||
output = append(output, encodeDigit(t+(q-t)%(base-t)))
|
||||
q = (q - t) / (base - t)
|
||||
}
|
||||
output = append(output, encodeDigit(q))
|
||||
bias = adapt(delta, h+1, h == b)
|
||||
delta = 0
|
||||
h++
|
||||
remaining--
|
||||
}
|
||||
delta++
|
||||
n++
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
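// punycodeRoundTrip is a hypothetical in-package sketch, not part of the
// vendored code: it ties decode and encode above to the ToASCII/ToUnicode
// examples, converting the A-label suffix "bcher-kva" back to "bücher" and
// re-encoding it with the ACE prefix.
func punycodeRoundTrip() (u, a string, err error) {
	u, err = decode("bcher-kva") // expected "bücher"
	if err != nil {
		return "", "", err
	}
	a, err = encode(acePrefix, u) // expected "xn--bcher-kva"
	return u, a, err
}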
|
||||
// madd computes a + (b * c), detecting overflow.
|
||||
func madd(a, b, c int32) (next int32, overflow bool) {
|
||||
p := int64(b) * int64(c)
|
||||
if p > math.MaxInt32-int64(a) {
|
||||
return 0, true
|
||||
}
|
||||
return a + int32(p), false
|
||||
}
|
||||
|
||||
func decodeDigit(x byte) (digit int32, ok bool) {
|
||||
switch {
|
||||
case '0' <= x && x <= '9':
|
||||
return int32(x - ('0' - 26)), true
|
||||
case 'A' <= x && x <= 'Z':
|
||||
return int32(x - 'A'), true
|
||||
case 'a' <= x && x <= 'z':
|
||||
return int32(x - 'a'), true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func encodeDigit(digit int32) byte {
|
||||
switch {
|
||||
case 0 <= digit && digit < 26:
|
||||
return byte(digit + 'a')
|
||||
case 26 <= digit && digit < 36:
|
||||
return byte(digit + ('0' - 26))
|
||||
}
|
||||
panic("idna: internal error in punycode encoding")
|
||||
}
|
||||
|
||||
// adapt is the bias adaptation function specified in section 6.1.
|
||||
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
||||
if firstTime {
|
||||
delta /= damp
|
||||
} else {
|
||||
delta /= 2
|
||||
}
|
||||
delta += delta / numPoints
|
||||
k := int32(0)
|
||||
for delta > ((base-tmin)*tmax)/2 {
|
||||
delta /= base - tmin
|
||||
k += base
|
||||
}
|
||||
return k + (base-tmin+1)*delta/(delta+skew)
|
||||
}
|
||||
4559
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
Normal file
4559
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4653
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
Normal file
4653
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4733
vendor/golang.org/x/net/idna/tables12.0.0.go
generated
vendored
Normal file
4733
vendor/golang.org/x/net/idna/tables12.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4959
vendor/golang.org/x/net/idna/tables13.0.0.go
generated
vendored
Normal file
4959
vendor/golang.org/x/net/idna/tables13.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
5144
vendor/golang.org/x/net/idna/tables15.0.0.go
generated
vendored
Normal file
5144
vendor/golang.org/x/net/idna/tables15.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
4486
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
Normal file
4486
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
Normal file
File diff suppressed because it is too large
51
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
51
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// Sparse block handling code.
|
||||
|
||||
type valueRange struct {
|
||||
value uint16 // header: value:stride
|
||||
lo, hi byte // header: lo:n
|
||||
}
|
||||
|
||||
type sparseBlocks struct {
|
||||
values []valueRange
|
||||
offset []uint16
|
||||
}
|
||||
|
||||
var idnaSparse = sparseBlocks{
|
||||
values: idnaSparseValues[:],
|
||||
offset: idnaSparseOffset[:],
|
||||
}
|
||||
|
||||
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
|
||||
var trie = &idnaTrie{}
|
||||
|
||||
// lookup determines the type of block n and looks up the value for b.
|
||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
||||
// the value for b is r.value + (b - r.lo) * stride.
|
||||
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
|
||||
offset := t.offset[n]
|
||||
header := t.values[offset]
|
||||
lo := offset + 1
|
||||
hi := lo + uint16(header.lo)
|
||||
for lo < hi {
|
||||
m := lo + (hi-lo)/2
|
||||
r := t.values[m]
|
||||
if r.lo <= b && b <= r.hi {
|
||||
return r.value + uint16(b-r.lo)*header.value
|
||||
}
|
||||
if b < r.lo {
|
||||
hi = m
|
||||
} else {
|
||||
lo = m + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
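// exampleSparseLookup is a hypothetical in-package sketch, not part of the
// vendored code: it builds a one-block sparseBlocks table and applies the
// formula from the comment above, r.value + (b-r.lo)*stride.
func exampleSparseLookup() uint16 {
	t := &sparseBlocks{
		values: []valueRange{
			{value: 2, lo: 1},                // header: stride 2, one range follows
			{value: 100, lo: 0x80, hi: 0x8f}, // bytes 0x80..0x8f map to 100, 102, ...
		},
		offset: []uint16{0},
	}
	return t.lookup(0, 0x83) // 100 + (0x83-0x80)*2 = 106
}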