mirror of
https://github.com/taigrr/wtf
synced 2025-01-18 04:03:14 -08:00
Delete the vendored golang.org packages
This commit is contained in:
parent
b62969e07c
commit
e9bd5e2fb9
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
3
vendor/golang.org/x/crypto/AUTHORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code refers to The Go Authors for copyright purposes.
|
|
||||||
# The master list of authors is in the main Go distribution,
|
|
||||||
# visible at https://tip.golang.org/AUTHORS.
|
|
3
vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
3
vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code was written by the Go contributors.
|
|
||||||
# The master list of contributors is in the main Go distribution,
|
|
||||||
# visible at https://tip.golang.org/CONTRIBUTORS.
|
|
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
Additional IP Rights Grant (Patents)
|
|
||||||
|
|
||||||
"This implementation" means the copyrightable works distributed by
|
|
||||||
Google as part of the Go project.
|
|
||||||
|
|
||||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
|
||||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
|
||||||
patent license to make, have made, use, offer to sell, sell, import,
|
|
||||||
transfer and otherwise run, modify and propagate the contents of this
|
|
||||||
implementation of Go, where such license applies only to those patent
|
|
||||||
claims, both currently owned or controlled by Google and acquired in
|
|
||||||
the future, licensable by Google that are necessarily infringed by this
|
|
||||||
implementation of Go. This grant does not include claims that would be
|
|
||||||
infringed only as a consequence of further modification of this
|
|
||||||
implementation. If you or your agent or exclusive licensee institute or
|
|
||||||
order or agree to the institution of patent litigation against any
|
|
||||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
|
||||||
that this implementation of Go or any code incorporated within this
|
|
||||||
implementation of Go constitutes direct or contributory patent
|
|
||||||
infringement, or inducement of patent infringement, then any patent
|
|
||||||
rights granted to you under this License for this implementation of Go
|
|
||||||
shall terminate as of the date such litigation is filed.
|
|
533
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
533
vendor/golang.org/x/crypto/cast5/cast5.go
generated
vendored
@ -1,533 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package cast5 implements CAST5, as defined in RFC 2144.
|
|
||||||
//
|
|
||||||
// CAST5 is a legacy cipher and its short block size makes it vulnerable to
|
|
||||||
// birthday bound attacks (see https://sweet32.info). It should only be used
|
|
||||||
// where compatibility with legacy systems, not security, is the goal.
|
|
||||||
//
|
|
||||||
// Deprecated: any new system should use AES (from crypto/aes, if necessary in
|
|
||||||
// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
|
|
||||||
// golang.org/x/crypto/chacha20poly1305).
|
|
||||||
package cast5 // import "golang.org/x/crypto/cast5"
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
const BlockSize = 8
|
|
||||||
const KeySize = 16
|
|
||||||
|
|
||||||
type Cipher struct {
|
|
||||||
masking [16]uint32
|
|
||||||
rotate [16]uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewCipher(key []byte) (c *Cipher, err error) {
|
|
||||||
if len(key) != KeySize {
|
|
||||||
return nil, errors.New("CAST5: keys must be 16 bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
c = new(Cipher)
|
|
||||||
c.keySchedule(key)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cipher) BlockSize() int {
|
|
||||||
return BlockSize
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cipher) Encrypt(dst, src []byte) {
|
|
||||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
|
||||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
|
||||||
|
|
||||||
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
|
|
||||||
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
|
|
||||||
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
|
|
||||||
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
|
|
||||||
|
|
||||||
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
|
|
||||||
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
|
|
||||||
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
|
|
||||||
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
|
|
||||||
|
|
||||||
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
|
|
||||||
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
|
|
||||||
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
|
|
||||||
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
|
|
||||||
|
|
||||||
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
|
|
||||||
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
|
|
||||||
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
|
|
||||||
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
|
|
||||||
|
|
||||||
dst[0] = uint8(r >> 24)
|
|
||||||
dst[1] = uint8(r >> 16)
|
|
||||||
dst[2] = uint8(r >> 8)
|
|
||||||
dst[3] = uint8(r)
|
|
||||||
dst[4] = uint8(l >> 24)
|
|
||||||
dst[5] = uint8(l >> 16)
|
|
||||||
dst[6] = uint8(l >> 8)
|
|
||||||
dst[7] = uint8(l)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cipher) Decrypt(dst, src []byte) {
|
|
||||||
l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
|
||||||
r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
|
||||||
|
|
||||||
l, r = r, l^f1(r, c.masking[15], c.rotate[15])
|
|
||||||
l, r = r, l^f3(r, c.masking[14], c.rotate[14])
|
|
||||||
l, r = r, l^f2(r, c.masking[13], c.rotate[13])
|
|
||||||
l, r = r, l^f1(r, c.masking[12], c.rotate[12])
|
|
||||||
|
|
||||||
l, r = r, l^f3(r, c.masking[11], c.rotate[11])
|
|
||||||
l, r = r, l^f2(r, c.masking[10], c.rotate[10])
|
|
||||||
l, r = r, l^f1(r, c.masking[9], c.rotate[9])
|
|
||||||
l, r = r, l^f3(r, c.masking[8], c.rotate[8])
|
|
||||||
|
|
||||||
l, r = r, l^f2(r, c.masking[7], c.rotate[7])
|
|
||||||
l, r = r, l^f1(r, c.masking[6], c.rotate[6])
|
|
||||||
l, r = r, l^f3(r, c.masking[5], c.rotate[5])
|
|
||||||
l, r = r, l^f2(r, c.masking[4], c.rotate[4])
|
|
||||||
|
|
||||||
l, r = r, l^f1(r, c.masking[3], c.rotate[3])
|
|
||||||
l, r = r, l^f3(r, c.masking[2], c.rotate[2])
|
|
||||||
l, r = r, l^f2(r, c.masking[1], c.rotate[1])
|
|
||||||
l, r = r, l^f1(r, c.masking[0], c.rotate[0])
|
|
||||||
|
|
||||||
dst[0] = uint8(r >> 24)
|
|
||||||
dst[1] = uint8(r >> 16)
|
|
||||||
dst[2] = uint8(r >> 8)
|
|
||||||
dst[3] = uint8(r)
|
|
||||||
dst[4] = uint8(l >> 24)
|
|
||||||
dst[5] = uint8(l >> 16)
|
|
||||||
dst[6] = uint8(l >> 8)
|
|
||||||
dst[7] = uint8(l)
|
|
||||||
}
|
|
||||||
|
|
||||||
type keyScheduleA [4][7]uint8
|
|
||||||
type keyScheduleB [4][5]uint8
|
|
||||||
|
|
||||||
// keyScheduleRound contains the magic values for a round of the key schedule.
|
|
||||||
// The keyScheduleA deals with the lines like:
|
|
||||||
// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
|
|
||||||
// Conceptually, both x and z are in the same array, x first. The first
|
|
||||||
// element describes which word of this array gets written to and the
|
|
||||||
// second, which word gets read. So, for the line above, it's "4, 0", because
|
|
||||||
// it's writing to the first word of z, which, being after x, is word 4, and
|
|
||||||
// reading from the first word of x: word 0.
|
|
||||||
//
|
|
||||||
// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
|
|
||||||
// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
|
|
||||||
// that it's z that we're indexing.
|
|
||||||
//
|
|
||||||
// keyScheduleB deals with lines like:
|
|
||||||
// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
|
|
||||||
// "K1" is ignored because key words are always written in order. So the five
|
|
||||||
// elements are the S-box indexes. They use the same form as in keyScheduleA,
|
|
||||||
// above.
|
|
||||||
|
|
||||||
type keyScheduleRound struct{}
|
|
||||||
type keySchedule []keyScheduleRound
|
|
||||||
|
|
||||||
var schedule = []struct {
|
|
||||||
a keyScheduleA
|
|
||||||
b keyScheduleB
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
keyScheduleA{
|
|
||||||
{4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
|
|
||||||
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
|
|
||||||
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
|
|
||||||
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
|
|
||||||
},
|
|
||||||
keyScheduleB{
|
|
||||||
{16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
|
|
||||||
{16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
|
|
||||||
{16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
|
|
||||||
{16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
keyScheduleA{
|
|
||||||
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
|
|
||||||
{1, 4, 0, 2, 1, 3, 16 + 2},
|
|
||||||
{2, 5, 7, 6, 5, 4, 16 + 1},
|
|
||||||
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
|
|
||||||
},
|
|
||||||
keyScheduleB{
|
|
||||||
{3, 2, 0xc, 0xd, 8},
|
|
||||||
{1, 0, 0xe, 0xf, 0xd},
|
|
||||||
{7, 6, 8, 9, 3},
|
|
||||||
{5, 4, 0xa, 0xb, 7},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
keyScheduleA{
|
|
||||||
{4, 0, 0xd, 0xf, 0xc, 0xe, 8},
|
|
||||||
{5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
|
|
||||||
{6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
|
|
||||||
{7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
|
|
||||||
},
|
|
||||||
keyScheduleB{
|
|
||||||
{16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
|
|
||||||
{16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
|
|
||||||
{16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
|
|
||||||
{16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
keyScheduleA{
|
|
||||||
{0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
|
|
||||||
{1, 4, 0, 2, 1, 3, 16 + 2},
|
|
||||||
{2, 5, 7, 6, 5, 4, 16 + 1},
|
|
||||||
{3, 7, 0xa, 9, 0xb, 8, 16 + 3},
|
|
||||||
},
|
|
||||||
keyScheduleB{
|
|
||||||
{8, 9, 7, 6, 3},
|
|
||||||
{0xa, 0xb, 5, 4, 7},
|
|
||||||
{0xc, 0xd, 3, 2, 8},
|
|
||||||
{0xe, 0xf, 1, 0, 0xd},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Cipher) keySchedule(in []byte) {
|
|
||||||
var t [8]uint32
|
|
||||||
var k [32]uint32
|
|
||||||
|
|
||||||
for i := 0; i < 4; i++ {
|
|
||||||
j := i * 4
|
|
||||||
t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
|
|
||||||
}
|
|
||||||
|
|
||||||
x := []byte{6, 7, 4, 5}
|
|
||||||
ki := 0
|
|
||||||
|
|
||||||
for half := 0; half < 2; half++ {
|
|
||||||
for _, round := range schedule {
|
|
||||||
for j := 0; j < 4; j++ {
|
|
||||||
var a [7]uint8
|
|
||||||
copy(a[:], round.a[j][:])
|
|
||||||
w := t[a[1]]
|
|
||||||
w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
|
|
||||||
w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
|
|
||||||
w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
|
|
||||||
w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
|
|
||||||
w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
|
|
||||||
t[a[0]] = w
|
|
||||||
}
|
|
||||||
|
|
||||||
for j := 0; j < 4; j++ {
|
|
||||||
var b [5]uint8
|
|
||||||
copy(b[:], round.b[j][:])
|
|
||||||
w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
|
|
||||||
w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
|
|
||||||
w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
|
|
||||||
w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
|
|
||||||
w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
|
|
||||||
k[ki] = w
|
|
||||||
ki++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < 16; i++ {
|
|
||||||
c.masking[i] = k[i]
|
|
||||||
c.rotate[i] = uint8(k[16+i] & 0x1f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are the three 'f' functions. See RFC 2144, section 2.2.
|
|
||||||
func f1(d, m uint32, r uint8) uint32 {
|
|
||||||
t := m + d
|
|
||||||
I := (t << r) | (t >> (32 - r))
|
|
||||||
return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
|
|
||||||
}
|
|
||||||
|
|
||||||
func f2(d, m uint32, r uint8) uint32 {
|
|
||||||
t := m ^ d
|
|
||||||
I := (t << r) | (t >> (32 - r))
|
|
||||||
return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
|
|
||||||
}
|
|
||||||
|
|
||||||
func f3(d, m uint32, r uint8) uint32 {
|
|
||||||
t := m - d
|
|
||||||
I := (t << r) | (t >> (32 - r))
|
|
||||||
return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
|
|
||||||
}
|
|
||||||
|
|
||||||
var sBox = [8][256]uint32{
|
|
||||||
{
|
|
||||||
0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
|
|
||||||
0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
|
|
||||||
0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
|
|
||||||
0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
|
|
||||||
0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
|
|
||||||
0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
|
|
||||||
0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
|
|
||||||
0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
|
|
||||||
0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
|
|
||||||
0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
|
|
||||||
0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
|
|
||||||
0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
|
|
||||||
0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
|
|
||||||
0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
|
|
||||||
0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
|
|
||||||
0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
|
|
||||||
0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
|
|
||||||
0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
|
|
||||||
0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
|
|
||||||
0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
|
|
||||||
0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
|
|
||||||
0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
|
|
||||||
0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
|
|
||||||
0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
|
|
||||||
0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
|
|
||||||
0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
|
|
||||||
0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
|
|
||||||
0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
|
|
||||||
0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
|
|
||||||
0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
|
|
||||||
0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
|
|
||||||
0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
|
|
||||||
0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
|
|
||||||
0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
|
|
||||||
0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
|
|
||||||
0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
|
|
||||||
0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
|
|
||||||
0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
|
|
||||||
0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
|
|
||||||
0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
|
|
||||||
0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
|
|
||||||
0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
|
|
||||||
0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
|
|
||||||
0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
|
|
||||||
0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
|
|
||||||
0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
|
|
||||||
0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
|
|
||||||
0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
|
|
||||||
0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
|
|
||||||
0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
|
|
||||||
0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
|
|
||||||
0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
|
|
||||||
0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
|
|
||||||
0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
|
|
||||||
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
|
|
||||||
0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
|
|
||||||
0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
|
|
||||||
0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
|
|
||||||
0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
|
|
||||||
0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
|
|
||||||
0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
|
|
||||||
0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
|
|
||||||
0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
|
|
||||||
0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
|
|
||||||
0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
|
|
||||||
0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
|
|
||||||
0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
|
|
||||||
0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
|
|
||||||
0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
|
|
||||||
0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
|
|
||||||
0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
|
|
||||||
0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
|
|
||||||
0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
|
|
||||||
0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
|
|
||||||
0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
|
|
||||||
0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
|
|
||||||
0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
|
|
||||||
0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
|
|
||||||
0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
|
|
||||||
0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
|
|
||||||
0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
|
|
||||||
0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
|
|
||||||
0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
|
|
||||||
0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
|
|
||||||
0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
|
|
||||||
0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
|
|
||||||
0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
|
|
||||||
0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
|
|
||||||
0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
|
|
||||||
0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
|
|
||||||
0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
|
|
||||||
0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
|
|
||||||
0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
|
|
||||||
0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
|
|
||||||
0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
|
|
||||||
0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
|
|
||||||
0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
|
|
||||||
0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
|
|
||||||
0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
|
|
||||||
0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
|
|
||||||
0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
|
|
||||||
0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
|
|
||||||
0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
|
|
||||||
0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
|
|
||||||
0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
|
|
||||||
0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
|
|
||||||
0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
|
|
||||||
0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
|
|
||||||
0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
|
|
||||||
0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
|
|
||||||
0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
|
|
||||||
0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
|
|
||||||
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
|
|
||||||
0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
|
|
||||||
0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
|
|
||||||
0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
|
|
||||||
0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
|
|
||||||
0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
|
|
||||||
0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
|
|
||||||
0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
|
|
||||||
0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
|
|
||||||
0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
|
|
||||||
0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
|
|
||||||
0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
|
|
||||||
0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
|
|
||||||
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
|
|
||||||
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
|
|
||||||
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
|
|
||||||
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
|
|
||||||
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
|
|
||||||
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
|
|
||||||
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
|
|
||||||
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
|
|
||||||
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
|
|
||||||
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
|
|
||||||
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
|
|
||||||
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
|
|
||||||
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
|
|
||||||
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
|
|
||||||
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
|
|
||||||
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
|
|
||||||
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
|
|
||||||
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
|
|
||||||
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
|
|
||||||
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
|
|
||||||
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
|
|
||||||
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
|
|
||||||
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
|
|
||||||
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
|
|
||||||
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
|
|
||||||
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
|
|
||||||
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
|
|
||||||
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
|
|
||||||
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
|
|
||||||
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
|
|
||||||
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
|
|
||||||
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
|
|
||||||
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
|
|
||||||
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
|
|
||||||
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
|
|
||||||
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
|
|
||||||
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
|
|
||||||
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
|
|
||||||
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
|
|
||||||
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
|
|
||||||
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
|
|
||||||
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
|
|
||||||
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
|
|
||||||
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
|
|
||||||
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
|
|
||||||
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
|
|
||||||
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
|
|
||||||
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
|
|
||||||
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
|
|
||||||
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
|
|
||||||
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
|
|
||||||
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
|
|
||||||
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
|
|
||||||
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
|
|
||||||
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
|
|
||||||
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
|
|
||||||
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
|
|
||||||
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
|
|
||||||
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
|
|
||||||
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
|
|
||||||
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
|
|
||||||
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
|
|
||||||
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
|
|
||||||
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
|
|
||||||
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
|
|
||||||
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
|
|
||||||
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
|
|
||||||
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
|
|
||||||
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
|
|
||||||
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
|
|
||||||
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
|
|
||||||
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
|
|
||||||
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
|
|
||||||
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
|
|
||||||
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
|
|
||||||
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
|
|
||||||
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
|
|
||||||
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
|
|
||||||
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
|
|
||||||
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
|
|
||||||
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
|
|
||||||
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
|
|
||||||
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
|
|
||||||
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
|
|
||||||
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
|
|
||||||
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
|
|
||||||
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
|
|
||||||
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
|
|
||||||
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
|
|
||||||
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
|
|
||||||
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
|
|
||||||
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
|
|
||||||
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
|
|
||||||
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
|
|
||||||
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
|
|
||||||
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
|
|
||||||
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
|
|
||||||
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
|
|
||||||
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
|
|
||||||
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
|
|
||||||
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
|
|
||||||
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
|
|
||||||
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
|
|
||||||
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
|
|
||||||
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
|
|
||||||
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
|
|
||||||
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
|
|
||||||
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
|
|
||||||
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
|
|
||||||
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
|
|
||||||
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
|
|
||||||
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
|
|
||||||
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
|
|
||||||
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
|
|
||||||
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
|
|
||||||
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
|
|
||||||
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
|
|
||||||
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
|
|
||||||
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
|
|
||||||
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
|
|
||||||
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
|
|
||||||
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
|
|
||||||
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
|
|
||||||
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
|
|
||||||
},
|
|
||||||
}
|
|
219
vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
219
vendor/golang.org/x/crypto/openpgp/armor/armor.go
generated
vendored
@ -1,219 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
|
|
||||||
// very similar to PEM except that it has an additional CRC checksum.
|
|
||||||
package armor // import "golang.org/x/crypto/openpgp/armor"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Block represents an OpenPGP armored structure.
|
|
||||||
//
|
|
||||||
// The encoded form is:
|
|
||||||
// -----BEGIN Type-----
|
|
||||||
// Headers
|
|
||||||
//
|
|
||||||
// base64-encoded Bytes
|
|
||||||
// '=' base64 encoded checksum
|
|
||||||
// -----END Type-----
|
|
||||||
// where Headers is a possibly empty sequence of Key: Value lines.
|
|
||||||
//
|
|
||||||
// Since the armored data can be very large, this package presents a streaming
|
|
||||||
// interface.
|
|
||||||
type Block struct {
|
|
||||||
Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
|
|
||||||
Header map[string]string // Optional headers.
|
|
||||||
Body io.Reader // A Reader from which the contents can be read
|
|
||||||
lReader lineReader
|
|
||||||
oReader openpgpReader
|
|
||||||
}
|
|
||||||
|
|
||||||
var ArmorCorrupt error = errors.StructuralError("armor invalid")
|
|
||||||
|
|
||||||
const crc24Init = 0xb704ce
|
|
||||||
const crc24Poly = 0x1864cfb
|
|
||||||
const crc24Mask = 0xffffff
|
|
||||||
|
|
||||||
// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
|
|
||||||
func crc24(crc uint32, d []byte) uint32 {
|
|
||||||
for _, b := range d {
|
|
||||||
crc ^= uint32(b) << 16
|
|
||||||
for i := 0; i < 8; i++ {
|
|
||||||
crc <<= 1
|
|
||||||
if crc&0x1000000 != 0 {
|
|
||||||
crc ^= crc24Poly
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return crc
|
|
||||||
}
|
|
||||||
|
|
||||||
var armorStart = []byte("-----BEGIN ")
|
|
||||||
var armorEnd = []byte("-----END ")
|
|
||||||
var armorEndOfLine = []byte("-----")
|
|
||||||
|
|
||||||
// lineReader wraps a line based reader. It watches for the end of an armor
|
|
||||||
// block and records the expected CRC value.
|
|
||||||
type lineReader struct {
|
|
||||||
in *bufio.Reader
|
|
||||||
buf []byte
|
|
||||||
eof bool
|
|
||||||
crc uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *lineReader) Read(p []byte) (n int, err error) {
|
|
||||||
if l.eof {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(l.buf) > 0 {
|
|
||||||
n = copy(p, l.buf)
|
|
||||||
l.buf = l.buf[n:]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
line, isPrefix, err := l.in.ReadLine()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if isPrefix {
|
|
||||||
return 0, ArmorCorrupt
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(line) == 5 && line[0] == '=' {
|
|
||||||
// This is the checksum line
|
|
||||||
var expectedBytes [3]byte
|
|
||||||
var m int
|
|
||||||
m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
|
|
||||||
if m != 3 || err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
l.crc = uint32(expectedBytes[0])<<16 |
|
|
||||||
uint32(expectedBytes[1])<<8 |
|
|
||||||
uint32(expectedBytes[2])
|
|
||||||
|
|
||||||
line, _, err = l.in.ReadLine()
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !bytes.HasPrefix(line, armorEnd) {
|
|
||||||
return 0, ArmorCorrupt
|
|
||||||
}
|
|
||||||
|
|
||||||
l.eof = true
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(line) > 96 {
|
|
||||||
return 0, ArmorCorrupt
|
|
||||||
}
|
|
||||||
|
|
||||||
n = copy(p, line)
|
|
||||||
bytesToSave := len(line) - n
|
|
||||||
if bytesToSave > 0 {
|
|
||||||
if cap(l.buf) < bytesToSave {
|
|
||||||
l.buf = make([]byte, 0, bytesToSave)
|
|
||||||
}
|
|
||||||
l.buf = l.buf[0:bytesToSave]
|
|
||||||
copy(l.buf, line[n:])
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
|
|
||||||
// a running CRC of the resulting data and checks the CRC against the value
|
|
||||||
// found by the lineReader at EOF.
|
|
||||||
type openpgpReader struct {
|
|
||||||
lReader *lineReader
|
|
||||||
b64Reader io.Reader
|
|
||||||
currentCRC uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *openpgpReader) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = r.b64Reader.Read(p)
|
|
||||||
r.currentCRC = crc24(r.currentCRC, p[:n])
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
|
|
||||||
return 0, ArmorCorrupt
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode reads a PGP armored block from the given Reader. It will ignore
|
|
||||||
// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
|
|
||||||
// given Reader is not usable after calling this function: an arbitrary amount
|
|
||||||
// of data may have been read past the end of the block.
|
|
||||||
func Decode(in io.Reader) (p *Block, err error) {
|
|
||||||
r := bufio.NewReaderSize(in, 100)
|
|
||||||
var line []byte
|
|
||||||
ignoreNext := false
|
|
||||||
|
|
||||||
TryNextBlock:
|
|
||||||
p = nil
|
|
||||||
|
|
||||||
// Skip leading garbage
|
|
||||||
for {
|
|
||||||
ignoreThis := ignoreNext
|
|
||||||
line, ignoreNext, err = r.ReadLine()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ignoreNext || ignoreThis {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line = bytes.TrimSpace(line)
|
|
||||||
if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p = new(Block)
|
|
||||||
p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
|
|
||||||
p.Header = make(map[string]string)
|
|
||||||
nextIsContinuation := false
|
|
||||||
var lastKey string
|
|
||||||
|
|
||||||
// Read headers
|
|
||||||
for {
|
|
||||||
isContinuation := nextIsContinuation
|
|
||||||
line, nextIsContinuation, err = r.ReadLine()
|
|
||||||
if err != nil {
|
|
||||||
p = nil
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if isContinuation {
|
|
||||||
p.Header[lastKey] += string(line)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
line = bytes.TrimSpace(line)
|
|
||||||
if len(line) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
i := bytes.Index(line, []byte(": "))
|
|
||||||
if i == -1 {
|
|
||||||
goto TryNextBlock
|
|
||||||
}
|
|
||||||
lastKey = string(line[:i])
|
|
||||||
p.Header[lastKey] = string(line[i+2:])
|
|
||||||
}
|
|
||||||
|
|
||||||
p.lReader.in = r
|
|
||||||
p.oReader.currentCRC = crc24Init
|
|
||||||
p.oReader.lReader = &p.lReader
|
|
||||||
p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
|
|
||||||
p.Body = &p.oReader
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
160
vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
160
vendor/golang.org/x/crypto/openpgp/armor/encode.go
generated
vendored
@ -1,160 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package armor
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var armorHeaderSep = []byte(": ")
|
|
||||||
var blockEnd = []byte("\n=")
|
|
||||||
var newline = []byte("\n")
|
|
||||||
var armorEndOfLineOut = []byte("-----\n")
|
|
||||||
|
|
||||||
// writeSlices writes its arguments to the given Writer.
|
|
||||||
func writeSlices(out io.Writer, slices ...[]byte) (err error) {
|
|
||||||
for _, s := range slices {
|
|
||||||
_, err = out.Write(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// lineBreaker breaks data across several lines, all of the same byte length
|
|
||||||
// (except possibly the last). Lines are broken with a single '\n'.
|
|
||||||
type lineBreaker struct {
|
|
||||||
lineLength int
|
|
||||||
line []byte
|
|
||||||
used int
|
|
||||||
out io.Writer
|
|
||||||
haveWritten bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
|
|
||||||
return &lineBreaker{
|
|
||||||
lineLength: lineLength,
|
|
||||||
line: make([]byte, lineLength),
|
|
||||||
used: 0,
|
|
||||||
out: out,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *lineBreaker) Write(b []byte) (n int, err error) {
|
|
||||||
n = len(b)
|
|
||||||
|
|
||||||
if n == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.used == 0 && l.haveWritten {
|
|
||||||
_, err = l.out.Write([]byte{'\n'})
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.used+len(b) < l.lineLength {
|
|
||||||
l.used += copy(l.line[l.used:], b)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.haveWritten = true
|
|
||||||
_, err = l.out.Write(l.line[0:l.used])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
excess := l.lineLength - l.used
|
|
||||||
l.used = 0
|
|
||||||
|
|
||||||
_, err = l.out.Write(b[0:excess])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = l.Write(b[excess:])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *lineBreaker) Close() (err error) {
|
|
||||||
if l.used > 0 {
|
|
||||||
_, err = l.out.Write(l.line[0:l.used])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// encoding keeps track of a running CRC24 over the data which has been written
|
|
||||||
// to it and outputs a OpenPGP checksum when closed, followed by an armor
|
|
||||||
// trailer.
|
|
||||||
//
|
|
||||||
// It's built into a stack of io.Writers:
|
|
||||||
// encoding -> base64 encoder -> lineBreaker -> out
|
|
||||||
type encoding struct {
|
|
||||||
out io.Writer
|
|
||||||
breaker *lineBreaker
|
|
||||||
b64 io.WriteCloser
|
|
||||||
crc uint32
|
|
||||||
blockType []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoding) Write(data []byte) (n int, err error) {
|
|
||||||
e.crc = crc24(e.crc, data)
|
|
||||||
return e.b64.Write(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *encoding) Close() (err error) {
|
|
||||||
err = e.b64.Close()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
e.breaker.Close()
|
|
||||||
|
|
||||||
var checksumBytes [3]byte
|
|
||||||
checksumBytes[0] = byte(e.crc >> 16)
|
|
||||||
checksumBytes[1] = byte(e.crc >> 8)
|
|
||||||
checksumBytes[2] = byte(e.crc)
|
|
||||||
|
|
||||||
var b64ChecksumBytes [4]byte
|
|
||||||
base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
|
|
||||||
|
|
||||||
return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode returns a WriteCloser which will encode the data written to it in
|
|
||||||
// OpenPGP armor.
|
|
||||||
func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
|
|
||||||
bType := []byte(blockType)
|
|
||||||
err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range headers {
|
|
||||||
err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = out.Write(newline)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
e := &encoding{
|
|
||||||
out: out,
|
|
||||||
breaker: newLineBreaker(out, 64),
|
|
||||||
crc: crc24Init,
|
|
||||||
blockType: bType,
|
|
||||||
}
|
|
||||||
e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
|
|
||||||
return e, nil
|
|
||||||
}
|
|
59
vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
59
vendor/golang.org/x/crypto/openpgp/canonical_text.go
generated
vendored
@ -1,59 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package openpgp
|
|
||||||
|
|
||||||
import "hash"
|
|
||||||
|
|
||||||
// NewCanonicalTextHash reformats text written to it into the canonical
|
|
||||||
// form and then applies the hash h. See RFC 4880, section 5.2.1.
|
|
||||||
func NewCanonicalTextHash(h hash.Hash) hash.Hash {
|
|
||||||
return &canonicalTextHash{h, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
type canonicalTextHash struct {
|
|
||||||
h hash.Hash
|
|
||||||
s int
|
|
||||||
}
|
|
||||||
|
|
||||||
var newline = []byte{'\r', '\n'}
|
|
||||||
|
|
||||||
func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
|
|
||||||
start := 0
|
|
||||||
|
|
||||||
for i, c := range buf {
|
|
||||||
switch cth.s {
|
|
||||||
case 0:
|
|
||||||
if c == '\r' {
|
|
||||||
cth.s = 1
|
|
||||||
} else if c == '\n' {
|
|
||||||
cth.h.Write(buf[start:i])
|
|
||||||
cth.h.Write(newline)
|
|
||||||
start = i + 1
|
|
||||||
}
|
|
||||||
case 1:
|
|
||||||
cth.s = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cth.h.Write(buf[start:])
|
|
||||||
return len(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cth *canonicalTextHash) Sum(in []byte) []byte {
|
|
||||||
return cth.h.Sum(in)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cth *canonicalTextHash) Reset() {
|
|
||||||
cth.h.Reset()
|
|
||||||
cth.s = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cth *canonicalTextHash) Size() int {
|
|
||||||
return cth.h.Size()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cth *canonicalTextHash) BlockSize() int {
|
|
||||||
return cth.h.BlockSize()
|
|
||||||
}
|
|
122
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
122
vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go
generated
vendored
@ -1,122 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
|
|
||||||
// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
|
|
||||||
// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
|
|
||||||
// n. 4, 1985, pp. 469-472.
|
|
||||||
//
|
|
||||||
// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
|
|
||||||
// unsuitable for other protocols. RSA should be used in preference in any
|
|
||||||
// case.
|
|
||||||
package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/subtle"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PublicKey represents an ElGamal public key.
|
|
||||||
type PublicKey struct {
|
|
||||||
G, P, Y *big.Int
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrivateKey represents an ElGamal private key.
|
|
||||||
type PrivateKey struct {
|
|
||||||
PublicKey
|
|
||||||
X *big.Int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt encrypts the given message to the given public key. The result is a
|
|
||||||
// pair of integers. Errors can result from reading random, or because msg is
|
|
||||||
// too large to be encrypted to the public key.
|
|
||||||
func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
|
|
||||||
pLen := (pub.P.BitLen() + 7) / 8
|
|
||||||
if len(msg) > pLen-11 {
|
|
||||||
err = errors.New("elgamal: message too long")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// EM = 0x02 || PS || 0x00 || M
|
|
||||||
em := make([]byte, pLen-1)
|
|
||||||
em[0] = 2
|
|
||||||
ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
|
|
||||||
err = nonZeroRandomBytes(ps, random)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
em[len(em)-len(msg)-1] = 0
|
|
||||||
copy(mm, msg)
|
|
||||||
|
|
||||||
m := new(big.Int).SetBytes(em)
|
|
||||||
|
|
||||||
k, err := rand.Int(random, pub.P)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c1 = new(big.Int).Exp(pub.G, k, pub.P)
|
|
||||||
s := new(big.Int).Exp(pub.Y, k, pub.P)
|
|
||||||
c2 = s.Mul(s, m)
|
|
||||||
c2.Mod(c2, pub.P)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt takes two integers, resulting from an ElGamal encryption, and
|
|
||||||
// returns the plaintext of the message. An error can result only if the
|
|
||||||
// ciphertext is invalid. Users should keep in mind that this is a padding
|
|
||||||
// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
|
|
||||||
// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
|
|
||||||
// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
|
|
||||||
// Bleichenbacher, Advances in Cryptology (Crypto '98),
|
|
||||||
func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
|
|
||||||
s := new(big.Int).Exp(c1, priv.X, priv.P)
|
|
||||||
s.ModInverse(s, priv.P)
|
|
||||||
s.Mul(s, c2)
|
|
||||||
s.Mod(s, priv.P)
|
|
||||||
em := s.Bytes()
|
|
||||||
|
|
||||||
firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
|
|
||||||
|
|
||||||
// The remainder of the plaintext must be a string of non-zero random
|
|
||||||
// octets, followed by a 0, followed by the message.
|
|
||||||
// lookingForIndex: 1 iff we are still looking for the zero.
|
|
||||||
// index: the offset of the first zero byte.
|
|
||||||
var lookingForIndex, index int
|
|
||||||
lookingForIndex = 1
|
|
||||||
|
|
||||||
for i := 1; i < len(em); i++ {
|
|
||||||
equals0 := subtle.ConstantTimeByteEq(em[i], 0)
|
|
||||||
index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
|
|
||||||
lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
|
|
||||||
}
|
|
||||||
|
|
||||||
if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
|
|
||||||
return nil, errors.New("elgamal: decryption error")
|
|
||||||
}
|
|
||||||
return em[index+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// nonZeroRandomBytes fills the given slice with non-zero random octets.
|
|
||||||
func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
|
|
||||||
_, err = io.ReadFull(rand, s)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
for s[i] == 0 {
|
|
||||||
_, err = io.ReadFull(rand, s[i:i+1])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
72
vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
72
vendor/golang.org/x/crypto/openpgp/errors/errors.go
generated
vendored
@ -1,72 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package errors contains common error types for the OpenPGP packages.
|
|
||||||
package errors // import "golang.org/x/crypto/openpgp/errors"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A StructuralError is returned when OpenPGP data is found to be syntactically
|
|
||||||
// invalid.
|
|
||||||
type StructuralError string
|
|
||||||
|
|
||||||
func (s StructuralError) Error() string {
|
|
||||||
return "openpgp: invalid data: " + string(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnsupportedError indicates that, although the OpenPGP data is valid, it
|
|
||||||
// makes use of currently unimplemented features.
|
|
||||||
type UnsupportedError string
|
|
||||||
|
|
||||||
func (s UnsupportedError) Error() string {
|
|
||||||
return "openpgp: unsupported feature: " + string(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InvalidArgumentError indicates that the caller is in error and passed an
|
|
||||||
// incorrect value.
|
|
||||||
type InvalidArgumentError string
|
|
||||||
|
|
||||||
func (i InvalidArgumentError) Error() string {
|
|
||||||
return "openpgp: invalid argument: " + string(i)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignatureError indicates that a syntactically valid signature failed to
|
|
||||||
// validate.
|
|
||||||
type SignatureError string
|
|
||||||
|
|
||||||
func (b SignatureError) Error() string {
|
|
||||||
return "openpgp: invalid signature: " + string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
type keyIncorrectError int
|
|
||||||
|
|
||||||
func (ki keyIncorrectError) Error() string {
|
|
||||||
return "openpgp: incorrect key"
|
|
||||||
}
|
|
||||||
|
|
||||||
var ErrKeyIncorrect error = keyIncorrectError(0)
|
|
||||||
|
|
||||||
type unknownIssuerError int
|
|
||||||
|
|
||||||
func (unknownIssuerError) Error() string {
|
|
||||||
return "openpgp: signature made by unknown entity"
|
|
||||||
}
|
|
||||||
|
|
||||||
var ErrUnknownIssuer error = unknownIssuerError(0)
|
|
||||||
|
|
||||||
type keyRevokedError int
|
|
||||||
|
|
||||||
func (keyRevokedError) Error() string {
|
|
||||||
return "openpgp: signature made by revoked key"
|
|
||||||
}
|
|
||||||
|
|
||||||
var ErrKeyRevoked error = keyRevokedError(0)
|
|
||||||
|
|
||||||
type UnknownPacketTypeError uint8
|
|
||||||
|
|
||||||
func (upte UnknownPacketTypeError) Error() string {
|
|
||||||
return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
|
|
||||||
}
|
|
693
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
693
vendor/golang.org/x/crypto/openpgp/keys.go
generated
vendored
@ -1,693 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package openpgp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rsa"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/armor"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/packet"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PublicKeyType is the armor type for a PGP public key.
|
|
||||||
var PublicKeyType = "PGP PUBLIC KEY BLOCK"
|
|
||||||
|
|
||||||
// PrivateKeyType is the armor type for a PGP private key.
|
|
||||||
var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
|
|
||||||
|
|
||||||
// An Entity represents the components of an OpenPGP key: a primary public key
|
|
||||||
// (which must be a signing key), one or more identities claimed by that key,
|
|
||||||
// and zero or more subkeys, which may be encryption keys.
|
|
||||||
type Entity struct {
|
|
||||||
PrimaryKey *packet.PublicKey
|
|
||||||
PrivateKey *packet.PrivateKey
|
|
||||||
Identities map[string]*Identity // indexed by Identity.Name
|
|
||||||
Revocations []*packet.Signature
|
|
||||||
Subkeys []Subkey
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Identity represents an identity claimed by an Entity and zero or more
|
|
||||||
// assertions by other entities about that claim.
|
|
||||||
type Identity struct {
|
|
||||||
Name string // by convention, has the form "Full Name (comment) <email@example.com>"
|
|
||||||
UserId *packet.UserId
|
|
||||||
SelfSignature *packet.Signature
|
|
||||||
Signatures []*packet.Signature
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Subkey is an additional public key in an Entity. Subkeys can be used for
|
|
||||||
// encryption.
|
|
||||||
type Subkey struct {
|
|
||||||
PublicKey *packet.PublicKey
|
|
||||||
PrivateKey *packet.PrivateKey
|
|
||||||
Sig *packet.Signature
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Key identifies a specific public key in an Entity. This is either the
|
|
||||||
// Entity's primary key or a subkey.
|
|
||||||
type Key struct {
|
|
||||||
Entity *Entity
|
|
||||||
PublicKey *packet.PublicKey
|
|
||||||
PrivateKey *packet.PrivateKey
|
|
||||||
SelfSignature *packet.Signature
|
|
||||||
}
|
|
||||||
|
|
||||||
// A KeyRing provides access to public and private keys.
|
|
||||||
type KeyRing interface {
|
|
||||||
// KeysById returns the set of keys that have the given key id.
|
|
||||||
KeysById(id uint64) []Key
|
|
||||||
// KeysByIdAndUsage returns the set of keys with the given id
|
|
||||||
// that also meet the key usage given by requiredUsage.
|
|
||||||
// The requiredUsage is expressed as the bitwise-OR of
|
|
||||||
// packet.KeyFlag* values.
|
|
||||||
KeysByIdUsage(id uint64, requiredUsage byte) []Key
|
|
||||||
// DecryptionKeys returns all private keys that are valid for
|
|
||||||
// decryption.
|
|
||||||
DecryptionKeys() []Key
|
|
||||||
}
|
|
||||||
|
|
||||||
// primaryIdentity returns the Identity marked as primary or the first identity
|
|
||||||
// if none are so marked.
|
|
||||||
func (e *Entity) primaryIdentity() *Identity {
|
|
||||||
var firstIdentity *Identity
|
|
||||||
for _, ident := range e.Identities {
|
|
||||||
if firstIdentity == nil {
|
|
||||||
firstIdentity = ident
|
|
||||||
}
|
|
||||||
if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
|
||||||
return ident
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return firstIdentity
|
|
||||||
}
|
|
||||||
|
|
||||||
// encryptionKey returns the best candidate Key for encrypting a message to the
|
|
||||||
// given Entity.
|
|
||||||
func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
|
|
||||||
candidateSubkey := -1
|
|
||||||
|
|
||||||
// Iterate the keys to find the newest key
|
|
||||||
var maxTime time.Time
|
|
||||||
for i, subkey := range e.Subkeys {
|
|
||||||
if subkey.Sig.FlagsValid &&
|
|
||||||
subkey.Sig.FlagEncryptCommunications &&
|
|
||||||
subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
|
|
||||||
!subkey.Sig.KeyExpired(now) &&
|
|
||||||
(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
|
|
||||||
candidateSubkey = i
|
|
||||||
maxTime = subkey.Sig.CreationTime
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if candidateSubkey != -1 {
|
|
||||||
subkey := e.Subkeys[candidateSubkey]
|
|
||||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we don't have any candidate subkeys for encryption and
|
|
||||||
// the primary key doesn't have any usage metadata then we
|
|
||||||
// assume that the primary key is ok. Or, if the primary key is
|
|
||||||
// marked as ok to encrypt to, then we can obviously use it.
|
|
||||||
i := e.primaryIdentity()
|
|
||||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
|
|
||||||
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
|
|
||||||
!i.SelfSignature.KeyExpired(now) {
|
|
||||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// This Entity appears to be signing only.
|
|
||||||
return Key{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// signingKey return the best candidate Key for signing a message with this
|
|
||||||
// Entity.
|
|
||||||
func (e *Entity) signingKey(now time.Time) (Key, bool) {
|
|
||||||
candidateSubkey := -1
|
|
||||||
|
|
||||||
for i, subkey := range e.Subkeys {
|
|
||||||
if subkey.Sig.FlagsValid &&
|
|
||||||
subkey.Sig.FlagSign &&
|
|
||||||
subkey.PublicKey.PubKeyAlgo.CanSign() &&
|
|
||||||
!subkey.Sig.KeyExpired(now) {
|
|
||||||
candidateSubkey = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if candidateSubkey != -1 {
|
|
||||||
subkey := e.Subkeys[candidateSubkey]
|
|
||||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we have no candidate subkey then we assume that it's ok to sign
|
|
||||||
// with the primary key.
|
|
||||||
i := e.primaryIdentity()
|
|
||||||
if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
|
|
||||||
!i.SelfSignature.KeyExpired(now) {
|
|
||||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
return Key{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// An EntityList contains one or more Entities.
|
|
||||||
type EntityList []*Entity
|
|
||||||
|
|
||||||
// KeysById returns the set of keys that have the given key id.
|
|
||||||
func (el EntityList) KeysById(id uint64) (keys []Key) {
|
|
||||||
for _, e := range el {
|
|
||||||
if e.PrimaryKey.KeyId == id {
|
|
||||||
var selfSig *packet.Signature
|
|
||||||
for _, ident := range e.Identities {
|
|
||||||
if selfSig == nil {
|
|
||||||
selfSig = ident.SelfSignature
|
|
||||||
} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
|
|
||||||
selfSig = ident.SelfSignature
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, subKey := range e.Subkeys {
|
|
||||||
if subKey.PublicKey.KeyId == id {
|
|
||||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysByIdAndUsage returns the set of keys with the given id that also meet
|
|
||||||
// the key usage given by requiredUsage. The requiredUsage is expressed as
|
|
||||||
// the bitwise-OR of packet.KeyFlag* values.
|
|
||||||
func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
|
|
||||||
for _, key := range el.KeysById(id) {
|
|
||||||
if len(key.Entity.Revocations) > 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if key.SelfSignature.RevocationReason != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if key.SelfSignature.FlagsValid && requiredUsage != 0 {
|
|
||||||
var usage byte
|
|
||||||
if key.SelfSignature.FlagCertify {
|
|
||||||
usage |= packet.KeyFlagCertify
|
|
||||||
}
|
|
||||||
if key.SelfSignature.FlagSign {
|
|
||||||
usage |= packet.KeyFlagSign
|
|
||||||
}
|
|
||||||
if key.SelfSignature.FlagEncryptCommunications {
|
|
||||||
usage |= packet.KeyFlagEncryptCommunications
|
|
||||||
}
|
|
||||||
if key.SelfSignature.FlagEncryptStorage {
|
|
||||||
usage |= packet.KeyFlagEncryptStorage
|
|
||||||
}
|
|
||||||
if usage&requiredUsage != requiredUsage {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecryptionKeys returns all private keys that are valid for decryption.
|
|
||||||
func (el EntityList) DecryptionKeys() (keys []Key) {
|
|
||||||
for _, e := range el {
|
|
||||||
for _, subKey := range e.Subkeys {
|
|
||||||
if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
|
|
||||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
|
|
||||||
func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
|
|
||||||
block, err := armor.Decode(r)
|
|
||||||
if err == io.EOF {
|
|
||||||
return nil, errors.InvalidArgumentError("no armored data found")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if block.Type != PublicKeyType && block.Type != PrivateKeyType {
|
|
||||||
return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ReadKeyRing(block.Body)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadKeyRing reads one or more public/private keys. Unsupported keys are
|
|
||||||
// ignored as long as at least a single valid key is found.
|
|
||||||
func ReadKeyRing(r io.Reader) (el EntityList, err error) {
|
|
||||||
packets := packet.NewReader(r)
|
|
||||||
var lastUnsupportedError error
|
|
||||||
|
|
||||||
for {
|
|
||||||
var e *Entity
|
|
||||||
e, err = ReadEntity(packets)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: warn about skipped unsupported/unreadable keys
|
|
||||||
if _, ok := err.(errors.UnsupportedError); ok {
|
|
||||||
lastUnsupportedError = err
|
|
||||||
err = readToNextPublicKey(packets)
|
|
||||||
} else if _, ok := err.(errors.StructuralError); ok {
|
|
||||||
// Skip unreadable, badly-formatted keys
|
|
||||||
lastUnsupportedError = err
|
|
||||||
err = readToNextPublicKey(packets)
|
|
||||||
}
|
|
||||||
if err == io.EOF {
|
|
||||||
err = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
el = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
el = append(el, e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(el) == 0 && err == nil {
|
|
||||||
err = lastUnsupportedError
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// readToNextPublicKey reads packets until the start of the entity and leaves
|
|
||||||
// the first packet of the new entity in the Reader.
|
|
||||||
func readToNextPublicKey(packets *packet.Reader) (err error) {
|
|
||||||
var p packet.Packet
|
|
||||||
for {
|
|
||||||
p, err = packets.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
if _, ok := err.(errors.UnsupportedError); ok {
|
|
||||||
err = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
|
|
||||||
packets.Unread(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
|
|
||||||
// given Reader.
|
|
||||||
func ReadEntity(packets *packet.Reader) (*Entity, error) {
|
|
||||||
e := new(Entity)
|
|
||||||
e.Identities = make(map[string]*Identity)
|
|
||||||
|
|
||||||
p, err := packets.Next()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var ok bool
|
|
||||||
if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
|
|
||||||
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
|
|
||||||
packets.Unread(p)
|
|
||||||
return nil, errors.StructuralError("first packet was not a public/private key")
|
|
||||||
}
|
|
||||||
e.PrimaryKey = &e.PrivateKey.PublicKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
|
|
||||||
return nil, errors.StructuralError("primary key cannot be used for signatures")
|
|
||||||
}
|
|
||||||
|
|
||||||
var revocations []*packet.Signature
|
|
||||||
EachPacket:
|
|
||||||
for {
|
|
||||||
p, err := packets.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch pkt := p.(type) {
|
|
||||||
case *packet.UserId:
|
|
||||||
if err := addUserID(e, packets, pkt); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case *packet.Signature:
|
|
||||||
if pkt.SigType == packet.SigTypeKeyRevocation {
|
|
||||||
revocations = append(revocations, pkt)
|
|
||||||
} else if pkt.SigType == packet.SigTypeDirectSignature {
|
|
||||||
// TODO: RFC4880 5.2.1 permits signatures
|
|
||||||
// directly on keys (eg. to bind additional
|
|
||||||
// revocation keys).
|
|
||||||
}
|
|
||||||
// Else, ignoring the signature as it does not follow anything
|
|
||||||
// we would know to attach it to.
|
|
||||||
case *packet.PrivateKey:
|
|
||||||
if pkt.IsSubkey == false {
|
|
||||||
packets.Unread(p)
|
|
||||||
break EachPacket
|
|
||||||
}
|
|
||||||
err = addSubkey(e, packets, &pkt.PublicKey, pkt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case *packet.PublicKey:
|
|
||||||
if pkt.IsSubkey == false {
|
|
||||||
packets.Unread(p)
|
|
||||||
break EachPacket
|
|
||||||
}
|
|
||||||
err = addSubkey(e, packets, pkt, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// we ignore unknown packets
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(e.Identities) == 0 {
|
|
||||||
return nil, errors.StructuralError("entity without any identities")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, revocation := range revocations {
|
|
||||||
err = e.PrimaryKey.VerifyRevocationSignature(revocation)
|
|
||||||
if err == nil {
|
|
||||||
e.Revocations = append(e.Revocations, revocation)
|
|
||||||
} else {
|
|
||||||
// TODO: RFC 4880 5.2.3.15 defines revocation keys.
|
|
||||||
return nil, errors.StructuralError("revocation signature signed by alternate key")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return e, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
|
|
||||||
// Make a new Identity object, that we might wind up throwing away.
|
|
||||||
// We'll only add it if we get a valid self-signature over this
|
|
||||||
// userID.
|
|
||||||
identity := new(Identity)
|
|
||||||
identity.Name = pkt.Id
|
|
||||||
identity.UserId = pkt
|
|
||||||
|
|
||||||
for {
|
|
||||||
p, err := packets.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
sig, ok := p.(*packet.Signature)
|
|
||||||
if !ok {
|
|
||||||
packets.Unread(p)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
|
|
||||||
if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
|
|
||||||
return errors.StructuralError("user ID self-signature invalid: " + err.Error())
|
|
||||||
}
|
|
||||||
identity.SelfSignature = sig
|
|
||||||
e.Identities[pkt.Id] = identity
|
|
||||||
} else {
|
|
||||||
identity.Signatures = append(identity.Signatures, sig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
|
|
||||||
var subKey Subkey
|
|
||||||
subKey.PublicKey = pub
|
|
||||||
subKey.PrivateKey = priv
|
|
||||||
|
|
||||||
for {
|
|
||||||
p, err := packets.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
} else if err != nil {
|
|
||||||
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
sig, ok := p.(*packet.Signature)
|
|
||||||
if !ok {
|
|
||||||
packets.Unread(p)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
|
|
||||||
return errors.StructuralError("subkey signature with wrong type")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
|
|
||||||
return errors.StructuralError("subkey signature invalid: " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
switch sig.SigType {
|
|
||||||
case packet.SigTypeSubkeyRevocation:
|
|
||||||
subKey.Sig = sig
|
|
||||||
case packet.SigTypeSubkeyBinding:
|
|
||||||
|
|
||||||
if shouldReplaceSubkeySig(subKey.Sig, sig) {
|
|
||||||
subKey.Sig = sig
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if subKey.Sig == nil {
|
|
||||||
return errors.StructuralError("subkey packet not followed by signature")
|
|
||||||
}
|
|
||||||
|
|
||||||
e.Subkeys = append(e.Subkeys, subKey)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
|
|
||||||
if potentialNewSig == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if existingSig == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if existingSig.SigType == packet.SigTypeSubkeyRevocation {
|
|
||||||
return false // never override a revocation signature
|
|
||||||
}
|
|
||||||
|
|
||||||
return potentialNewSig.CreationTime.After(existingSig.CreationTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
const defaultRSAKeyBits = 2048
|
|
||||||
|
|
||||||
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
|
|
||||||
// single identity composed of the given full name, comment and email, any of
|
|
||||||
// which may be empty but must not contain any of "()<>\x00".
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
|
|
||||||
creationTime := config.Now()
|
|
||||||
|
|
||||||
bits := defaultRSAKeyBits
|
|
||||||
if config != nil && config.RSABits != 0 {
|
|
||||||
bits = config.RSABits
|
|
||||||
}
|
|
||||||
|
|
||||||
uid := packet.NewUserId(name, comment, email)
|
|
||||||
if uid == nil {
|
|
||||||
return nil, errors.InvalidArgumentError("user id field contained invalid characters")
|
|
||||||
}
|
|
||||||
signingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
e := &Entity{
|
|
||||||
PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
|
|
||||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
|
|
||||||
Identities: make(map[string]*Identity),
|
|
||||||
}
|
|
||||||
isPrimaryId := true
|
|
||||||
e.Identities[uid.Id] = &Identity{
|
|
||||||
Name: uid.Id,
|
|
||||||
UserId: uid,
|
|
||||||
SelfSignature: &packet.Signature{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
SigType: packet.SigTypePositiveCert,
|
|
||||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
|
||||||
Hash: config.Hash(),
|
|
||||||
IsPrimaryId: &isPrimaryId,
|
|
||||||
FlagsValid: true,
|
|
||||||
FlagSign: true,
|
|
||||||
FlagCertify: true,
|
|
||||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the user passes in a DefaultHash via packet.Config,
|
|
||||||
// set the PreferredHash for the SelfSignature.
|
|
||||||
if config != nil && config.DefaultHash != 0 {
|
|
||||||
e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Likewise for DefaultCipher.
|
|
||||||
if config != nil && config.DefaultCipher != 0 {
|
|
||||||
e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
|
|
||||||
}
|
|
||||||
|
|
||||||
e.Subkeys = make([]Subkey, 1)
|
|
||||||
e.Subkeys[0] = Subkey{
|
|
||||||
PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
|
|
||||||
PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
|
|
||||||
Sig: &packet.Signature{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
SigType: packet.SigTypeSubkeyBinding,
|
|
||||||
PubKeyAlgo: packet.PubKeyAlgoRSA,
|
|
||||||
Hash: config.Hash(),
|
|
||||||
FlagsValid: true,
|
|
||||||
FlagEncryptStorage: true,
|
|
||||||
FlagEncryptCommunications: true,
|
|
||||||
IssuerKeyId: &e.PrimaryKey.KeyId,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
e.Subkeys[0].PublicKey.IsSubkey = true
|
|
||||||
e.Subkeys[0].PrivateKey.IsSubkey = true
|
|
||||||
err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return e, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializePrivate serializes an Entity, including private key material, but
|
|
||||||
// excluding signatures from other entities, to the given Writer.
|
|
||||||
// Identities and subkeys are re-signed in case they changed since NewEntry.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
|
|
||||||
err = e.PrivateKey.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, ident := range e.Identities {
|
|
||||||
err = ident.UserId.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = ident.SelfSignature.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, subkey := range e.Subkeys {
|
|
||||||
err = subkey.PrivateKey.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = subkey.Sig.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize writes the public part of the given Entity to w, including
|
|
||||||
// signatures from other entities. No private key material will be output.
|
|
||||||
func (e *Entity) Serialize(w io.Writer) error {
|
|
||||||
err := e.PrimaryKey.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, ident := range e.Identities {
|
|
||||||
err = ident.UserId.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = ident.SelfSignature.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, sig := range ident.Signatures {
|
|
||||||
err = sig.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, subkey := range e.Subkeys {
|
|
||||||
err = subkey.PublicKey.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = subkey.Sig.Serialize(w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignIdentity adds a signature to e, from signer, attesting that identity is
|
|
||||||
// associated with e. The provided identity must already be an element of
|
|
||||||
// e.Identities and the private key of signer must have been decrypted if
|
|
||||||
// necessary.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
|
|
||||||
if signer.PrivateKey == nil {
|
|
||||||
return errors.InvalidArgumentError("signing Entity must have a private key")
|
|
||||||
}
|
|
||||||
if signer.PrivateKey.Encrypted {
|
|
||||||
return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
|
|
||||||
}
|
|
||||||
ident, ok := e.Identities[identity]
|
|
||||||
if !ok {
|
|
||||||
return errors.InvalidArgumentError("given identity string not found in Entity")
|
|
||||||
}
|
|
||||||
|
|
||||||
sig := &packet.Signature{
|
|
||||||
SigType: packet.SigTypeGenericCert,
|
|
||||||
PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
|
|
||||||
Hash: config.Hash(),
|
|
||||||
CreationTime: config.Now(),
|
|
||||||
IssuerKeyId: &signer.PrivateKey.KeyId,
|
|
||||||
}
|
|
||||||
if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ident.Signatures = append(ident.Signatures, sig)
|
|
||||||
return nil
|
|
||||||
}
|
|
123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
123
vendor/golang.org/x/crypto/openpgp/packet/compressed.go
generated
vendored
@ -1,123 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/bzip2"
|
|
||||||
"compress/flate"
|
|
||||||
"compress/zlib"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Compressed represents a compressed OpenPGP packet. The decompressed contents
|
|
||||||
// will contain more OpenPGP packets. See RFC 4880, section 5.6.
|
|
||||||
type Compressed struct {
|
|
||||||
Body io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
NoCompression = flate.NoCompression
|
|
||||||
BestSpeed = flate.BestSpeed
|
|
||||||
BestCompression = flate.BestCompression
|
|
||||||
DefaultCompression = flate.DefaultCompression
|
|
||||||
)
|
|
||||||
|
|
||||||
// CompressionConfig contains compressor configuration settings.
|
|
||||||
type CompressionConfig struct {
|
|
||||||
// Level is the compression level to use. It must be set to
|
|
||||||
// between -1 and 9, with -1 causing the compressor to use the
|
|
||||||
// default compression level, 0 causing the compressor to use
|
|
||||||
// no compression and 1 to 9 representing increasing (better,
|
|
||||||
// slower) compression levels. If Level is less than -1 or
|
|
||||||
// more then 9, a non-nil error will be returned during
|
|
||||||
// encryption. See the constants above for convenient common
|
|
||||||
// settings for Level.
|
|
||||||
Level int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Compressed) parse(r io.Reader) error {
|
|
||||||
var buf [1]byte
|
|
||||||
_, err := readFull(r, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch buf[0] {
|
|
||||||
case 1:
|
|
||||||
c.Body = flate.NewReader(r)
|
|
||||||
case 2:
|
|
||||||
c.Body, err = zlib.NewReader(r)
|
|
||||||
case 3:
|
|
||||||
c.Body = bzip2.NewReader(r)
|
|
||||||
default:
|
|
||||||
err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// compressedWriterCloser represents the serialized compression stream
|
|
||||||
// header and the compressor. Its Close() method ensures that both the
|
|
||||||
// compressor and serialized stream header are closed. Its Write()
|
|
||||||
// method writes to the compressor.
|
|
||||||
type compressedWriteCloser struct {
|
|
||||||
sh io.Closer // Stream Header
|
|
||||||
c io.WriteCloser // Compressor
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
|
|
||||||
return cwc.c.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cwc compressedWriteCloser) Close() (err error) {
|
|
||||||
err = cwc.c.Close()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cwc.sh.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeCompressed serializes a compressed data packet to w and
|
|
||||||
// returns a WriteCloser to which the literal data packets themselves
|
|
||||||
// can be written and which MUST be closed on completion. If cc is
|
|
||||||
// nil, sensible defaults will be used to configure the compression
|
|
||||||
// algorithm.
|
|
||||||
func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
|
|
||||||
compressed, err := serializeStreamHeader(w, packetTypeCompressed)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = compressed.Write([]byte{uint8(algo)})
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
level := DefaultCompression
|
|
||||||
if cc != nil {
|
|
||||||
level = cc.Level
|
|
||||||
}
|
|
||||||
|
|
||||||
var compressor io.WriteCloser
|
|
||||||
switch algo {
|
|
||||||
case CompressionZIP:
|
|
||||||
compressor, err = flate.NewWriter(compressed, level)
|
|
||||||
case CompressionZLIB:
|
|
||||||
compressor, err = zlib.NewWriterLevel(compressed, level)
|
|
||||||
default:
|
|
||||||
s := strconv.Itoa(int(algo))
|
|
||||||
err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
literaldata = compressedWriteCloser{compressed, compressor}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
91
vendor/golang.org/x/crypto/openpgp/packet/config.go
generated
vendored
91
vendor/golang.org/x/crypto/openpgp/packet/config.go
generated
vendored
@ -1,91 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/rand"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config collects a number of parameters along with sensible defaults.
|
|
||||||
// A nil *Config is valid and results in all default values.
|
|
||||||
type Config struct {
|
|
||||||
// Rand provides the source of entropy.
|
|
||||||
// If nil, the crypto/rand Reader is used.
|
|
||||||
Rand io.Reader
|
|
||||||
// DefaultHash is the default hash function to be used.
|
|
||||||
// If zero, SHA-256 is used.
|
|
||||||
DefaultHash crypto.Hash
|
|
||||||
// DefaultCipher is the cipher to be used.
|
|
||||||
// If zero, AES-128 is used.
|
|
||||||
DefaultCipher CipherFunction
|
|
||||||
// Time returns the current time as the number of seconds since the
|
|
||||||
// epoch. If Time is nil, time.Now is used.
|
|
||||||
Time func() time.Time
|
|
||||||
// DefaultCompressionAlgo is the compression algorithm to be
|
|
||||||
// applied to the plaintext before encryption. If zero, no
|
|
||||||
// compression is done.
|
|
||||||
DefaultCompressionAlgo CompressionAlgo
|
|
||||||
// CompressionConfig configures the compression settings.
|
|
||||||
CompressionConfig *CompressionConfig
|
|
||||||
// S2KCount is only used for symmetric encryption. It
|
|
||||||
// determines the strength of the passphrase stretching when
|
|
||||||
// the said passphrase is hashed to produce a key. S2KCount
|
|
||||||
// should be between 1024 and 65011712, inclusive. If Config
|
|
||||||
// is nil or S2KCount is 0, the value 65536 used. Not all
|
|
||||||
// values in the above range can be represented. S2KCount will
|
|
||||||
// be rounded up to the next representable value if it cannot
|
|
||||||
// be encoded exactly. When set, it is strongly encrouraged to
|
|
||||||
// use a value that is at least 65536. See RFC 4880 Section
|
|
||||||
// 3.7.1.3.
|
|
||||||
S2KCount int
|
|
||||||
// RSABits is the number of bits in new RSA keys made with NewEntity.
|
|
||||||
// If zero, then 2048 bit keys are created.
|
|
||||||
RSABits int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Random() io.Reader {
|
|
||||||
if c == nil || c.Rand == nil {
|
|
||||||
return rand.Reader
|
|
||||||
}
|
|
||||||
return c.Rand
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Hash() crypto.Hash {
|
|
||||||
if c == nil || uint(c.DefaultHash) == 0 {
|
|
||||||
return crypto.SHA256
|
|
||||||
}
|
|
||||||
return c.DefaultHash
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Cipher() CipherFunction {
|
|
||||||
if c == nil || uint8(c.DefaultCipher) == 0 {
|
|
||||||
return CipherAES128
|
|
||||||
}
|
|
||||||
return c.DefaultCipher
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Now() time.Time {
|
|
||||||
if c == nil || c.Time == nil {
|
|
||||||
return time.Now()
|
|
||||||
}
|
|
||||||
return c.Time()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) Compression() CompressionAlgo {
|
|
||||||
if c == nil {
|
|
||||||
return CompressionNone
|
|
||||||
}
|
|
||||||
return c.DefaultCompressionAlgo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Config) PasswordHashIterations() int {
|
|
||||||
if c == nil || c.S2KCount == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return c.S2KCount
|
|
||||||
}
|
|
206
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
206
vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
generated
vendored
@ -1,206 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rsa"
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/elgamal"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
const encryptedKeyVersion = 3
|
|
||||||
|
|
||||||
// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
|
|
||||||
// section 5.1.
|
|
||||||
type EncryptedKey struct {
|
|
||||||
KeyId uint64
|
|
||||||
Algo PublicKeyAlgorithm
|
|
||||||
CipherFunc CipherFunction // only valid after a successful Decrypt
|
|
||||||
Key []byte // only valid after a successful Decrypt
|
|
||||||
|
|
||||||
encryptedMPI1, encryptedMPI2 parsedMPI
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *EncryptedKey) parse(r io.Reader) (err error) {
|
|
||||||
var buf [10]byte
|
|
||||||
_, err = readFull(r, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] != encryptedKeyVersion {
|
|
||||||
return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
|
|
||||||
}
|
|
||||||
e.KeyId = binary.BigEndian.Uint64(buf[1:9])
|
|
||||||
e.Algo = PublicKeyAlgorithm(buf[9])
|
|
||||||
switch e.Algo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err = consumeAll(r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func checksumKeyMaterial(key []byte) uint16 {
|
|
||||||
var checksum uint16
|
|
||||||
for _, v := range key {
|
|
||||||
checksum += uint16(v)
|
|
||||||
}
|
|
||||||
return checksum
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt decrypts an encrypted session key with the given private key. The
|
|
||||||
// private key must have been decrypted first.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
|
|
||||||
var err error
|
|
||||||
var b []byte
|
|
||||||
|
|
||||||
// TODO(agl): use session key decryption routines here to avoid
|
|
||||||
// padding oracle attacks.
|
|
||||||
switch priv.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
k := priv.PrivateKey.(*rsa.PrivateKey)
|
|
||||||
b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
|
|
||||||
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
|
|
||||||
b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
|
|
||||||
default:
|
|
||||||
err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
e.CipherFunc = CipherFunction(b[0])
|
|
||||||
e.Key = b[1 : len(b)-2]
|
|
||||||
expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
|
|
||||||
checksum := checksumKeyMaterial(e.Key)
|
|
||||||
if checksum != expectedChecksum {
|
|
||||||
return errors.StructuralError("EncryptedKey checksum incorrect")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize writes the encrypted key packet, e, to w.
|
|
||||||
func (e *EncryptedKey) Serialize(w io.Writer) error {
|
|
||||||
var mpiLen int
|
|
||||||
switch e.Algo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
mpiLen = 2 + len(e.encryptedMPI1.bytes)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
|
|
||||||
default:
|
|
||||||
return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
|
|
||||||
}
|
|
||||||
|
|
||||||
serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
|
|
||||||
|
|
||||||
w.Write([]byte{encryptedKeyVersion})
|
|
||||||
binary.Write(w, binary.BigEndian, e.KeyId)
|
|
||||||
w.Write([]byte{byte(e.Algo)})
|
|
||||||
|
|
||||||
switch e.Algo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
writeMPIs(w, e.encryptedMPI1)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
|
|
||||||
default:
|
|
||||||
panic("internal error")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeEncryptedKey serializes an encrypted key packet to w that contains
|
|
||||||
// key, encrypted to pub.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
|
|
||||||
var buf [10]byte
|
|
||||||
buf[0] = encryptedKeyVersion
|
|
||||||
binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
|
|
||||||
buf[9] = byte(pub.PubKeyAlgo)
|
|
||||||
|
|
||||||
keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
|
|
||||||
keyBlock[0] = byte(cipherFunc)
|
|
||||||
copy(keyBlock[1:], key)
|
|
||||||
checksum := checksumKeyMaterial(key)
|
|
||||||
keyBlock[1+len(key)] = byte(checksum >> 8)
|
|
||||||
keyBlock[1+len(key)+1] = byte(checksum)
|
|
||||||
|
|
||||||
switch pub.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
|
|
||||||
case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
|
|
||||||
return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
|
|
||||||
func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
|
|
||||||
cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
|
|
||||||
if err != nil {
|
|
||||||
return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
|
|
||||||
|
|
||||||
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.Write(header[:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
|
|
||||||
}
|
|
||||||
|
|
||||||
func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
|
|
||||||
c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
|
|
||||||
if err != nil {
|
|
||||||
return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
packetLen := 10 /* header length */
|
|
||||||
packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
|
|
||||||
packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
|
|
||||||
|
|
||||||
err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.Write(header[:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = writeBig(w, c1)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return writeBig(w, c2)
|
|
||||||
}
|
|
89
vendor/golang.org/x/crypto/openpgp/packet/literal.go
generated
vendored
89
vendor/golang.org/x/crypto/openpgp/packet/literal.go
generated
vendored
@ -1,89 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
|
|
||||||
type LiteralData struct {
|
|
||||||
IsBinary bool
|
|
||||||
FileName string
|
|
||||||
Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
|
|
||||||
Body io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForEyesOnly returns whether the contents of the LiteralData have been marked
|
|
||||||
// as especially sensitive.
|
|
||||||
func (l *LiteralData) ForEyesOnly() bool {
|
|
||||||
return l.FileName == "_CONSOLE"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *LiteralData) parse(r io.Reader) (err error) {
|
|
||||||
var buf [256]byte
|
|
||||||
|
|
||||||
_, err = readFull(r, buf[:2])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.IsBinary = buf[0] == 'b'
|
|
||||||
fileNameLen := int(buf[1])
|
|
||||||
|
|
||||||
_, err = readFull(r, buf[:fileNameLen])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.FileName = string(buf[:fileNameLen])
|
|
||||||
|
|
||||||
_, err = readFull(r, buf[:4])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
l.Time = binary.BigEndian.Uint32(buf[:4])
|
|
||||||
l.Body = r
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeLiteral serializes a literal data packet to w and returns a
|
|
||||||
// WriteCloser to which the data itself can be written and which MUST be closed
|
|
||||||
// on completion. The fileName is truncated to 255 bytes.
|
|
||||||
func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
|
|
||||||
var buf [4]byte
|
|
||||||
buf[0] = 't'
|
|
||||||
if isBinary {
|
|
||||||
buf[0] = 'b'
|
|
||||||
}
|
|
||||||
if len(fileName) > 255 {
|
|
||||||
fileName = fileName[:255]
|
|
||||||
}
|
|
||||||
buf[1] = byte(len(fileName))
|
|
||||||
|
|
||||||
inner, err := serializeStreamHeader(w, packetTypeLiteralData)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = inner.Write(buf[:2])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = inner.Write([]byte(fileName))
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
binary.BigEndian.PutUint32(buf[:], time)
|
|
||||||
_, err = inner.Write(buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
plaintext = inner
|
|
||||||
return
|
|
||||||
}
|
|
143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
generated
vendored
143
vendor/golang.org/x/crypto/openpgp/packet/ocfb.go
generated
vendored
@ -1,143 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/cipher"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ocfbEncrypter struct {
|
|
||||||
b cipher.Block
|
|
||||||
fre []byte
|
|
||||||
outUsed int
|
|
||||||
}
|
|
||||||
|
|
||||||
// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
|
|
||||||
// performed.
|
|
||||||
type OCFBResyncOption bool
|
|
||||||
|
|
||||||
const (
|
|
||||||
OCFBResync OCFBResyncOption = true
|
|
||||||
OCFBNoResync OCFBResyncOption = false
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
|
|
||||||
// cipher feedback mode using the given cipher.Block, and an initial amount of
|
|
||||||
// ciphertext. randData must be random bytes and be the same length as the
|
|
||||||
// cipher.Block's block size. Resync determines if the "resynchronization step"
|
|
||||||
// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
|
|
||||||
// this point.
|
|
||||||
func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
|
|
||||||
blockSize := block.BlockSize()
|
|
||||||
if len(randData) != blockSize {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
x := &ocfbEncrypter{
|
|
||||||
b: block,
|
|
||||||
fre: make([]byte, blockSize),
|
|
||||||
outUsed: 0,
|
|
||||||
}
|
|
||||||
prefix := make([]byte, blockSize+2)
|
|
||||||
|
|
||||||
block.Encrypt(x.fre, x.fre)
|
|
||||||
for i := 0; i < blockSize; i++ {
|
|
||||||
prefix[i] = randData[i] ^ x.fre[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
block.Encrypt(x.fre, prefix[:blockSize])
|
|
||||||
prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
|
|
||||||
prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
|
|
||||||
|
|
||||||
if resync {
|
|
||||||
block.Encrypt(x.fre, prefix[2:])
|
|
||||||
} else {
|
|
||||||
x.fre[0] = prefix[blockSize]
|
|
||||||
x.fre[1] = prefix[blockSize+1]
|
|
||||||
x.outUsed = 2
|
|
||||||
}
|
|
||||||
return x, prefix
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
|
|
||||||
for i := 0; i < len(src); i++ {
|
|
||||||
if x.outUsed == len(x.fre) {
|
|
||||||
x.b.Encrypt(x.fre, x.fre)
|
|
||||||
x.outUsed = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
x.fre[x.outUsed] ^= src[i]
|
|
||||||
dst[i] = x.fre[x.outUsed]
|
|
||||||
x.outUsed++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ocfbDecrypter struct {
|
|
||||||
b cipher.Block
|
|
||||||
fre []byte
|
|
||||||
outUsed int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
|
|
||||||
// cipher feedback mode using the given cipher.Block. Prefix must be the first
|
|
||||||
// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
|
|
||||||
// block size. If an incorrect key is detected then nil is returned. On
|
|
||||||
// successful exit, blockSize+2 bytes of decrypted data are written into
|
|
||||||
// prefix. Resync determines if the "resynchronization step" from RFC 4880,
|
|
||||||
// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
|
|
||||||
func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
|
|
||||||
blockSize := block.BlockSize()
|
|
||||||
if len(prefix) != blockSize+2 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
x := &ocfbDecrypter{
|
|
||||||
b: block,
|
|
||||||
fre: make([]byte, blockSize),
|
|
||||||
outUsed: 0,
|
|
||||||
}
|
|
||||||
prefixCopy := make([]byte, len(prefix))
|
|
||||||
copy(prefixCopy, prefix)
|
|
||||||
|
|
||||||
block.Encrypt(x.fre, x.fre)
|
|
||||||
for i := 0; i < blockSize; i++ {
|
|
||||||
prefixCopy[i] ^= x.fre[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
block.Encrypt(x.fre, prefix[:blockSize])
|
|
||||||
prefixCopy[blockSize] ^= x.fre[0]
|
|
||||||
prefixCopy[blockSize+1] ^= x.fre[1]
|
|
||||||
|
|
||||||
if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
|
|
||||||
prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if resync {
|
|
||||||
block.Encrypt(x.fre, prefix[2:])
|
|
||||||
} else {
|
|
||||||
x.fre[0] = prefix[blockSize]
|
|
||||||
x.fre[1] = prefix[blockSize+1]
|
|
||||||
x.outUsed = 2
|
|
||||||
}
|
|
||||||
copy(prefix, prefixCopy)
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
|
|
||||||
for i := 0; i < len(src); i++ {
|
|
||||||
if x.outUsed == len(x.fre) {
|
|
||||||
x.b.Encrypt(x.fre, x.fre)
|
|
||||||
x.outUsed = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
c := src[i]
|
|
||||||
dst[i] = x.fre[x.outUsed] ^ src[i]
|
|
||||||
x.fre[x.outUsed] = c
|
|
||||||
x.outUsed++
|
|
||||||
}
|
|
||||||
}
|
|
73
vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
generated
vendored
73
vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
generated
vendored
@ -1,73 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"encoding/binary"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnePassSignature represents a one-pass signature packet. See RFC 4880,
|
|
||||||
// section 5.4.
|
|
||||||
type OnePassSignature struct {
|
|
||||||
SigType SignatureType
|
|
||||||
Hash crypto.Hash
|
|
||||||
PubKeyAlgo PublicKeyAlgorithm
|
|
||||||
KeyId uint64
|
|
||||||
IsLast bool
|
|
||||||
}
|
|
||||||
|
|
||||||
const onePassSignatureVersion = 3
|
|
||||||
|
|
||||||
func (ops *OnePassSignature) parse(r io.Reader) (err error) {
|
|
||||||
var buf [13]byte
|
|
||||||
|
|
||||||
_, err = readFull(r, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] != onePassSignatureVersion {
|
|
||||||
err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
|
|
||||||
}
|
|
||||||
|
|
||||||
var ok bool
|
|
||||||
ops.Hash, ok = s2k.HashIdToHash(buf[2])
|
|
||||||
if !ok {
|
|
||||||
return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
|
|
||||||
}
|
|
||||||
|
|
||||||
ops.SigType = SignatureType(buf[1])
|
|
||||||
ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
|
|
||||||
ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
|
|
||||||
ops.IsLast = buf[12] != 0
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals the given OnePassSignature to w.
|
|
||||||
func (ops *OnePassSignature) Serialize(w io.Writer) error {
|
|
||||||
var buf [13]byte
|
|
||||||
buf[0] = onePassSignatureVersion
|
|
||||||
buf[1] = uint8(ops.SigType)
|
|
||||||
var ok bool
|
|
||||||
buf[2], ok = s2k.HashToHashId(ops.Hash)
|
|
||||||
if !ok {
|
|
||||||
return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
|
|
||||||
}
|
|
||||||
buf[3] = uint8(ops.PubKeyAlgo)
|
|
||||||
binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
|
|
||||||
if ops.IsLast {
|
|
||||||
buf[12] = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err := w.Write(buf[:])
|
|
||||||
return err
|
|
||||||
}
|
|
162
vendor/golang.org/x/crypto/openpgp/packet/opaque.go
generated
vendored
162
vendor/golang.org/x/crypto/openpgp/packet/opaque.go
generated
vendored
@ -1,162 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
|
|
||||||
// useful for splitting and storing the original packet contents separately,
|
|
||||||
// handling unsupported packet types or accessing parts of the packet not yet
|
|
||||||
// implemented by this package.
|
|
||||||
type OpaquePacket struct {
|
|
||||||
// Packet type
|
|
||||||
Tag uint8
|
|
||||||
// Reason why the packet was parsed opaquely
|
|
||||||
Reason error
|
|
||||||
// Binary contents of the packet data
|
|
||||||
Contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (op *OpaquePacket) parse(r io.Reader) (err error) {
|
|
||||||
op.Contents, err = ioutil.ReadAll(r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals the packet to a writer in its original form, including
|
|
||||||
// the packet header.
|
|
||||||
func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
|
|
||||||
err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
|
|
||||||
if err == nil {
|
|
||||||
_, err = w.Write(op.Contents)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse attempts to parse the opaque contents into a structure supported by
|
|
||||||
// this package. If the packet is not known then the result will be another
|
|
||||||
// OpaquePacket.
|
|
||||||
func (op *OpaquePacket) Parse() (p Packet, err error) {
|
|
||||||
hdr := bytes.NewBuffer(nil)
|
|
||||||
err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
|
|
||||||
if err != nil {
|
|
||||||
op.Reason = err
|
|
||||||
return op, err
|
|
||||||
}
|
|
||||||
p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
|
|
||||||
if err != nil {
|
|
||||||
op.Reason = err
|
|
||||||
p = op
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpaqueReader reads OpaquePackets from an io.Reader.
|
|
||||||
type OpaqueReader struct {
|
|
||||||
r io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewOpaqueReader(r io.Reader) *OpaqueReader {
|
|
||||||
return &OpaqueReader{r: r}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read the next OpaquePacket.
|
|
||||||
func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
|
|
||||||
tag, _, contents, err := readHeader(or.r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
op = &OpaquePacket{Tag: uint8(tag), Reason: err}
|
|
||||||
err = op.parse(contents)
|
|
||||||
if err != nil {
|
|
||||||
consumeAll(contents)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
|
|
||||||
// as found in signature and user attribute packets.
|
|
||||||
type OpaqueSubpacket struct {
|
|
||||||
SubType uint8
|
|
||||||
Contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
|
|
||||||
// their byte representation.
|
|
||||||
func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
|
|
||||||
var (
|
|
||||||
subHeaderLen int
|
|
||||||
subPacket *OpaqueSubpacket
|
|
||||||
)
|
|
||||||
for len(contents) > 0 {
|
|
||||||
subHeaderLen, subPacket, err = nextSubpacket(contents)
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
result = append(result, subPacket)
|
|
||||||
contents = contents[subHeaderLen+len(subPacket.Contents):]
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
|
|
||||||
// RFC 4880, section 5.2.3.1
|
|
||||||
var subLen uint32
|
|
||||||
if len(contents) < 1 {
|
|
||||||
goto Truncated
|
|
||||||
}
|
|
||||||
subPacket = &OpaqueSubpacket{}
|
|
||||||
switch {
|
|
||||||
case contents[0] < 192:
|
|
||||||
subHeaderLen = 2 // 1 length byte, 1 subtype byte
|
|
||||||
if len(contents) < subHeaderLen {
|
|
||||||
goto Truncated
|
|
||||||
}
|
|
||||||
subLen = uint32(contents[0])
|
|
||||||
contents = contents[1:]
|
|
||||||
case contents[0] < 255:
|
|
||||||
subHeaderLen = 3 // 2 length bytes, 1 subtype
|
|
||||||
if len(contents) < subHeaderLen {
|
|
||||||
goto Truncated
|
|
||||||
}
|
|
||||||
subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
|
|
||||||
contents = contents[2:]
|
|
||||||
default:
|
|
||||||
subHeaderLen = 6 // 5 length bytes, 1 subtype
|
|
||||||
if len(contents) < subHeaderLen {
|
|
||||||
goto Truncated
|
|
||||||
}
|
|
||||||
subLen = uint32(contents[1])<<24 |
|
|
||||||
uint32(contents[2])<<16 |
|
|
||||||
uint32(contents[3])<<8 |
|
|
||||||
uint32(contents[4])
|
|
||||||
contents = contents[5:]
|
|
||||||
}
|
|
||||||
if subLen > uint32(len(contents)) || subLen == 0 {
|
|
||||||
goto Truncated
|
|
||||||
}
|
|
||||||
subPacket.SubType = contents[0]
|
|
||||||
subPacket.Contents = contents[1:subLen]
|
|
||||||
return
|
|
||||||
Truncated:
|
|
||||||
err = errors.StructuralError("subpacket truncated")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
|
|
||||||
buf := make([]byte, 6)
|
|
||||||
n := serializeSubpacketLength(buf, len(osp.Contents)+1)
|
|
||||||
buf[n] = osp.SubType
|
|
||||||
if _, err = w.Write(buf[:n+1]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = w.Write(osp.Contents)
|
|
||||||
return
|
|
||||||
}
|
|
551
vendor/golang.org/x/crypto/openpgp/packet/packet.go
generated
vendored
551
vendor/golang.org/x/crypto/openpgp/packet/packet.go
generated
vendored
@ -1,551 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package packet implements parsing and serialization of OpenPGP packets, as
|
|
||||||
// specified in RFC 4880.
|
|
||||||
package packet // import "golang.org/x/crypto/openpgp/packet"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"crypto/aes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/des"
|
|
||||||
"crypto/rsa"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/cast5"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// readFull is the same as io.ReadFull except that reading zero bytes returns
|
|
||||||
// ErrUnexpectedEOF rather than EOF.
|
|
||||||
func readFull(r io.Reader, buf []byte) (n int, err error) {
|
|
||||||
n, err = io.ReadFull(r, buf)
|
|
||||||
if err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
|
|
||||||
func readLength(r io.Reader) (length int64, isPartial bool, err error) {
|
|
||||||
var buf [4]byte
|
|
||||||
_, err = readFull(r, buf[:1])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case buf[0] < 192:
|
|
||||||
length = int64(buf[0])
|
|
||||||
case buf[0] < 224:
|
|
||||||
length = int64(buf[0]-192) << 8
|
|
||||||
_, err = readFull(r, buf[0:1])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
length += int64(buf[0]) + 192
|
|
||||||
case buf[0] < 255:
|
|
||||||
length = int64(1) << (buf[0] & 0x1f)
|
|
||||||
isPartial = true
|
|
||||||
default:
|
|
||||||
_, err = readFull(r, buf[0:4])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
length = int64(buf[0])<<24 |
|
|
||||||
int64(buf[1])<<16 |
|
|
||||||
int64(buf[2])<<8 |
|
|
||||||
int64(buf[3])
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
|
|
||||||
// The continuation lengths are parsed and removed from the stream and EOF is
|
|
||||||
// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
|
|
||||||
type partialLengthReader struct {
|
|
||||||
r io.Reader
|
|
||||||
remaining int64
|
|
||||||
isPartial bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *partialLengthReader) Read(p []byte) (n int, err error) {
|
|
||||||
for r.remaining == 0 {
|
|
||||||
if !r.isPartial {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
r.remaining, r.isPartial, err = readLength(r.r)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
toRead := int64(len(p))
|
|
||||||
if toRead > r.remaining {
|
|
||||||
toRead = r.remaining
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = r.r.Read(p[:int(toRead)])
|
|
||||||
r.remaining -= int64(n)
|
|
||||||
if n < int(toRead) && err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
|
|
||||||
// See RFC 4880, section 4.2.2.4.
|
|
||||||
type partialLengthWriter struct {
|
|
||||||
w io.WriteCloser
|
|
||||||
lengthByte [1]byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write emits p as a sequence of OpenPGP partial-length chunks. Each chunk
// length must be a power of two (RFC 4880, section 4.2.2.4), so the input is
// carved greedily into the largest power-of-two chunks, up to 2^14, that fit.
// NOTE(review): RFC 4880 says the first partial length must be at least 512
// octets; short Writes here can emit smaller chunks — confirm callers buffer
// accordingly.
func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
	for len(p) > 0 {
		// power counts down from 14. Decrementing the uint past zero wraps
		// around, making `power < 32` false — but since l == 1 at power == 0
		// and len(p) > 0, the inner loop always finds a chunk and breaks.
		for power := uint(14); power < 32; power-- {
			l := 1 << power
			if len(p) >= l {
				// Partial-length octet: 224 + exponent (RFC 4880, 4.2.2.4).
				w.lengthByte[0] = 224 + uint8(power)
				_, err = w.w.Write(w.lengthByte[:])
				if err != nil {
					return
				}
				var m int
				m, err = w.w.Write(p[:l])
				n += m
				if err != nil {
					return
				}
				p = p[l:]
				break
			}
		}
	}
	return
}
|
|
||||||
|
|
||||||
func (w *partialLengthWriter) Close() error {
|
|
||||||
w.lengthByte[0] = 0
|
|
||||||
_, err := w.w.Write(w.lengthByte[:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return w.w.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
// underlying Reader returns EOF before the limit has been reached.
type spanReader struct {
	r io.Reader // underlying stream
	n int64     // bytes remaining in the span
}
|
|
||||||
|
|
||||||
func (l *spanReader) Read(p []byte) (n int, err error) {
|
|
||||||
if l.n <= 0 {
|
|
||||||
return 0, io.EOF
|
|
||||||
}
|
|
||||||
if int64(len(p)) > l.n {
|
|
||||||
p = p[0:l.n]
|
|
||||||
}
|
|
||||||
n, err = l.r.Read(p)
|
|
||||||
l.n -= int64(n)
|
|
||||||
if l.n > 0 && err == io.EOF {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// readHeader parses a packet header and returns an io.Reader which will return
// the contents of the packet. See RFC 4880, section 4.2. length is -1 when the
// total length is unknown (old-format indeterminate or new-format partial).
func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
	var buf [4]byte
	_, err = io.ReadFull(r, buf[:1])
	if err != nil {
		return
	}
	// Bit 7 of the tag octet must always be set (RFC 4880, section 4.2).
	if buf[0]&0x80 == 0 {
		err = errors.StructuralError("tag byte does not have MSB set")
		return
	}
	if buf[0]&0x40 == 0 {
		// Old format packet: bits 5-2 carry the tag, bits 1-0 the length type.
		tag = packetType((buf[0] & 0x3f) >> 2)
		lengthType := buf[0] & 3
		if lengthType == 3 {
			// Indeterminate length: the packet runs to the end of the stream.
			length = -1
			contents = r
			return
		}
		lengthBytes := 1 << lengthType // 1, 2 or 4 big-endian length octets
		_, err = readFull(r, buf[0:lengthBytes])
		if err != nil {
			return
		}
		// Assemble the big-endian length.
		for i := 0; i < lengthBytes; i++ {
			length <<= 8
			length |= int64(buf[i])
		}
		contents = &spanReader{r, length}
		return
	}

	// New format packet: bits 5-0 carry the tag; a length encoding follows.
	tag = packetType(buf[0] & 0x3f)
	length, isPartial, err := readLength(r)
	if err != nil {
		return
	}
	if isPartial {
		contents = &partialLengthReader{
			remaining: length,
			isPartial: true,
			r:         r,
		}
		// Total length is unknown until the partial chunks are consumed.
		length = -1
	} else {
		contents = &spanReader{r, length}
	}
	return
}
|
|
||||||
|
|
||||||
// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
|
|
||||||
// 4.2.
|
|
||||||
func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
|
|
||||||
var buf [6]byte
|
|
||||||
var n int
|
|
||||||
|
|
||||||
buf[0] = 0x80 | 0x40 | byte(ptype)
|
|
||||||
if length < 192 {
|
|
||||||
buf[1] = byte(length)
|
|
||||||
n = 2
|
|
||||||
} else if length < 8384 {
|
|
||||||
length -= 192
|
|
||||||
buf[1] = 192 + byte(length>>8)
|
|
||||||
buf[2] = byte(length)
|
|
||||||
n = 3
|
|
||||||
} else {
|
|
||||||
buf[1] = 255
|
|
||||||
buf[2] = byte(length >> 24)
|
|
||||||
buf[3] = byte(length >> 16)
|
|
||||||
buf[4] = byte(length >> 8)
|
|
||||||
buf[5] = byte(length)
|
|
||||||
n = 6
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = w.Write(buf[:n])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// serializeStreamHeader writes an OpenPGP packet header to w where the
|
|
||||||
// length of the packet is unknown. It returns a io.WriteCloser which can be
|
|
||||||
// used to write the contents of the packet. See RFC 4880, section 4.2.
|
|
||||||
func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
|
|
||||||
var buf [1]byte
|
|
||||||
buf[0] = 0x80 | 0x40 | byte(ptype)
|
|
||||||
_, err = w.Write(buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
out = &partialLengthWriter{w: w}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Packet represents an OpenPGP packet. Users are expected to try casting
// instances of this interface to specific packet types.
type Packet interface {
	// parse reads the packet body from r and populates the receiver.
	parse(io.Reader) error
}
|
|
||||||
|
|
||||||
// consumeAll reads from the given Reader until error, returning the number of
// bytes read. A terminating io.EOF is treated as success and not reported.
func consumeAll(r io.Reader) (int64, error) {
	var (
		total   int64
		scratch [1024]byte
	)
	for {
		m, err := r.Read(scratch[:])
		total += int64(m)
		switch err {
		case nil:
			// Keep draining.
		case io.EOF:
			return total, nil
		default:
			return total, err
		}
	}
}
|
|
||||||
|
|
||||||
// packetType represents the numeric ids of the different OpenPGP packet types. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
type packetType uint8

// Packet tag values from RFC 4880, section 4.3. Tags 10, 12, 15 and 16 are
// not handled by this package and are absent here.
const (
	packetTypeEncryptedKey              packetType = 1
	packetTypeSignature                 packetType = 2
	packetTypeSymmetricKeyEncrypted     packetType = 3
	packetTypeOnePassSignature          packetType = 4
	packetTypePrivateKey                packetType = 5
	packetTypePublicKey                 packetType = 6
	packetTypePrivateSubkey             packetType = 7
	packetTypeCompressed                packetType = 8
	packetTypeSymmetricallyEncrypted    packetType = 9
	packetTypeLiteralData               packetType = 11
	packetTypeUserId                    packetType = 13
	packetTypePublicSubkey              packetType = 14
	packetTypeUserAttribute             packetType = 17
	packetTypeSymmetricallyEncryptedMDC packetType = 18
)
|
|
||||||
|
|
||||||
// peekVersion detects the version of a public key packet about to
// be read, without consuming any input. A bufio.Reader at the original
// position of the io.Reader is returned.
func peekVersion(r io.Reader) (*bufio.Reader, byte, error) {
	br := bufio.NewReader(r)
	head, err := br.Peek(1)
	if err != nil {
		return br, 0, err
	}
	return br, head[0], nil
}
|
|
||||||
|
|
||||||
// Read reads a single OpenPGP packet from the given io.Reader. If there is an
// error parsing a packet, the whole packet is consumed from the input.
func Read(r io.Reader) (p Packet, err error) {
	tag, _, contents, err := readHeader(r)
	if err != nil {
		return
	}

	// Allocate the concrete packet value for this tag; parsing happens below.
	switch tag {
	case packetTypeEncryptedKey:
		p = new(EncryptedKey)
	case packetTypeSignature:
		var version byte
		// Detect signature version: v3 and v4 signatures have different
		// wire formats and therefore different concrete types.
		if contents, version, err = peekVersion(contents); err != nil {
			return
		}
		if version < 4 {
			p = new(SignatureV3)
		} else {
			p = new(Signature)
		}
	case packetTypeSymmetricKeyEncrypted:
		p = new(SymmetricKeyEncrypted)
	case packetTypeOnePassSignature:
		p = new(OnePassSignature)
	case packetTypePrivateKey, packetTypePrivateSubkey:
		pk := new(PrivateKey)
		if tag == packetTypePrivateSubkey {
			pk.IsSubkey = true
		}
		p = pk
	case packetTypePublicKey, packetTypePublicSubkey:
		// As with signatures, the version byte selects v3 vs v4 key types.
		var version byte
		if contents, version, err = peekVersion(contents); err != nil {
			return
		}
		isSubkey := tag == packetTypePublicSubkey
		if version < 4 {
			p = &PublicKeyV3{IsSubkey: isSubkey}
		} else {
			p = &PublicKey{IsSubkey: isSubkey}
		}
	case packetTypeCompressed:
		p = new(Compressed)
	case packetTypeSymmetricallyEncrypted:
		p = new(SymmetricallyEncrypted)
	case packetTypeLiteralData:
		p = new(LiteralData)
	case packetTypeUserId:
		p = new(UserId)
	case packetTypeUserAttribute:
		p = new(UserAttribute)
	case packetTypeSymmetricallyEncryptedMDC:
		se := new(SymmetricallyEncrypted)
		se.MDC = true
		p = se
	default:
		err = errors.UnknownPacketTypeError(tag)
	}
	if p != nil {
		err = p.parse(contents)
	}
	if err != nil {
		// Position the reader at the next packet; any error from the
		// drain itself is deliberately discarded.
		consumeAll(contents)
	}
	return
}
|
|
||||||
|
|
||||||
// SignatureType represents the different semantic meanings of an OpenPGP
// signature. See RFC 4880, section 5.2.1.
type SignatureType uint8

// NOTE(review): only SigTypeBinary is explicitly typed; the remaining
// constants are untyped integer constants (kept as-is for compatibility).
const (
	SigTypeBinary            SignatureType = 0
	SigTypeText                            = 1
	SigTypeGenericCert                     = 0x10
	SigTypePersonaCert                     = 0x11
	SigTypeCasualCert                      = 0x12
	SigTypePositiveCert                    = 0x13
	SigTypeSubkeyBinding                   = 0x18
	SigTypePrimaryKeyBinding               = 0x19
	SigTypeDirectSignature                 = 0x1F
	SigTypeKeyRevocation                   = 0x20
	SigTypeSubkeyRevocation                = 0x28
)
|
|
||||||
|
|
||||||
// PublicKeyAlgorithm represents the different public key system specified for
// OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
type PublicKeyAlgorithm uint8

const (
	PubKeyAlgoRSA     PublicKeyAlgorithm = 1
	PubKeyAlgoElGamal PublicKeyAlgorithm = 16
	PubKeyAlgoDSA     PublicKeyAlgorithm = 17
	// RFC 6637, Section 5.
	PubKeyAlgoECDH  PublicKeyAlgorithm = 18
	PubKeyAlgoECDSA PublicKeyAlgorithm = 19

	// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
	PubKeyAlgoRSASignOnly    PublicKeyAlgorithm = 3
)
|
|
||||||
|
|
||||||
// CanEncrypt returns true if it's possible to encrypt a message to a public
|
|
||||||
// key of the given type.
|
|
||||||
func (pka PublicKeyAlgorithm) CanEncrypt() bool {
|
|
||||||
switch pka {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanSign returns true if it's possible for a public key of the given type to
|
|
||||||
// sign a message.
|
|
||||||
func (pka PublicKeyAlgorithm) CanSign() bool {
|
|
||||||
switch pka {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// CipherFunction represents the different block ciphers specified for OpenPGP. See
// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
type CipherFunction uint8

const (
	Cipher3DES   CipherFunction = 2
	CipherCAST5  CipherFunction = 3
	CipherAES128 CipherFunction = 7
	CipherAES192 CipherFunction = 8
	CipherAES256 CipherFunction = 9
)
|
|
||||||
|
|
||||||
// KeySize returns the key size, in bytes, of cipher.
|
|
||||||
func (cipher CipherFunction) KeySize() int {
|
|
||||||
switch cipher {
|
|
||||||
case Cipher3DES:
|
|
||||||
return 24
|
|
||||||
case CipherCAST5:
|
|
||||||
return cast5.KeySize
|
|
||||||
case CipherAES128:
|
|
||||||
return 16
|
|
||||||
case CipherAES192:
|
|
||||||
return 24
|
|
||||||
case CipherAES256:
|
|
||||||
return 32
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// blockSize returns the block size, in bytes, of cipher.
|
|
||||||
func (cipher CipherFunction) blockSize() int {
|
|
||||||
switch cipher {
|
|
||||||
case Cipher3DES:
|
|
||||||
return des.BlockSize
|
|
||||||
case CipherCAST5:
|
|
||||||
return 8
|
|
||||||
case CipherAES128, CipherAES192, CipherAES256:
|
|
||||||
return 16
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// new returns a fresh instance of the given cipher, or nil for an unknown
// cipher id. Constructor errors are discarded; callers are expected to pass
// a key of exactly KeySize() bytes. (The receiver name shadows the
// crypto/cipher package inside the body only; the cipher.Block result type
// in the signature still resolves to the package.)
func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
	switch cipher {
	case Cipher3DES:
		block, _ = des.NewTripleDESCipher(key)
	case CipherCAST5:
		block, _ = cast5.NewCipher(key)
	case CipherAES128, CipherAES192, CipherAES256:
		block, _ = aes.NewCipher(key)
	}
	return
}
|
|
||||||
|
|
||||||
// readMPI reads a big integer from r. The bit length returned is the bit
|
|
||||||
// length that was specified in r. This is preserved so that the integer can be
|
|
||||||
// reserialized exactly.
|
|
||||||
func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
|
|
||||||
var buf [2]byte
|
|
||||||
_, err = readFull(r, buf[0:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bitLength = uint16(buf[0])<<8 | uint16(buf[1])
|
|
||||||
numBytes := (int(bitLength) + 7) / 8
|
|
||||||
mpi = make([]byte, numBytes)
|
|
||||||
_, err = readFull(r, mpi)
|
|
||||||
// According to RFC 4880 3.2. we should check that the MPI has no leading
|
|
||||||
// zeroes (at least when not an encrypted MPI?), but this implementation
|
|
||||||
// does generate leading zeroes, so we keep accepting them.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeMPI serializes a big integer to w: a two-octet big-endian bit count
// followed by the magnitude bytes.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) error {
	// Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
	// Implementations seem to be tolerant of them, and stripping them would
	// make it complex to guarantee matching re-serialization.
	header := []byte{byte(bitLength >> 8), byte(bitLength)}
	if _, err := w.Write(header); err != nil {
		return err
	}
	_, err := w.Write(mpiBytes)
	return err
}
|
|
||||||
|
|
||||||
// writeBig serializes a *big.Int to w as an OpenPGP MPI, using the integer's
// minimal bit length as the declared length.
func writeBig(w io.Writer, i *big.Int) error {
	return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
|
|
||||||
|
|
||||||
// padToKeySize left-pads a MPI with zeroes to match the length of the
|
|
||||||
// specified RSA public.
|
|
||||||
func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
|
|
||||||
k := (pub.N.BitLen() + 7) / 8
|
|
||||||
if len(b) >= k {
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
bb := make([]byte, k)
|
|
||||||
copy(bb[len(bb)-len(b):], b)
|
|
||||||
return bb
|
|
||||||
}
|
|
||||||
|
|
||||||
// CompressionAlgo Represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
type CompressionAlgo uint8

const (
	CompressionNone CompressionAlgo = 0
	CompressionZIP  CompressionAlgo = 1
	CompressionZLIB CompressionAlgo = 2
)
|
|
385
vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
385
vendor/golang.org/x/crypto/openpgp/packet/private_key.go
generated
vendored
@ -1,385 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/dsa"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/elgamal"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PrivateKey represents a possibly encrypted private key. See RFC 4880,
// section 5.5.3.
type PrivateKey struct {
	PublicKey
	Encrypted     bool   // if true then the private key is unavailable until Decrypt has been called.
	encryptedData []byte // raw secret-material bytes (ciphertext while Encrypted)
	cipher        CipherFunction
	s2k           func(out, in []byte) // derives the symmetric key from a passphrase
	PrivateKey    interface{}          // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
	sha1Checksum  bool                 // true when the key material is protected by a SHA-1 hash (s2k type 254)
	iv            []byte               // CFB IV for the encrypted key material
}
|
|
||||||
|
|
||||||
func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
|
|
||||||
pk := new(PrivateKey)
|
|
||||||
pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
|
|
||||||
pk.PrivateKey = priv
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
|
|
||||||
pk := new(PrivateKey)
|
|
||||||
pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
|
|
||||||
pk.PrivateKey = priv
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
|
|
||||||
pk := new(PrivateKey)
|
|
||||||
pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
|
|
||||||
pk.PrivateKey = priv
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
|
|
||||||
pk := new(PrivateKey)
|
|
||||||
pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
|
|
||||||
pk.PrivateKey = priv
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
// implements RSA or ECDSA. It panics for any other signer type, as that is
// a programmer error rather than a runtime condition.
func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
	pk := new(PrivateKey)
	// In general, the public Keys should be used as pointers. We still
	// type-switch on the values, for backwards-compatibility.
	switch pubkey := signer.Public().(type) {
	case *rsa.PublicKey:
		pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
	case rsa.PublicKey:
		pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
	case *ecdsa.PublicKey:
		pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
	case ecdsa.PublicKey:
		pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
	default:
		panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
	}
	pk.PrivateKey = signer
	return pk
}
|
|
||||||
|
|
||||||
// parse reads a private key packet body from r: the public key fields, the
// S2K usage octet, optional cipher/S2K/IV material, and the (possibly
// encrypted) secret key data. Unencrypted keys are fully parsed immediately;
// encrypted keys are held until Decrypt is called. See RFC 4880, 5.5.3.
func (pk *PrivateKey) parse(r io.Reader) (err error) {
	err = (&pk.PublicKey).parse(r)
	if err != nil {
		return
	}
	var buf [1]byte
	_, err = readFull(r, buf[:])
	if err != nil {
		return
	}

	// S2K usage octet: 0 = plaintext, 254/255 = S2K-derived key follows.
	s2kType := buf[0]

	switch s2kType {
	case 0:
		// Secret material is stored in the clear.
		pk.s2k = nil
		pk.Encrypted = false
	case 254, 255:
		// A one-octet cipher id and an S2K specifier follow.
		_, err = readFull(r, buf[:])
		if err != nil {
			return
		}
		pk.cipher = CipherFunction(buf[0])
		pk.Encrypted = true
		pk.s2k, err = s2k.Parse(r)
		if err != nil {
			return
		}
		if s2kType == 254 {
			// Type 254 protects the material with a SHA-1 hash rather
			// than the legacy 16-bit checksum.
			pk.sha1Checksum = true
		}
	default:
		return errors.UnsupportedError("deprecated s2k function in private key")
	}

	if pk.Encrypted {
		blockSize := pk.cipher.blockSize()
		if blockSize == 0 {
			return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
		}
		// The CFB IV immediately precedes the encrypted material.
		pk.iv = make([]byte, blockSize)
		_, err = readFull(r, pk.iv)
		if err != nil {
			return
		}
	}

	pk.encryptedData, err = ioutil.ReadAll(r)
	if err != nil {
		return
	}

	if !pk.Encrypted {
		return pk.parsePrivateKey(pk.encryptedData)
	}

	return
}
|
|
||||||
|
|
||||||
// mod64kHash returns the 16-bit additive checksum (sum of octets mod 65536)
// used to protect private key material (RFC 4880, section 5.5.3).
func mod64kHash(d []byte) uint16 {
	var sum uint16
	for i := range d {
		sum += uint16(d[i]) // wraps naturally at 2^16
	}
	return sum
}
|
|
||||||
|
|
||||||
// Serialize writes pk to w as an unencrypted private key or private subkey
// packet (S2K usage octet 0), followed by the 16-bit additive checksum over
// the secret material. Encryption on output is not yet supported.
func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
	// TODO(agl): support encrypted private keys
	buf := bytes.NewBuffer(nil)
	err = pk.PublicKey.serializeWithoutHeaders(buf)
	if err != nil {
		return
	}
	buf.WriteByte(0 /* no encryption */)

	privateKeyBuf := bytes.NewBuffer(nil)

	// Algorithm-specific secret fields are buffered first so the total
	// packet length is known before the header is written.
	switch priv := pk.PrivateKey.(type) {
	case *rsa.PrivateKey:
		err = serializeRSAPrivateKey(privateKeyBuf, priv)
	case *dsa.PrivateKey:
		err = serializeDSAPrivateKey(privateKeyBuf, priv)
	case *elgamal.PrivateKey:
		err = serializeElGamalPrivateKey(privateKeyBuf, priv)
	case *ecdsa.PrivateKey:
		err = serializeECDSAPrivateKey(privateKeyBuf, priv)
	default:
		err = errors.InvalidArgumentError("unknown private key type")
	}
	if err != nil {
		return
	}

	ptype := packetTypePrivateKey
	contents := buf.Bytes()
	privateKeyBytes := privateKeyBuf.Bytes()
	if pk.IsSubkey {
		ptype = packetTypePrivateSubkey
	}
	// +2 accounts for the trailing 16-bit checksum.
	err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
	if err != nil {
		return
	}
	_, err = w.Write(contents)
	if err != nil {
		return
	}
	_, err = w.Write(privateKeyBytes)
	if err != nil {
		return
	}

	checksum := mod64kHash(privateKeyBytes)
	var checksumBytes [2]byte
	checksumBytes[0] = byte(checksum >> 8)
	checksumBytes[1] = byte(checksum)
	_, err = w.Write(checksumBytes[:])

	return
}
|
|
||||||
|
|
||||||
// serializeRSAPrivateKey writes the RSA secret fields in OpenPGP order:
// d, p, q, u where u = p^-1 mod q (RFC 4880, section 5.5.3). Go stores
// Primes as {p, q} and precomputes Qinv = q^-1 mod p, so the primes are
// written swapped (Primes[1] before Primes[0]) to make Precomputed.Qinv
// line up with OpenPGP's u.
func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
	err := writeBig(w, priv.D)
	if err != nil {
		return err
	}
	err = writeBig(w, priv.Primes[1])
	if err != nil {
		return err
	}
	err = writeBig(w, priv.Primes[0])
	if err != nil {
		return err
	}
	return writeBig(w, priv.Precomputed.Qinv)
}
|
|
||||||
|
|
||||||
// serializeDSAPrivateKey writes the single DSA secret field x as an MPI.
func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
	return writeBig(w, priv.X)
}
|
|
||||||
|
|
||||||
// serializeElGamalPrivateKey writes the single ElGamal secret field x as an MPI.
func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
	return writeBig(w, priv.X)
}
|
|
||||||
|
|
||||||
// serializeECDSAPrivateKey writes the single ECDSA secret scalar d as an MPI.
func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
	return writeBig(w, priv.D)
}
|
|
||||||
|
|
||||||
// Decrypt decrypts an encrypted private key using a passphrase. It derives
// the symmetric key via the stored S2K function, CFB-decrypts the key
// material, verifies the trailing SHA-1 hash or 16-bit checksum, and then
// parses the algorithm-specific fields. No-op if the key is not encrypted.
func (pk *PrivateKey) Decrypt(passphrase []byte) error {
	if !pk.Encrypted {
		return nil
	}

	key := make([]byte, pk.cipher.KeySize())
	pk.s2k(key, passphrase)
	block := pk.cipher.new(key)
	cfb := cipher.NewCFBDecrypter(block, pk.iv)

	data := make([]byte, len(pk.encryptedData))
	cfb.XORKeyStream(data, pk.encryptedData)

	if pk.sha1Checksum {
		// S2K type 254: the last 20 bytes are a SHA-1 over the cleartext.
		if len(data) < sha1.Size {
			return errors.StructuralError("truncated private key data")
		}
		h := sha1.New()
		h.Write(data[:len(data)-sha1.Size])
		sum := h.Sum(nil)
		if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
			return errors.StructuralError("private key checksum failure")
		}
		data = data[:len(data)-sha1.Size]
	} else {
		// Otherwise the last two bytes are a 16-bit additive checksum.
		if len(data) < 2 {
			return errors.StructuralError("truncated private key data")
		}
		var sum uint16
		for i := 0; i < len(data)-2; i++ {
			sum += uint16(data[i])
		}
		if data[len(data)-2] != uint8(sum>>8) ||
			data[len(data)-1] != uint8(sum) {
			return errors.StructuralError("private key checksum failure")
		}
		data = data[:len(data)-2]
	}

	return pk.parsePrivateKey(data)
}
|
|
||||||
|
|
||||||
func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
|
|
||||||
switch pk.PublicKey.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
|
|
||||||
return pk.parseRSAPrivateKey(data)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
return pk.parseDSAPrivateKey(data)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
return pk.parseElGamalPrivateKey(data)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
return pk.parseECDSAPrivateKey(data)
|
|
||||||
}
|
|
||||||
panic("impossible")
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseRSAPrivateKey parses the secret fields d, p, q (RFC 4880, 5.5.3) from
// data, validates the key against its public half and installs it on pk. Any
// remaining bytes (the serialized u value) are ignored; CRT values are
// recomputed via Precompute instead.
func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
	rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
	rsaPriv := new(rsa.PrivateKey)
	rsaPriv.PublicKey = *rsaPub

	buf := bytes.NewBuffer(data)
	d, _, err := readMPI(buf)
	if err != nil {
		return
	}
	p, _, err := readMPI(buf)
	if err != nil {
		return
	}
	q, _, err := readMPI(buf)
	if err != nil {
		return
	}

	rsaPriv.D = new(big.Int).SetBytes(d)
	rsaPriv.Primes = make([]*big.Int, 2)
	rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
	rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
	// Validate rejects inconsistent keys (e.g. p*q != N).
	if err := rsaPriv.Validate(); err != nil {
		return err
	}
	rsaPriv.Precompute()
	pk.PrivateKey = rsaPriv
	pk.Encrypted = false
	pk.encryptedData = nil

	return nil
}
|
|
||||||
|
|
||||||
func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
|
|
||||||
dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
|
|
||||||
dsaPriv := new(dsa.PrivateKey)
|
|
||||||
dsaPriv.PublicKey = *dsaPub
|
|
||||||
|
|
||||||
buf := bytes.NewBuffer(data)
|
|
||||||
x, _, err := readMPI(buf)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
dsaPriv.X = new(big.Int).SetBytes(x)
|
|
||||||
pk.PrivateKey = dsaPriv
|
|
||||||
pk.Encrypted = false
|
|
||||||
pk.encryptedData = nil
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
|
|
||||||
pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
|
|
||||||
priv := new(elgamal.PrivateKey)
|
|
||||||
priv.PublicKey = *pub
|
|
||||||
|
|
||||||
buf := bytes.NewBuffer(data)
|
|
||||||
x, _, err := readMPI(buf)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
priv.X = new(big.Int).SetBytes(x)
|
|
||||||
pk.PrivateKey = priv
|
|
||||||
pk.Encrypted = false
|
|
||||||
pk.encryptedData = nil
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseECDSAPrivateKey parses the secret scalar d from data and installs an
// *ecdsa.PrivateKey on pk. NOTE(review): d is not checked against the public
// point here — confirm upstream callers rely on signature verification
// instead.
func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
	ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)

	buf := bytes.NewBuffer(data)
	d, _, err := readMPI(buf)
	if err != nil {
		return
	}

	pk.PrivateKey = &ecdsa.PrivateKey{
		PublicKey: *ecdsaPub,
		D:         new(big.Int).SetBytes(d),
	}
	pk.Encrypted = false
	pk.encryptedData = nil

	return nil
}
|
|
753
vendor/golang.org/x/crypto/openpgp/packet/public_key.go
generated
vendored
753
vendor/golang.org/x/crypto/openpgp/packet/public_key.go
generated
vendored
@ -1,753 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/dsa"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha1"
|
|
||||||
_ "crypto/sha256"
|
|
||||||
_ "crypto/sha512"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/elgamal"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OID byte sequences identifying the supported elliptic curves
// (RFC 6637, section 11).
var (
	// NIST curve P-256
	oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
	// NIST curve P-384
	oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
	// NIST curve P-521
	oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
)
|
|
||||||
|
|
||||||
// maxOIDLength is the largest curve OID, in bytes, accepted by parseOID.
const maxOIDLength = 8
|
|
||||||
|
|
||||||
// ecdsaKey stores the algorithm-specific fields for ECDSA keys.
// as defined in RFC 6637, Section 9.
type ecdsaKey struct {
	// oid contains the OID byte sequence identifying the elliptic curve used
	oid []byte
	// p contains the elliptic curve point that represents the public key
	p parsedMPI
}
|
|
||||||
|
|
||||||
// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
|
|
||||||
func parseOID(r io.Reader) (oid []byte, err error) {
|
|
||||||
buf := make([]byte, maxOIDLength)
|
|
||||||
if _, err = readFull(r, buf[:1]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
oidLen := buf[0]
|
|
||||||
if int(oidLen) > len(buf) {
|
|
||||||
err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
oid = buf[:oidLen]
|
|
||||||
_, err = readFull(r, oid)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *ecdsaKey) parse(r io.Reader) (err error) {
|
|
||||||
if f.oid, err = parseOID(r); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f.p.bytes, f.p.bitLength, err = readMPI(r)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *ecdsaKey) serialize(w io.Writer) (err error) {
|
|
||||||
buf := make([]byte, maxOIDLength+1)
|
|
||||||
buf[0] = byte(len(f.oid))
|
|
||||||
copy(buf[1:], f.oid)
|
|
||||||
if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return writeMPIs(w, f.p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// newECDSA maps the parsed OID to one of the supported NIST curves and
// unmarshals the stored point into an *ecdsa.PublicKey.
func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
	var c elliptic.Curve
	if bytes.Equal(f.oid, oidCurveP256) {
		c = elliptic.P256()
	} else if bytes.Equal(f.oid, oidCurveP384) {
		c = elliptic.P384()
	} else if bytes.Equal(f.oid, oidCurveP521) {
		c = elliptic.P521()
	} else {
		return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
	}
	// elliptic.Unmarshal signals a malformed point encoding with a nil x.
	x, y := elliptic.Unmarshal(c, f.p.bytes)
	if x == nil {
		return nil, errors.UnsupportedError("failed to parse EC point")
	}
	return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
}
|
|
||||||
|
|
||||||
func (f *ecdsaKey) byteLen() int {
|
|
||||||
return 1 + len(f.oid) + 2 + len(f.p.bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
type kdfHashFunction byte
|
|
||||||
type kdfAlgorithm byte
|
|
||||||
|
|
||||||
// ecdhKdf stores key derivation function parameters
|
|
||||||
// used for ECDH encryption. See RFC 6637, Section 9.
|
|
||||||
type ecdhKdf struct {
|
|
||||||
KdfHash kdfHashFunction
|
|
||||||
KdfAlgo kdfAlgorithm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *ecdhKdf) parse(r io.Reader) (err error) {
|
|
||||||
buf := make([]byte, 1)
|
|
||||||
if _, err = readFull(r, buf); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
kdfLen := int(buf[0])
|
|
||||||
if kdfLen < 3 {
|
|
||||||
return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
|
|
||||||
}
|
|
||||||
buf = make([]byte, kdfLen)
|
|
||||||
if _, err = readFull(r, buf); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
reserved := int(buf[0])
|
|
||||||
f.KdfHash = kdfHashFunction(buf[1])
|
|
||||||
f.KdfAlgo = kdfAlgorithm(buf[2])
|
|
||||||
if reserved != 0x01 {
|
|
||||||
return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *ecdhKdf) serialize(w io.Writer) (err error) {
|
|
||||||
buf := make([]byte, 4)
|
|
||||||
// See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
|
|
||||||
buf[0] = byte(0x03) // Length of the following fields
|
|
||||||
buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
|
|
||||||
buf[2] = byte(f.KdfHash)
|
|
||||||
buf[3] = byte(f.KdfAlgo)
|
|
||||||
_, err = w.Write(buf[:])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *ecdhKdf) byteLen() int {
|
|
||||||
return 4
|
|
||||||
}
|
|
||||||
|
|
||||||
// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
|
|
||||||
type PublicKey struct {
|
|
||||||
CreationTime time.Time
|
|
||||||
PubKeyAlgo PublicKeyAlgorithm
|
|
||||||
PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
|
|
||||||
Fingerprint [20]byte
|
|
||||||
KeyId uint64
|
|
||||||
IsSubkey bool
|
|
||||||
|
|
||||||
n, e, p, q, g, y parsedMPI
|
|
||||||
|
|
||||||
// RFC 6637 fields
|
|
||||||
ec *ecdsaKey
|
|
||||||
ecdh *ecdhKdf
|
|
||||||
}
|
|
||||||
|
|
||||||
// signingKey provides a convenient abstraction over signature verification
|
|
||||||
// for v3 and v4 public keys.
|
|
||||||
type signingKey interface {
|
|
||||||
SerializeSignaturePrefix(io.Writer)
|
|
||||||
serializeWithoutHeaders(io.Writer) error
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromBig(n *big.Int) parsedMPI {
|
|
||||||
return parsedMPI{
|
|
||||||
bytes: n.Bytes(),
|
|
||||||
bitLength: uint16(n.BitLen()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
|
|
||||||
func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
|
|
||||||
pk := &PublicKey{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
PubKeyAlgo: PubKeyAlgoRSA,
|
|
||||||
PublicKey: pub,
|
|
||||||
n: fromBig(pub.N),
|
|
||||||
e: fromBig(big.NewInt(int64(pub.E))),
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
|
|
||||||
func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
|
|
||||||
pk := &PublicKey{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
PubKeyAlgo: PubKeyAlgoDSA,
|
|
||||||
PublicKey: pub,
|
|
||||||
p: fromBig(pub.P),
|
|
||||||
q: fromBig(pub.Q),
|
|
||||||
g: fromBig(pub.G),
|
|
||||||
y: fromBig(pub.Y),
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
|
|
||||||
func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
|
|
||||||
pk := &PublicKey{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
PubKeyAlgo: PubKeyAlgoElGamal,
|
|
||||||
PublicKey: pub,
|
|
||||||
p: fromBig(pub.P),
|
|
||||||
g: fromBig(pub.G),
|
|
||||||
y: fromBig(pub.Y),
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
|
|
||||||
pk := &PublicKey{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
PubKeyAlgo: PubKeyAlgoECDSA,
|
|
||||||
PublicKey: pub,
|
|
||||||
ec: new(ecdsaKey),
|
|
||||||
}
|
|
||||||
|
|
||||||
switch pub.Curve {
|
|
||||||
case elliptic.P256():
|
|
||||||
pk.ec.oid = oidCurveP256
|
|
||||||
case elliptic.P384():
|
|
||||||
pk.ec.oid = oidCurveP384
|
|
||||||
case elliptic.P521():
|
|
||||||
pk.ec.oid = oidCurveP521
|
|
||||||
default:
|
|
||||||
panic("unknown elliptic curve")
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
|
|
||||||
|
|
||||||
// The bit length is 3 (for the 0x04 specifying an uncompressed key)
|
|
||||||
// plus two field elements (for x and y), which are rounded up to the
|
|
||||||
// nearest byte. See https://tools.ietf.org/html/rfc6637#section-6
|
|
||||||
fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7
|
|
||||||
pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes)
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKey) parse(r io.Reader) (err error) {
|
|
||||||
// RFC 4880, section 5.5.2
|
|
||||||
var buf [6]byte
|
|
||||||
_, err = readFull(r, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] != 4 {
|
|
||||||
return errors.UnsupportedError("public key version")
|
|
||||||
}
|
|
||||||
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
|
|
||||||
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
err = pk.parseRSA(r)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
err = pk.parseDSA(r)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
err = pk.parseElGamal(r)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
pk.ec = new(ecdsaKey)
|
|
||||||
if err = pk.ec.parse(r); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
pk.PublicKey, err = pk.ec.newECDSA()
|
|
||||||
case PubKeyAlgoECDH:
|
|
||||||
pk.ec = new(ecdsaKey)
|
|
||||||
if err = pk.ec.parse(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.ecdh = new(ecdhKdf)
|
|
||||||
if err = pk.ecdh.parse(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// The ECDH key is stored in an ecdsa.PublicKey for convenience.
|
|
||||||
pk.PublicKey, err = pk.ec.newECDSA()
|
|
||||||
default:
|
|
||||||
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKey) setFingerPrintAndKeyId() {
|
|
||||||
// RFC 4880, section 12.2
|
|
||||||
fingerPrint := sha1.New()
|
|
||||||
pk.SerializeSignaturePrefix(fingerPrint)
|
|
||||||
pk.serializeWithoutHeaders(fingerPrint)
|
|
||||||
copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
|
|
||||||
pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
|
|
||||||
// section 5.5.2.
|
|
||||||
func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
|
|
||||||
pk.n.bytes, pk.n.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.e.bytes, pk.e.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pk.e.bytes) > 3 {
|
|
||||||
err = errors.UnsupportedError("large public exponent")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rsa := &rsa.PublicKey{
|
|
||||||
N: new(big.Int).SetBytes(pk.n.bytes),
|
|
||||||
E: 0,
|
|
||||||
}
|
|
||||||
for i := 0; i < len(pk.e.bytes); i++ {
|
|
||||||
rsa.E <<= 8
|
|
||||||
rsa.E |= int(pk.e.bytes[i])
|
|
||||||
}
|
|
||||||
pk.PublicKey = rsa
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
|
|
||||||
// section 5.5.2.
|
|
||||||
func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
|
|
||||||
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.q.bytes, pk.q.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
dsa := new(dsa.PublicKey)
|
|
||||||
dsa.P = new(big.Int).SetBytes(pk.p.bytes)
|
|
||||||
dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
|
|
||||||
dsa.G = new(big.Int).SetBytes(pk.g.bytes)
|
|
||||||
dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
|
|
||||||
pk.PublicKey = dsa
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseElGamal parses ElGamal public key material from the given Reader. See
|
|
||||||
// RFC 4880, section 5.5.2.
|
|
||||||
func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
|
|
||||||
pk.p.bytes, pk.p.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.g.bytes, pk.g.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
pk.y.bytes, pk.y.bitLength, err = readMPI(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
elgamal := new(elgamal.PublicKey)
|
|
||||||
elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
|
|
||||||
elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
|
|
||||||
elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
|
|
||||||
pk.PublicKey = elgamal
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
|
|
||||||
// The prefix is used when calculating a signature over this public key. See
|
|
||||||
// RFC 4880, section 5.2.4.
|
|
||||||
func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
|
|
||||||
var pLength uint16
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
pLength += 2 + uint16(len(pk.n.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.e.bytes))
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
pLength += 2 + uint16(len(pk.p.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.q.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.g.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.y.bytes))
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
pLength += 2 + uint16(len(pk.p.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.g.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.y.bytes))
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
pLength += uint16(pk.ec.byteLen())
|
|
||||||
case PubKeyAlgoECDH:
|
|
||||||
pLength += uint16(pk.ec.byteLen())
|
|
||||||
pLength += uint16(pk.ecdh.byteLen())
|
|
||||||
default:
|
|
||||||
panic("unknown public key algorithm")
|
|
||||||
}
|
|
||||||
pLength += 6
|
|
||||||
h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKey) Serialize(w io.Writer) (err error) {
|
|
||||||
length := 6 // 6 byte header
|
|
||||||
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
length += 2 + len(pk.n.bytes)
|
|
||||||
length += 2 + len(pk.e.bytes)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
length += 2 + len(pk.p.bytes)
|
|
||||||
length += 2 + len(pk.q.bytes)
|
|
||||||
length += 2 + len(pk.g.bytes)
|
|
||||||
length += 2 + len(pk.y.bytes)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
length += 2 + len(pk.p.bytes)
|
|
||||||
length += 2 + len(pk.g.bytes)
|
|
||||||
length += 2 + len(pk.y.bytes)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
length += pk.ec.byteLen()
|
|
||||||
case PubKeyAlgoECDH:
|
|
||||||
length += pk.ec.byteLen()
|
|
||||||
length += pk.ecdh.byteLen()
|
|
||||||
default:
|
|
||||||
panic("unknown public key algorithm")
|
|
||||||
}
|
|
||||||
|
|
||||||
packetType := packetTypePublicKey
|
|
||||||
if pk.IsSubkey {
|
|
||||||
packetType = packetTypePublicSubkey
|
|
||||||
}
|
|
||||||
err = serializeHeader(w, packetType, length)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return pk.serializeWithoutHeaders(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
|
|
||||||
// OpenPGP public key packet, not including the packet header.
|
|
||||||
func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
|
|
||||||
var buf [6]byte
|
|
||||||
buf[0] = 4
|
|
||||||
t := uint32(pk.CreationTime.Unix())
|
|
||||||
buf[1] = byte(t >> 24)
|
|
||||||
buf[2] = byte(t >> 16)
|
|
||||||
buf[3] = byte(t >> 8)
|
|
||||||
buf[4] = byte(t)
|
|
||||||
buf[5] = byte(pk.PubKeyAlgo)
|
|
||||||
|
|
||||||
_, err = w.Write(buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
return writeMPIs(w, pk.n, pk.e)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
return writeMPIs(w, pk.p, pk.g, pk.y)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
return pk.ec.serialize(w)
|
|
||||||
case PubKeyAlgoECDH:
|
|
||||||
if err = pk.ec.serialize(w); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return pk.ecdh.serialize(w)
|
|
||||||
}
|
|
||||||
return errors.InvalidArgumentError("bad public-key algorithm")
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanSign returns true iff this public key can generate signatures
|
|
||||||
func (pk *PublicKey) CanSign() bool {
|
|
||||||
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignature returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, of the data hashed into signed. signed is mutated by this call.
|
|
||||||
func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
|
|
||||||
if !pk.CanSign() {
|
|
||||||
return errors.InvalidArgumentError("public key cannot generate signatures")
|
|
||||||
}
|
|
||||||
|
|
||||||
signed.Write(sig.HashSuffix)
|
|
||||||
hashBytes := signed.Sum(nil)
|
|
||||||
|
|
||||||
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
|
|
||||||
return errors.SignatureError("hash tag doesn't match")
|
|
||||||
}
|
|
||||||
|
|
||||||
if pk.PubKeyAlgo != sig.PubKeyAlgo {
|
|
||||||
return errors.InvalidArgumentError("public key and signature use different algorithms")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
|
|
||||||
err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
|
|
||||||
if err != nil {
|
|
||||||
return errors.SignatureError("RSA verification failure")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
|
|
||||||
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
|
|
||||||
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
|
|
||||||
if len(hashBytes) > subgroupSize {
|
|
||||||
hashBytes = hashBytes[:subgroupSize]
|
|
||||||
}
|
|
||||||
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
|
|
||||||
return errors.SignatureError("DSA verification failure")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
|
|
||||||
if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
|
|
||||||
return errors.SignatureError("ECDSA verification failure")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
return errors.SignatureError("Unsupported public key algorithm used in signature")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, of the data hashed into signed. signed is mutated by this call.
|
|
||||||
func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
|
|
||||||
if !pk.CanSign() {
|
|
||||||
return errors.InvalidArgumentError("public key cannot generate signatures")
|
|
||||||
}
|
|
||||||
|
|
||||||
suffix := make([]byte, 5)
|
|
||||||
suffix[0] = byte(sig.SigType)
|
|
||||||
binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
|
|
||||||
signed.Write(suffix)
|
|
||||||
hashBytes := signed.Sum(nil)
|
|
||||||
|
|
||||||
if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
|
|
||||||
return errors.SignatureError("hash tag doesn't match")
|
|
||||||
}
|
|
||||||
|
|
||||||
if pk.PubKeyAlgo != sig.PubKeyAlgo {
|
|
||||||
return errors.InvalidArgumentError("public key and signature use different algorithms")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
|
|
||||||
if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
|
|
||||||
return errors.SignatureError("RSA verification failure")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
|
|
||||||
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
|
|
||||||
subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
|
|
||||||
if len(hashBytes) > subgroupSize {
|
|
||||||
hashBytes = hashBytes[:subgroupSize]
|
|
||||||
}
|
|
||||||
if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
|
|
||||||
return errors.SignatureError("DSA verification failure")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
panic("shouldn't happen")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// keySignatureHash returns a Hash of the message that needs to be signed for
|
|
||||||
// pk to assert a subkey relationship to signed.
|
|
||||||
func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
|
|
||||||
if !hashFunc.Available() {
|
|
||||||
return nil, errors.UnsupportedError("hash function")
|
|
||||||
}
|
|
||||||
h = hashFunc.New()
|
|
||||||
|
|
||||||
// RFC 4880, section 5.2.4
|
|
||||||
pk.SerializeSignaturePrefix(h)
|
|
||||||
pk.serializeWithoutHeaders(h)
|
|
||||||
signed.SerializeSignaturePrefix(h)
|
|
||||||
signed.serializeWithoutHeaders(h)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyKeySignature returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, of signed.
|
|
||||||
func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
|
|
||||||
h, err := keySignatureHash(pk, signed, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err = pk.VerifySignature(h, sig); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.FlagSign {
|
|
||||||
// Signing subkeys must be cross-signed. See
|
|
||||||
// https://www.gnupg.org/faq/subkey-cross-certify.html.
|
|
||||||
if sig.EmbeddedSignature == nil {
|
|
||||||
return errors.StructuralError("signing subkey is missing cross-signature")
|
|
||||||
}
|
|
||||||
// Verify the cross-signature. This is calculated over the same
|
|
||||||
// data as the main signature, so we cannot just recursively
|
|
||||||
// call signed.VerifyKeySignature(...)
|
|
||||||
if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
|
|
||||||
return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
|
|
||||||
}
|
|
||||||
if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
|
|
||||||
return errors.StructuralError("error while verifying cross-signature: " + err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
|
|
||||||
if !hashFunc.Available() {
|
|
||||||
return nil, errors.UnsupportedError("hash function")
|
|
||||||
}
|
|
||||||
h = hashFunc.New()
|
|
||||||
|
|
||||||
// RFC 4880, section 5.2.4
|
|
||||||
pk.SerializeSignaturePrefix(h)
|
|
||||||
pk.serializeWithoutHeaders(h)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key.
|
|
||||||
func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
|
|
||||||
h, err := keyRevocationHash(pk, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return pk.VerifySignature(h, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// userIdSignatureHash returns a Hash of the message that needs to be signed
|
|
||||||
// to assert that pk is a valid key for id.
|
|
||||||
func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
|
|
||||||
if !hashFunc.Available() {
|
|
||||||
return nil, errors.UnsupportedError("hash function")
|
|
||||||
}
|
|
||||||
h = hashFunc.New()
|
|
||||||
|
|
||||||
// RFC 4880, section 5.2.4
|
|
||||||
pk.SerializeSignaturePrefix(h)
|
|
||||||
pk.serializeWithoutHeaders(h)
|
|
||||||
|
|
||||||
var buf [5]byte
|
|
||||||
buf[0] = 0xb4
|
|
||||||
buf[1] = byte(len(id) >> 24)
|
|
||||||
buf[2] = byte(len(id) >> 16)
|
|
||||||
buf[3] = byte(len(id) >> 8)
|
|
||||||
buf[4] = byte(len(id))
|
|
||||||
h.Write(buf[:])
|
|
||||||
h.Write([]byte(id))
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, that id is the identity of pub.
|
|
||||||
func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
|
|
||||||
h, err := userIdSignatureHash(id, pub, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return pk.VerifySignature(h, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, that id is the identity of pub.
|
|
||||||
func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
|
|
||||||
h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return pk.VerifySignatureV3(h, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeyIdString returns the public key's fingerprint in capital hex
|
|
||||||
// (e.g. "6C7EE1B8621CC013").
|
|
||||||
func (pk *PublicKey) KeyIdString() string {
|
|
||||||
return fmt.Sprintf("%X", pk.Fingerprint[12:20])
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeyIdShortString returns the short form of public key's fingerprint
|
|
||||||
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
|
|
||||||
func (pk *PublicKey) KeyIdShortString() string {
|
|
||||||
return fmt.Sprintf("%X", pk.Fingerprint[16:20])
|
|
||||||
}
|
|
||||||
|
|
||||||
// A parsedMPI is used to store the contents of a big integer, along with the
|
|
||||||
// bit length that was specified in the original input. This allows the MPI to
|
|
||||||
// be reserialized exactly.
|
|
||||||
type parsedMPI struct {
|
|
||||||
bytes []byte
|
|
||||||
bitLength uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeMPIs is a utility function for serializing several big integers to the
|
|
||||||
// given Writer.
|
|
||||||
func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
|
|
||||||
for _, mpi := range mpis {
|
|
||||||
err = writeMPI(w, mpi.bitLength, mpi.bytes)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// BitLength returns the bit length for the given public key.
|
|
||||||
func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
bitLength = pk.n.bitLength
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
bitLength = pk.p.bitLength
|
|
||||||
case PubKeyAlgoElGamal:
|
|
||||||
bitLength = pk.p.bitLength
|
|
||||||
default:
|
|
||||||
err = errors.InvalidArgumentError("bad public-key algorithm")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
279
vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
generated
vendored
279
vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
generated
vendored
@ -1,279 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/md5"
|
|
||||||
"crypto/rsa"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
|
|
||||||
// should not be used for signing or encrypting. They are supported here only for
|
|
||||||
// parsing version 3 key material and validating signatures.
|
|
||||||
// See RFC 4880, section 5.5.2.
|
|
||||||
type PublicKeyV3 struct {
|
|
||||||
CreationTime time.Time
|
|
||||||
DaysToExpire uint16
|
|
||||||
PubKeyAlgo PublicKeyAlgorithm
|
|
||||||
PublicKey *rsa.PublicKey
|
|
||||||
Fingerprint [16]byte
|
|
||||||
KeyId uint64
|
|
||||||
IsSubkey bool
|
|
||||||
|
|
||||||
n, e parsedMPI
|
|
||||||
}
|
|
||||||
|
|
||||||
// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey.
|
|
||||||
// Included here for testing purposes only. RFC 4880, section 5.5.2:
|
|
||||||
// "an implementation MUST NOT generate a V3 key, but MAY accept it."
|
|
||||||
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
|
|
||||||
pk := &PublicKeyV3{
|
|
||||||
CreationTime: creationTime,
|
|
||||||
PublicKey: pub,
|
|
||||||
n: fromBig(pub.N),
|
|
||||||
e: fromBig(big.NewInt(int64(pub.E))),
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return pk
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
|
|
||||||
// RFC 4880, section 5.5.2
|
|
||||||
var buf [8]byte
|
|
||||||
if _, err = readFull(r, buf[:]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] < 2 || buf[0] > 3 {
|
|
||||||
return errors.UnsupportedError("public key version")
|
|
||||||
}
|
|
||||||
pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
|
|
||||||
pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
|
|
||||||
pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
err = pk.parseRSA(r)
|
|
||||||
default:
|
|
||||||
err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
pk.setFingerPrintAndKeyId()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
|
|
||||||
// RFC 4880, section 12.2
|
|
||||||
fingerPrint := md5.New()
|
|
||||||
fingerPrint.Write(pk.n.bytes)
|
|
||||||
fingerPrint.Write(pk.e.bytes)
|
|
||||||
fingerPrint.Sum(pk.Fingerprint[:0])
|
|
||||||
pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
|
|
||||||
// section 5.5.2.
|
|
||||||
func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
|
|
||||||
if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// RFC 4880 Section 12.2 requires the low 8 bytes of the
|
|
||||||
// modulus to form the key id.
|
|
||||||
if len(pk.n.bytes) < 8 {
|
|
||||||
return errors.StructuralError("v3 public key modulus is too short")
|
|
||||||
}
|
|
||||||
if len(pk.e.bytes) > 3 {
|
|
||||||
err = errors.UnsupportedError("large public exponent")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
|
|
||||||
for i := 0; i < len(pk.e.bytes); i++ {
|
|
||||||
rsa.E <<= 8
|
|
||||||
rsa.E |= int(pk.e.bytes[i])
|
|
||||||
}
|
|
||||||
pk.PublicKey = rsa
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
|
|
||||||
// The prefix is used when calculating a signature over this public key. See
|
|
||||||
// RFC 4880, section 5.2.4.
|
|
||||||
func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
|
|
||||||
var pLength uint16
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
pLength += 2 + uint16(len(pk.n.bytes))
|
|
||||||
pLength += 2 + uint16(len(pk.e.bytes))
|
|
||||||
default:
|
|
||||||
panic("unknown public key algorithm")
|
|
||||||
}
|
|
||||||
pLength += 6
|
|
||||||
w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
|
|
||||||
length := 8 // 8 byte header
|
|
||||||
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
length += 2 + len(pk.n.bytes)
|
|
||||||
length += 2 + len(pk.e.bytes)
|
|
||||||
default:
|
|
||||||
panic("unknown public key algorithm")
|
|
||||||
}
|
|
||||||
|
|
||||||
packetType := packetTypePublicKey
|
|
||||||
if pk.IsSubkey {
|
|
||||||
packetType = packetTypePublicSubkey
|
|
||||||
}
|
|
||||||
if err = serializeHeader(w, packetType, length); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return pk.serializeWithoutHeaders(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
	var buf [8]byte
	// Version 3
	buf[0] = 3
	// Creation time, big-endian seconds since the Unix epoch.
	t := uint32(pk.CreationTime.Unix())
	buf[1] = byte(t >> 24)
	buf[2] = byte(t >> 16)
	buf[3] = byte(t >> 8)
	buf[4] = byte(t)
	// Days to expire (16-bit, big-endian; zero means no expiry per the
	// V3 format).
	buf[5] = byte(pk.DaysToExpire >> 8)
	buf[6] = byte(pk.DaysToExpire)
	// Public key algorithm
	buf[7] = byte(pk.PubKeyAlgo)

	if _, err = w.Write(buf[:]); err != nil {
		return
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		// The RSA modulus and exponent follow as MPIs.
		return writeMPIs(w, pk.n, pk.e)
	}
	return errors.InvalidArgumentError("bad public-key algorithm")
}
|
|
||||||
// CanSign returns true iff this public key can generate signatures
|
|
||||||
func (pk *PublicKeyV3) CanSign() bool {
|
|
||||||
return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
	if !pk.CanSign() {
		return errors.InvalidArgumentError("public key cannot generate signatures")
	}

	// V3 signatures hash a 5-byte suffix: the signature type followed by
	// the big-endian creation time.
	suffix := make([]byte, 5)
	suffix[0] = byte(sig.SigType)
	binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
	signed.Write(suffix)
	hashBytes := signed.Sum(nil)

	// Cheap early rejection: the signature stores the first two digest
	// bytes as a "hash tag".
	if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
		return errors.SignatureError("hash tag doesn't match")
	}

	if pk.PubKeyAlgo != sig.PubKeyAlgo {
		return errors.InvalidArgumentError("public key and signature use different algorithms")
	}

	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
			return errors.SignatureError("RSA verification failure")
		}
		return
	default:
		// V3 public keys only support RSA.
		panic("shouldn't happen")
	}
}
|
|
||||||
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, that id is the identity of pub.
|
|
||||||
func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
|
|
||||||
h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return pk.VerifySignatureV3(h, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
|
|
||||||
// public key, of signed.
|
|
||||||
func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
|
|
||||||
h, err := keySignatureHash(pk, signed, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return pk.VerifySignatureV3(h, sig)
|
|
||||||
}
|
|
||||||
|
|
||||||
// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
	if !hfn.Available() {
		return nil, errors.UnsupportedError("hash function")
	}
	h = hfn.New()

	// RFC 4880, section 5.2.4: hash the key prefix and the key material.
	pk.SerializeSignaturePrefix(h)
	pk.serializeWithoutHeaders(h)

	// The user id bytes are hashed directly, with no length prefix.
	h.Write([]byte(id))

	return
}
|
|
||||||
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKeyV3) KeyIdString() string {
	// %X prints the 64-bit key id as upper-case hex with no padding.
	return fmt.Sprintf("%X", pk.KeyId)
}
|
|
||||||
// KeyIdShortString returns the short form of public key's fingerprint
|
|
||||||
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
|
|
||||||
func (pk *PublicKeyV3) KeyIdShortString() string {
|
|
||||||
return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BitLength returns the bit length for the given public key.
|
|
||||||
func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
|
|
||||||
switch pk.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
|
|
||||||
bitLength = pk.n.bitLength
|
|
||||||
default:
|
|
||||||
err = errors.InvalidArgumentError("bad public-key algorithm")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
76
vendor/golang.org/x/crypto/openpgp/packet/reader.go
generated
vendored
76
vendor/golang.org/x/crypto/openpgp/packet/reader.go
generated
vendored
@ -1,76 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reader reads packets from an io.Reader and allows packets to be 'unread' so
// that they result from the next call to Next.
type Reader struct {
	// q holds unread packets; Next pops from the tail (LIFO).
	q []Packet
	// readers is a stack of packet sources; Next reads from the top-most
	// and pops it on EOF.
	readers []io.Reader
}
|
|
||||||
// New io.Readers are pushed when a compressed or encrypted packet is processed
// and recursively treated as a new source of packets. However, a carefully
// crafted packet can trigger an infinite recursive sequence of packets. See
//	http://mumble.net/~campbell/misc/pgp-quine
//	https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
// This constant limits the number of recursive packets that may be pushed.
const maxReaders = 32
|
|
||||||
// Next returns the most recently unread Packet, or reads another packet from
// the top-most io.Reader. Unknown packet types are skipped.
func (r *Reader) Next() (p Packet, err error) {
	// Drain the unread queue first (most recently unread packet wins).
	if len(r.q) > 0 {
		p = r.q[len(r.q)-1]
		r.q = r.q[:len(r.q)-1]
		return
	}

	for len(r.readers) > 0 {
		// Always read from the top of the reader stack.
		p, err = Read(r.readers[len(r.readers)-1])
		if err == nil {
			return
		}
		if err == io.EOF {
			// This source is exhausted; pop it and fall back to the
			// next most recent reader.
			r.readers = r.readers[:len(r.readers)-1]
			continue
		}
		// Unknown packet types are skipped by looping again; any other
		// error is fatal.
		if _, ok := err.(errors.UnknownPacketTypeError); !ok {
			return nil, err
		}
	}

	return nil, io.EOF
}
|
|
||||||
// Push causes the Reader to start reading from a new io.Reader. When an EOF
|
|
||||||
// error is seen from the new io.Reader, it is popped and the Reader continues
|
|
||||||
// to read from the next most recent io.Reader. Push returns a StructuralError
|
|
||||||
// if pushing the reader would exceed the maximum recursion level, otherwise it
|
|
||||||
// returns nil.
|
|
||||||
func (r *Reader) Push(reader io.Reader) (err error) {
|
|
||||||
if len(r.readers) >= maxReaders {
|
|
||||||
return errors.StructuralError("too many layers of packets")
|
|
||||||
}
|
|
||||||
r.readers = append(r.readers, reader)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unread causes the given Packet to be returned from the next call to Next.
func (r *Reader) Unread(p Packet) {
	// Next pops from the tail of q, so appending makes p the next result.
	r.q = append(r.q, p)
}
|
|
||||||
func NewReader(r io.Reader) *Reader {
|
|
||||||
return &Reader{
|
|
||||||
q: nil,
|
|
||||||
readers: []io.Reader{r},
|
|
||||||
}
|
|
||||||
}
|
|
731
vendor/golang.org/x/crypto/openpgp/packet/signature.go
generated
vendored
731
vendor/golang.org/x/crypto/openpgp/packet/signature.go
generated
vendored
@ -1,731 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto"
|
|
||||||
"crypto/dsa"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"encoding/asn1"
|
|
||||||
"encoding/binary"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"math/big"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// See RFC 4880, section 5.2.3.21 for details.
	KeyFlagCertify = 1 << iota // key may certify other keys
	KeyFlagSign                // key may sign data
	KeyFlagEncryptCommunications
	KeyFlagEncryptStorage
)
|
|
||||||
// Signature represents a signature. See RFC 4880, section 5.2.
type Signature struct {
	SigType    SignatureType      // purpose of the signature (binary, key binding, ...)
	PubKeyAlgo PublicKeyAlgorithm // algorithm of the signing key
	Hash       crypto.Hash        // digest algorithm used over the signed data

	// HashSuffix is extra data that is hashed in after the signed data.
	HashSuffix []byte
	// HashTag contains the first two bytes of the hash for fast rejection
	// of bad signed data.
	HashTag      [2]byte
	CreationTime time.Time

	// Exactly one of the following groups is populated, depending on
	// PubKeyAlgo.
	RSASignature         parsedMPI
	DSASigR, DSASigS     parsedMPI
	ECDSASigR, ECDSASigS parsedMPI

	// rawSubpackets contains the unparsed subpackets, in order.
	rawSubpackets []outputSubpacket

	// The following are optional so are nil when not included in the
	// signature.

	SigLifetimeSecs, KeyLifetimeSecs                        *uint32
	PreferredSymmetric, PreferredHash, PreferredCompression []uint8
	IssuerKeyId                                             *uint64
	IsPrimaryId                                             *bool

	// FlagsValid is set if any flags were given. See RFC 4880, section
	// 5.2.3.21 for details.
	FlagsValid                                                           bool
	FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool

	// RevocationReason is set if this signature has been revoked.
	// See RFC 4880, section 5.2.3.23 for details.
	RevocationReason     *uint8
	RevocationReasonText string

	// MDC is set if this signature has a feature packet that indicates
	// support for MDC subpackets.
	MDC bool

	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
	// this key. This prevents an attacker from claiming another's signing
	// subkey as their own.
	EmbeddedSignature *Signature

	// outSubpackets is the set of subpackets written by Serialize; filled
	// in by Sign (via buildSubpackets) or copied from rawSubpackets.
	outSubpackets []outputSubpacket
}
|
|
||||||
// parse reads a version-4 signature packet body from r into sig,
// building HashSuffix (the hashed header, hashed subpackets, and the
// v4 trailer) as it goes. See RFC 4880, section 5.2.3.
func (sig *Signature) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.2.3
	var buf [5]byte
	_, err = readFull(r, buf[:1])
	if err != nil {
		return
	}
	// Only v4 signatures are handled here; v3 lives in SignatureV3.
	if buf[0] != 4 {
		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
		return
	}

	// Fixed fields: type, pubkey algo, hash algo, hashed-subpacket length.
	_, err = readFull(r, buf[:5])
	if err != nil {
		return
	}
	sig.SigType = SignatureType(buf[0])
	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
	default:
		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
		return
	}

	var ok bool
	sig.Hash, ok = s2k.HashIdToHash(buf[2])
	if !ok {
		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
	}

	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
	// HashSuffix = version + 5 fixed bytes + hashed subpackets (l bytes
	// total), followed by the 6-byte v4 trailer.
	l := 6 + hashedSubpacketsLength
	sig.HashSuffix = make([]byte, l+6)
	sig.HashSuffix[0] = 4
	copy(sig.HashSuffix[1:], buf[:5])
	hashedSubpackets := sig.HashSuffix[6:l]
	_, err = readFull(r, hashedSubpackets)
	if err != nil {
		return
	}
	// See RFC 4880, section 5.2.4
	trailer := sig.HashSuffix[l:]
	trailer[0] = 4
	trailer[1] = 0xff
	trailer[2] = uint8(l >> 24)
	trailer[3] = uint8(l >> 16)
	trailer[4] = uint8(l >> 8)
	trailer[5] = uint8(l)

	err = parseSignatureSubpackets(sig, hashedSubpackets, true)
	if err != nil {
		return
	}

	// Unhashed subpackets are parsed but do not contribute to HashSuffix.
	_, err = readFull(r, buf[:2])
	if err != nil {
		return
	}
	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
	_, err = readFull(r, unhashedSubpackets)
	if err != nil {
		return
	}
	err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
	if err != nil {
		return
	}

	// Two-byte hash tag for fast rejection.
	_, err = readFull(r, sig.HashTag[:2])
	if err != nil {
		return
	}

	// Finally, the algorithm-specific signature MPIs.
	switch sig.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
	case PubKeyAlgoDSA:
		sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
		if err == nil {
			sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
		}
	case PubKeyAlgoECDSA:
		sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
		if err == nil {
			sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
		}
	default:
		// The algorithm was validated above.
		panic("unreachable")
	}
	return
}
|
|
||||||
// parseSignatureSubpackets parses subpackets of the main signature packet. See
|
|
||||||
// RFC 4880, section 5.2.3.1.
|
|
||||||
func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
|
|
||||||
for len(subpackets) > 0 {
|
|
||||||
subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.CreationTime.IsZero() {
|
|
||||||
err = errors.StructuralError("no creation time in signature")
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// signatureSubpacketType identifies the type octet of a signature
// subpacket (RFC 4880, section 5.2.3.1).
type signatureSubpacketType uint8

// Subpacket type values from RFC 4880, section 5.2.3.1.
const (
	creationTimeSubpacket        signatureSubpacketType = 2
	signatureExpirationSubpacket signatureSubpacketType = 3
	keyExpirationSubpacket       signatureSubpacketType = 9
	prefSymmetricAlgosSubpacket  signatureSubpacketType = 11
	issuerSubpacket              signatureSubpacketType = 16
	prefHashAlgosSubpacket       signatureSubpacketType = 21
	prefCompressionSubpacket     signatureSubpacketType = 22
	primaryUserIdSubpacket       signatureSubpacketType = 25
	keyFlagsSubpacket            signatureSubpacketType = 27
	reasonForRevocationSubpacket signatureSubpacketType = 29
	featuresSubpacket            signatureSubpacketType = 30
	embeddedSignatureSubpacket   signatureSubpacketType = 32
)
|
|
||||||
// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
// It returns the remainder of the buffer after the parsed subpacket.
func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
	// RFC 4880, section 5.2.3.1
	var (
		length     uint32
		packetType signatureSubpacketType
		isCritical bool
	)
	// Decode the variable-length length field (1, 2 or 5 octets).
	switch {
	case subpacket[0] < 192:
		length = uint32(subpacket[0])
		subpacket = subpacket[1:]
	case subpacket[0] < 255:
		if len(subpacket) < 2 {
			goto Truncated
		}
		length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
		subpacket = subpacket[2:]
	default:
		// 0xff marker followed by a big-endian uint32.
		if len(subpacket) < 5 {
			goto Truncated
		}
		length = uint32(subpacket[1])<<24 |
			uint32(subpacket[2])<<16 |
			uint32(subpacket[3])<<8 |
			uint32(subpacket[4])
		subpacket = subpacket[5:]
	}
	if length > uint32(len(subpacket)) {
		goto Truncated
	}
	rest = subpacket[length:]
	subpacket = subpacket[:length]
	if len(subpacket) == 0 {
		err = errors.StructuralError("zero length signature subpacket")
		return
	}
	// Bit 7 of the type octet marks the subpacket as critical.
	packetType = signatureSubpacketType(subpacket[0] & 0x7f)
	isCritical = subpacket[0]&0x80 == 0x80
	subpacket = subpacket[1:]
	// Keep the raw form so Serialize can round-trip unmodified signatures.
	sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
	switch packetType {
	case creationTimeSubpacket:
		// Signature creation time, section 5.2.3.4; must be hashed.
		if !isHashed {
			err = errors.StructuralError("signature creation time in non-hashed area")
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("signature creation time not four bytes")
			return
		}
		t := binary.BigEndian.Uint32(subpacket)
		sig.CreationTime = time.Unix(int64(t), 0)
	case signatureExpirationSubpacket:
		// Signature expiration time, section 5.2.3.10
		if !isHashed {
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("expiration subpacket with bad length")
			return
		}
		sig.SigLifetimeSecs = new(uint32)
		*sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
	case keyExpirationSubpacket:
		// Key expiration time, section 5.2.3.6
		if !isHashed {
			return
		}
		if len(subpacket) != 4 {
			err = errors.StructuralError("key expiration subpacket with bad length")
			return
		}
		sig.KeyLifetimeSecs = new(uint32)
		*sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
	case prefSymmetricAlgosSubpacket:
		// Preferred symmetric algorithms, section 5.2.3.7
		if !isHashed {
			return
		}
		sig.PreferredSymmetric = make([]byte, len(subpacket))
		copy(sig.PreferredSymmetric, subpacket)
	case issuerSubpacket:
		// Issuer, section 5.2.3.5
		if len(subpacket) != 8 {
			err = errors.StructuralError("issuer subpacket with bad length")
			return
		}
		sig.IssuerKeyId = new(uint64)
		*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
	case prefHashAlgosSubpacket:
		// Preferred hash algorithms, section 5.2.3.8
		if !isHashed {
			return
		}
		sig.PreferredHash = make([]byte, len(subpacket))
		copy(sig.PreferredHash, subpacket)
	case prefCompressionSubpacket:
		// Preferred compression algorithms, section 5.2.3.9
		if !isHashed {
			return
		}
		sig.PreferredCompression = make([]byte, len(subpacket))
		copy(sig.PreferredCompression, subpacket)
	case primaryUserIdSubpacket:
		// Primary User ID, section 5.2.3.19
		if !isHashed {
			return
		}
		if len(subpacket) != 1 {
			err = errors.StructuralError("primary user id subpacket with bad length")
			return
		}
		sig.IsPrimaryId = new(bool)
		if subpacket[0] > 0 {
			*sig.IsPrimaryId = true
		}
	case keyFlagsSubpacket:
		// Key flags, section 5.2.3.21
		if !isHashed {
			return
		}
		if len(subpacket) == 0 {
			err = errors.StructuralError("empty key flags subpacket")
			return
		}
		sig.FlagsValid = true
		if subpacket[0]&KeyFlagCertify != 0 {
			sig.FlagCertify = true
		}
		if subpacket[0]&KeyFlagSign != 0 {
			sig.FlagSign = true
		}
		if subpacket[0]&KeyFlagEncryptCommunications != 0 {
			sig.FlagEncryptCommunications = true
		}
		if subpacket[0]&KeyFlagEncryptStorage != 0 {
			sig.FlagEncryptStorage = true
		}
	case reasonForRevocationSubpacket:
		// Reason For Revocation, section 5.2.3.23
		if !isHashed {
			return
		}
		if len(subpacket) == 0 {
			err = errors.StructuralError("empty revocation reason subpacket")
			return
		}
		sig.RevocationReason = new(uint8)
		*sig.RevocationReason = subpacket[0]
		sig.RevocationReasonText = string(subpacket[1:])
	case featuresSubpacket:
		// Features subpacket, section 5.2.3.24 specifies a very general
		// mechanism for OpenPGP implementations to signal support for new
		// features. In practice, the subpacket is used exclusively to
		// indicate support for MDC-protected encryption.
		sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
	case embeddedSignatureSubpacket:
		// Only usage is in signatures that cross-certify
		// signing subkeys. section 5.2.3.26 describes the
		// format, with its usage described in section 11.1
		if sig.EmbeddedSignature != nil {
			err = errors.StructuralError("Cannot have multiple embedded signatures")
			return
		}
		sig.EmbeddedSignature = new(Signature)
		// Embedded signatures are required to be v4 signatures see
		// section 12.1. However, we only parse v4 signatures in this
		// file anyway.
		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
			return nil, err
		}
		if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
		}
	default:
		// Unknown subpackets are ignored unless flagged critical.
		if isCritical {
			err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
			return
		}
	}
	return

Truncated:
	err = errors.StructuralError("signature subpacket truncated")
	return
}
|
|
||||||
// subpacketLengthLength returns the length, in bytes, of an encoded length value.
func subpacketLengthLength(length int) int {
	// Mirrors the 1/2/5-octet encoding of RFC 4880, section 4.2.2.
	switch {
	case length < 192:
		return 1
	case length < 16320:
		return 2
	default:
		return 5
	}
}
|
|
||||||
// serializeSubpacketLength marshals the given length into to using the
// variable-length encoding of RFC 4880, Section 4.2.2, and returns the
// number of bytes written.
func serializeSubpacketLength(to []byte, length int) int {
	switch {
	case length < 192:
		// One-octet form.
		to[0] = byte(length)
		return 1
	case length < 16320:
		// Two-octet form: the value is offset by 192.
		v := length - 192
		to[0] = 192 + byte(v>>8)
		to[1] = byte(v)
		return 2
	default:
		// Five-octet form: 0xff marker plus a big-endian uint32.
		to[0] = 255
		to[1] = byte(length >> 24)
		to[2] = byte(length >> 16)
		to[3] = byte(length >> 8)
		to[4] = byte(length)
		return 5
	}
}
|
|
||||||
// subpacketsLength returns the serialized length, in bytes, of the given
|
|
||||||
// subpackets.
|
|
||||||
func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
|
|
||||||
for _, subpacket := range subpackets {
|
|
||||||
if subpacket.hashed == hashed {
|
|
||||||
length += subpacketLengthLength(len(subpacket.contents) + 1)
|
|
||||||
length += 1 // type byte
|
|
||||||
length += len(subpacket.contents)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// serializeSubpackets marshals the given subpackets into to, which must be
// exactly subpacketsLength(subpackets, hashed) bytes long.
func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
	for _, subpacket := range subpackets {
		if subpacket.hashed == hashed {
			// Length field covers the type octet plus the contents.
			n := serializeSubpacketLength(to, len(subpacket.contents)+1)
			to[n] = byte(subpacket.subpacketType)
			// Advance past length + type, then copy the body.
			to = to[1+n:]
			n = copy(to, subpacket.contents)
			to = to[n:]
		}
	}
	return
}
|
|
||||||
// KeyExpired returns whether sig is a self-signature of a key that has
|
|
||||||
// expired.
|
|
||||||
func (sig *Signature) KeyExpired(currentTime time.Time) bool {
|
|
||||||
if sig.KeyLifetimeSecs == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
|
|
||||||
return currentTime.After(expiry)
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
func (sig *Signature) buildHashSuffix() (err error) {
	hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)

	var ok bool
	// l = version byte + 5 fixed bytes + hashed subpackets; the 6-byte v4
	// trailer follows.
	l := 6 + hashedSubpacketsLen
	sig.HashSuffix = make([]byte, l+6)
	sig.HashSuffix[0] = 4
	sig.HashSuffix[1] = uint8(sig.SigType)
	sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
	sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
	if !ok {
		// Leave no half-built suffix behind on failure.
		sig.HashSuffix = nil
		return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
	}
	// Big-endian length of the hashed subpacket area.
	sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
	sig.HashSuffix[5] = byte(hashedSubpacketsLen)
	serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
	// RFC 4880, section 5.2.4: trailer is 0x04 0xff plus the big-endian
	// count of hashed octets.
	trailer := sig.HashSuffix[l:]
	trailer[0] = 4
	trailer[1] = 0xff
	trailer[2] = byte(l >> 24)
	trailer[3] = byte(l >> 16)
	trailer[4] = byte(l >> 8)
	trailer[5] = byte(l)
	return
}
|
|
||||||
func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
|
|
||||||
err = sig.buildHashSuffix()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
h.Write(sig.HashSuffix)
|
|
||||||
digest = h.Sum(nil)
|
|
||||||
copy(sig.HashTag[:], digest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign signs a message with a private key. The hash, h, must contain
|
|
||||||
// the hash of the message to be signed and will be mutated by this function.
|
|
||||||
// On success, the signature is stored in sig. Call Serialize to write it out.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
|
|
||||||
sig.outSubpackets = sig.buildSubpackets()
|
|
||||||
digest, err := sig.signPrepareHash(h)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch priv.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
// supports both *rsa.PrivateKey and crypto.Signer
|
|
||||||
sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
|
|
||||||
sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
|
|
||||||
|
|
||||||
// Need to truncate hashBytes to match FIPS 186-3 section 4.6.
|
|
||||||
subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
|
|
||||||
if len(digest) > subgroupSize {
|
|
||||||
digest = digest[:subgroupSize]
|
|
||||||
}
|
|
||||||
r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
|
|
||||||
if err == nil {
|
|
||||||
sig.DSASigR.bytes = r.Bytes()
|
|
||||||
sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
|
|
||||||
sig.DSASigS.bytes = s.Bytes()
|
|
||||||
sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
|
|
||||||
}
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
var r, s *big.Int
|
|
||||||
if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
|
|
||||||
// direct support, avoid asn1 wrapping/unwrapping
|
|
||||||
r, s, err = ecdsa.Sign(config.Random(), pk, digest)
|
|
||||||
} else {
|
|
||||||
var b []byte
|
|
||||||
b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
|
|
||||||
if err == nil {
|
|
||||||
r, s, err = unwrapECDSASig(b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
sig.ECDSASigR = fromBig(r)
|
|
||||||
sig.ECDSASigS = fromBig(s)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
// signature.
func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
	// The signature is a DER SEQUENCE of two INTEGERs.
	var parsed struct {
		R, S *big.Int
	}
	if _, err = asn1.Unmarshal(b, &parsed); err != nil {
		return nil, nil, err
	}
	return parsed.R, parsed.S, nil
}
|
|
||||||
// SignUserId computes a signature from priv, asserting that pub is a valid
|
|
||||||
// key for the identity id. On success, the signature is stored in sig. Call
|
|
||||||
// Serialize to write it out.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
|
|
||||||
h, err := userIdSignatureHash(id, pub, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return sig.Sign(h, priv, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignKey computes a signature from priv, asserting that pub is a subkey. On
|
|
||||||
// success, the signature is stored in sig. Call Serialize to write it out.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
|
|
||||||
h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return sig.Sign(h, priv, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
|
|
||||||
// called first.
|
|
||||||
func (sig *Signature) Serialize(w io.Writer) (err error) {
|
|
||||||
if len(sig.outSubpackets) == 0 {
|
|
||||||
sig.outSubpackets = sig.rawSubpackets
|
|
||||||
}
|
|
||||||
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil {
|
|
||||||
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
|
|
||||||
}
|
|
||||||
|
|
||||||
sigLength := 0
|
|
||||||
switch sig.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
sigLength = 2 + len(sig.RSASignature.bytes)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
sigLength = 2 + len(sig.DSASigR.bytes)
|
|
||||||
sigLength += 2 + len(sig.DSASigS.bytes)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
sigLength = 2 + len(sig.ECDSASigR.bytes)
|
|
||||||
sigLength += 2 + len(sig.ECDSASigS.bytes)
|
|
||||||
default:
|
|
||||||
panic("impossible")
|
|
||||||
}
|
|
||||||
|
|
||||||
unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
|
|
||||||
length := len(sig.HashSuffix) - 6 /* trailer not included */ +
|
|
||||||
2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
|
|
||||||
2 /* hash tag */ + sigLength
|
|
||||||
err = serializeHeader(w, packetTypeSignature, length)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
|
|
||||||
unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
|
|
||||||
unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
|
|
||||||
serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
|
|
||||||
|
|
||||||
_, err = w.Write(unhashedSubpackets)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = w.Write(sig.HashTag[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch sig.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
err = writeMPIs(w, sig.RSASignature)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
|
|
||||||
case PubKeyAlgoECDSA:
|
|
||||||
err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
|
|
||||||
default:
|
|
||||||
panic("impossible")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// outputSubpacket represents a subpacket to be marshaled.
|
|
||||||
type outputSubpacket struct {
|
|
||||||
hashed bool // true if this subpacket is in the hashed area.
|
|
||||||
subpacketType signatureSubpacketType
|
|
||||||
isCritical bool
|
|
||||||
contents []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
|
|
||||||
creationTime := make([]byte, 4)
|
|
||||||
binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
|
|
||||||
|
|
||||||
if sig.IssuerKeyId != nil {
|
|
||||||
keyId := make([]byte, 8)
|
|
||||||
binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
|
|
||||||
sigLifetime := make([]byte, 4)
|
|
||||||
binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key flags may only appear in self-signatures or certification signatures.
|
|
||||||
|
|
||||||
if sig.FlagsValid {
|
|
||||||
var flags byte
|
|
||||||
if sig.FlagCertify {
|
|
||||||
flags |= KeyFlagCertify
|
|
||||||
}
|
|
||||||
if sig.FlagSign {
|
|
||||||
flags |= KeyFlagSign
|
|
||||||
}
|
|
||||||
if sig.FlagEncryptCommunications {
|
|
||||||
flags |= KeyFlagEncryptCommunications
|
|
||||||
}
|
|
||||||
if sig.FlagEncryptStorage {
|
|
||||||
flags |= KeyFlagEncryptStorage
|
|
||||||
}
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
|
|
||||||
}
|
|
||||||
|
|
||||||
// The following subpackets may only appear in self-signatures
|
|
||||||
|
|
||||||
if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
|
|
||||||
keyLifetime := make([]byte, 4)
|
|
||||||
binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(sig.PreferredSymmetric) > 0 {
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(sig.PreferredHash) > 0 {
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(sig.PreferredCompression) > 0 {
|
|
||||||
subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
146
vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
generated
vendored
146
vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go
generated
vendored
@ -1,146 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignatureV3 represents older version 3 signatures. These signatures are less secure
|
|
||||||
// than version 4 and should not be used to create new signatures. They are included
|
|
||||||
// here for backwards compatibility to read and validate with older key material.
|
|
||||||
// See RFC 4880, section 5.2.2.
|
|
||||||
type SignatureV3 struct {
|
|
||||||
SigType SignatureType
|
|
||||||
CreationTime time.Time
|
|
||||||
IssuerKeyId uint64
|
|
||||||
PubKeyAlgo PublicKeyAlgorithm
|
|
||||||
Hash crypto.Hash
|
|
||||||
HashTag [2]byte
|
|
||||||
|
|
||||||
RSASignature parsedMPI
|
|
||||||
DSASigR, DSASigS parsedMPI
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sig *SignatureV3) parse(r io.Reader) (err error) {
|
|
||||||
// RFC 4880, section 5.2.2
|
|
||||||
var buf [8]byte
|
|
||||||
if _, err = readFull(r, buf[:1]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] < 2 || buf[0] > 3 {
|
|
||||||
err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if _, err = readFull(r, buf[:1]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if buf[0] != 5 {
|
|
||||||
err = errors.UnsupportedError(
|
|
||||||
"invalid hashed material length " + strconv.Itoa(int(buf[0])))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read hashed material: signature type + creation time
|
|
||||||
if _, err = readFull(r, buf[:5]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sig.SigType = SignatureType(buf[0])
|
|
||||||
t := binary.BigEndian.Uint32(buf[1:5])
|
|
||||||
sig.CreationTime = time.Unix(int64(t), 0)
|
|
||||||
|
|
||||||
// Eight-octet Key ID of signer.
|
|
||||||
if _, err = readFull(r, buf[:8]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
|
|
||||||
|
|
||||||
// Public-key and hash algorithm
|
|
||||||
if _, err = readFull(r, buf[:2]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
|
|
||||||
switch sig.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
|
|
||||||
default:
|
|
||||||
err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var ok bool
|
|
||||||
if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
|
|
||||||
return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Two-octet field holding left 16 bits of signed hash value.
|
|
||||||
if _, err = readFull(r, sig.HashTag[:2]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch sig.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
|
|
||||||
default:
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
|
|
||||||
// called first.
|
|
||||||
func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
|
|
||||||
buf := make([]byte, 8)
|
|
||||||
|
|
||||||
// Write the sig type and creation time
|
|
||||||
buf[0] = byte(sig.SigType)
|
|
||||||
binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
|
|
||||||
if _, err = w.Write(buf[:5]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write the issuer long key ID
|
|
||||||
binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
|
|
||||||
if _, err = w.Write(buf[:8]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write public key algorithm, hash ID, and hash value
|
|
||||||
buf[0] = byte(sig.PubKeyAlgo)
|
|
||||||
hashId, ok := s2k.HashToHashId(sig.Hash)
|
|
||||||
if !ok {
|
|
||||||
return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
|
|
||||||
}
|
|
||||||
buf[1] = hashId
|
|
||||||
copy(buf[2:4], sig.HashTag[:])
|
|
||||||
if _, err = w.Write(buf[:4]); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
|
|
||||||
return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch sig.PubKeyAlgo {
|
|
||||||
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
|
|
||||||
err = writeMPIs(w, sig.RSASignature)
|
|
||||||
case PubKeyAlgoDSA:
|
|
||||||
err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
|
|
||||||
default:
|
|
||||||
panic("impossible")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
155
vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
generated
vendored
155
vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
generated
vendored
@ -1,155 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This is the largest session key that we'll support. Since no 512-bit cipher
|
|
||||||
// has even been seriously used, this is comfortably large.
|
|
||||||
const maxSessionKeySizeInBytes = 64
|
|
||||||
|
|
||||||
// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
|
|
||||||
// 4880, section 5.3.
|
|
||||||
type SymmetricKeyEncrypted struct {
|
|
||||||
CipherFunc CipherFunction
|
|
||||||
s2k func(out, in []byte)
|
|
||||||
encryptedKey []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
const symmetricKeyEncryptedVersion = 4
|
|
||||||
|
|
||||||
func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
|
|
||||||
// RFC 4880, section 5.3.
|
|
||||||
var buf [2]byte
|
|
||||||
if _, err := readFull(r, buf[:]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if buf[0] != symmetricKeyEncryptedVersion {
|
|
||||||
return errors.UnsupportedError("SymmetricKeyEncrypted version")
|
|
||||||
}
|
|
||||||
ske.CipherFunc = CipherFunction(buf[1])
|
|
||||||
|
|
||||||
if ske.CipherFunc.KeySize() == 0 {
|
|
||||||
return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
ske.s2k, err = s2k.Parse(r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
encryptedKey := make([]byte, maxSessionKeySizeInBytes)
|
|
||||||
// The session key may follow. We just have to try and read to find
|
|
||||||
// out. If it exists then we limit it to maxSessionKeySizeInBytes.
|
|
||||||
n, err := readFull(r, encryptedKey)
|
|
||||||
if err != nil && err != io.ErrUnexpectedEOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != 0 {
|
|
||||||
if n == maxSessionKeySizeInBytes {
|
|
||||||
return errors.UnsupportedError("oversized encrypted session key")
|
|
||||||
}
|
|
||||||
ske.encryptedKey = encryptedKey[:n]
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt attempts to decrypt an encrypted session key and returns the key and
|
|
||||||
// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
|
|
||||||
// packet.
|
|
||||||
func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
|
|
||||||
key := make([]byte, ske.CipherFunc.KeySize())
|
|
||||||
ske.s2k(key, passphrase)
|
|
||||||
|
|
||||||
if len(ske.encryptedKey) == 0 {
|
|
||||||
return key, ske.CipherFunc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// the IV is all zeros
|
|
||||||
iv := make([]byte, ske.CipherFunc.blockSize())
|
|
||||||
c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
|
|
||||||
plaintextKey := make([]byte, len(ske.encryptedKey))
|
|
||||||
c.XORKeyStream(plaintextKey, ske.encryptedKey)
|
|
||||||
cipherFunc := CipherFunction(plaintextKey[0])
|
|
||||||
if cipherFunc.blockSize() == 0 {
|
|
||||||
return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
|
|
||||||
}
|
|
||||||
plaintextKey = plaintextKey[1:]
|
|
||||||
if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() {
|
|
||||||
return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " +
|
|
||||||
"not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")")
|
|
||||||
}
|
|
||||||
return plaintextKey, cipherFunc, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
|
|
||||||
// packet contains a random session key, encrypted by a key derived from the
|
|
||||||
// given passphrase. The session key is returned and must be passed to
|
|
||||||
// SerializeSymmetricallyEncrypted.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
|
|
||||||
cipherFunc := config.Cipher()
|
|
||||||
keySize := cipherFunc.KeySize()
|
|
||||||
if keySize == 0 {
|
|
||||||
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
|
|
||||||
}
|
|
||||||
|
|
||||||
s2kBuf := new(bytes.Buffer)
|
|
||||||
keyEncryptingKey := make([]byte, keySize)
|
|
||||||
// s2k.Serialize salts and stretches the passphrase, and writes the
|
|
||||||
// resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
|
|
||||||
err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s2kBytes := s2kBuf.Bytes()
|
|
||||||
|
|
||||||
packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
|
|
||||||
err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf [2]byte
|
|
||||||
buf[0] = symmetricKeyEncryptedVersion
|
|
||||||
buf[1] = byte(cipherFunc)
|
|
||||||
_, err = w.Write(buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err = w.Write(s2kBytes)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
sessionKey := make([]byte, keySize)
|
|
||||||
_, err = io.ReadFull(config.Random(), sessionKey)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
iv := make([]byte, cipherFunc.blockSize())
|
|
||||||
c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
|
|
||||||
encryptedCipherAndKey := make([]byte, keySize+1)
|
|
||||||
c.XORKeyStream(encryptedCipherAndKey, buf[1:])
|
|
||||||
c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
|
|
||||||
_, err = w.Write(encryptedCipherAndKey)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
key = sessionKey
|
|
||||||
return
|
|
||||||
}
|
|
290
vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
generated
vendored
290
vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
generated
vendored
@ -1,290 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/sha1"
|
|
||||||
"crypto/subtle"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
|
|
||||||
// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
|
|
||||||
// sections 5.7 and 5.13.
|
|
||||||
type SymmetricallyEncrypted struct {
|
|
||||||
MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
|
|
||||||
contents io.Reader
|
|
||||||
prefix []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
const symmetricallyEncryptedVersion = 1
|
|
||||||
|
|
||||||
func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
|
|
||||||
if se.MDC {
|
|
||||||
// See RFC 4880, section 5.13.
|
|
||||||
var buf [1]byte
|
|
||||||
_, err := readFull(r, buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if buf[0] != symmetricallyEncryptedVersion {
|
|
||||||
return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
se.contents = r
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decrypt returns a ReadCloser, from which the decrypted contents of the
|
|
||||||
// packet can be read. An incorrect key can, with high probability, be detected
|
|
||||||
// immediately and this will result in a KeyIncorrect error being returned.
|
|
||||||
func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
|
|
||||||
keySize := c.KeySize()
|
|
||||||
if keySize == 0 {
|
|
||||||
return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
|
|
||||||
}
|
|
||||||
if len(key) != keySize {
|
|
||||||
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
|
|
||||||
}
|
|
||||||
|
|
||||||
if se.prefix == nil {
|
|
||||||
se.prefix = make([]byte, c.blockSize()+2)
|
|
||||||
_, err := readFull(se.contents, se.prefix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else if len(se.prefix) != c.blockSize()+2 {
|
|
||||||
return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
|
|
||||||
}
|
|
||||||
|
|
||||||
ocfbResync := OCFBResync
|
|
||||||
if se.MDC {
|
|
||||||
// MDC packets use a different form of OCFB mode.
|
|
||||||
ocfbResync = OCFBNoResync
|
|
||||||
}
|
|
||||||
|
|
||||||
s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
|
|
||||||
if s == nil {
|
|
||||||
return nil, errors.ErrKeyIncorrect
|
|
||||||
}
|
|
||||||
|
|
||||||
plaintext := cipher.StreamReader{S: s, R: se.contents}
|
|
||||||
|
|
||||||
if se.MDC {
|
|
||||||
// MDC packets have an embedded hash that we need to check.
|
|
||||||
h := sha1.New()
|
|
||||||
h.Write(se.prefix)
|
|
||||||
return &seMDCReader{in: plaintext, h: h}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
|
|
||||||
return seReader{plaintext}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// seReader wraps an io.Reader with a no-op Close method.
|
|
||||||
type seReader struct {
|
|
||||||
in io.Reader
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ser seReader) Read(buf []byte) (int, error) {
|
|
||||||
return ser.in.Read(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ser seReader) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
|
|
||||||
|
|
||||||
// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
|
|
||||||
// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
|
|
||||||
// MDC packet containing a hash of the previous contents which is checked
|
|
||||||
// against the running hash. See RFC 4880, section 5.13.
|
|
||||||
type seMDCReader struct {
|
|
||||||
in io.Reader
|
|
||||||
h hash.Hash
|
|
||||||
trailer [mdcTrailerSize]byte
|
|
||||||
scratch [mdcTrailerSize]byte
|
|
||||||
trailerUsed int
|
|
||||||
error bool
|
|
||||||
eof bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
|
|
||||||
if ser.error {
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ser.eof {
|
|
||||||
err = io.EOF
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we haven't yet filled the trailer buffer then we must do that
|
|
||||||
// first.
|
|
||||||
for ser.trailerUsed < mdcTrailerSize {
|
|
||||||
n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
|
|
||||||
ser.trailerUsed += n
|
|
||||||
if err == io.EOF {
|
|
||||||
if ser.trailerUsed != mdcTrailerSize {
|
|
||||||
n = 0
|
|
||||||
err = io.ErrUnexpectedEOF
|
|
||||||
ser.error = true
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ser.eof = true
|
|
||||||
n = 0
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
n = 0
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it's a short read then we read into a temporary buffer and shift
|
|
||||||
// the data into the caller's buffer.
|
|
||||||
if len(buf) <= mdcTrailerSize {
|
|
||||||
n, err = readFull(ser.in, ser.scratch[:len(buf)])
|
|
||||||
copy(buf, ser.trailer[:n])
|
|
||||||
ser.h.Write(buf[:n])
|
|
||||||
copy(ser.trailer[:], ser.trailer[n:])
|
|
||||||
copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
|
|
||||||
if n < len(buf) {
|
|
||||||
ser.eof = true
|
|
||||||
err = io.EOF
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = ser.in.Read(buf[mdcTrailerSize:])
|
|
||||||
copy(buf, ser.trailer[:])
|
|
||||||
ser.h.Write(buf[:n])
|
|
||||||
copy(ser.trailer[:], buf[n:])
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
ser.eof = true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a new-format packet tag byte for a type 19 (MDC) packet.
|
|
||||||
const mdcPacketTagByte = byte(0x80) | 0x40 | 19
|
|
||||||
|
|
||||||
func (ser *seMDCReader) Close() error {
|
|
||||||
if ser.error {
|
|
||||||
return errors.SignatureError("error during reading")
|
|
||||||
}
|
|
||||||
|
|
||||||
for !ser.eof {
|
|
||||||
// We haven't seen EOF so we need to read to the end
|
|
||||||
var buf [1024]byte
|
|
||||||
_, err := ser.Read(buf[:])
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return errors.SignatureError("error during reading")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
|
|
||||||
return errors.SignatureError("MDC packet not found")
|
|
||||||
}
|
|
||||||
ser.h.Write(ser.trailer[:2])
|
|
||||||
|
|
||||||
final := ser.h.Sum(nil)
|
|
||||||
if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
|
|
||||||
return errors.SignatureError("hash mismatch")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// An seMDCWriter writes through to an io.WriteCloser while maintains a running
|
|
||||||
// hash of the data written. On close, it emits an MDC packet containing the
|
|
||||||
// running hash.
|
|
||||||
type seMDCWriter struct {
|
|
||||||
w io.WriteCloser
|
|
||||||
h hash.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
|
|
||||||
w.h.Write(buf)
|
|
||||||
return w.w.Write(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *seMDCWriter) Close() (err error) {
|
|
||||||
var buf [mdcTrailerSize]byte
|
|
||||||
|
|
||||||
buf[0] = mdcPacketTagByte
|
|
||||||
buf[1] = sha1.Size
|
|
||||||
w.h.Write(buf[:2])
|
|
||||||
digest := w.h.Sum(nil)
|
|
||||||
copy(buf[2:], digest)
|
|
||||||
|
|
||||||
_, err = w.w.Write(buf[:])
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return w.w.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
|
|
||||||
type noOpCloser struct {
|
|
||||||
w io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c noOpCloser) Write(data []byte) (n int, err error) {
|
|
||||||
return c.w.Write(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c noOpCloser) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
|
|
||||||
// to w and returns a WriteCloser to which the to-be-encrypted packets can be
|
|
||||||
// written.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
|
|
||||||
if c.KeySize() != len(key) {
|
|
||||||
return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
|
|
||||||
}
|
|
||||||
writeCloser := noOpCloser{w}
|
|
||||||
ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
block := c.new(key)
|
|
||||||
blockSize := block.BlockSize()
|
|
||||||
iv := make([]byte, blockSize)
|
|
||||||
_, err = config.Random().Read(iv)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
|
|
||||||
_, err = ciphertext.Write(prefix)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
plaintext := cipher.StreamWriter{S: s, W: ciphertext}
|
|
||||||
|
|
||||||
h := sha1.New()
|
|
||||||
h.Write(iv)
|
|
||||||
h.Write(iv[blockSize-2:])
|
|
||||||
contents = &seMDCWriter{w: plaintext, h: h}
|
|
||||||
return
|
|
||||||
}
|
|
91
vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
generated
vendored
91
vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
generated
vendored
@ -1,91 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"image"
|
|
||||||
"image/jpeg"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
const UserAttrImageSubpacket = 1
|
|
||||||
|
|
||||||
// UserAttribute is capable of storing other types of data about a user
|
|
||||||
// beyond name, email and a text comment. In practice, user attributes are typically used
|
|
||||||
// to store a signed thumbnail photo JPEG image of the user.
|
|
||||||
// See RFC 4880, section 5.12.
|
|
||||||
type UserAttribute struct {
|
|
||||||
Contents []*OpaqueSubpacket
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUserAttributePhoto creates a user attribute packet
|
|
||||||
// containing the given images.
|
|
||||||
func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
|
|
||||||
uat = new(UserAttribute)
|
|
||||||
for _, photo := range photos {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
// RFC 4880, Section 5.12.1.
|
|
||||||
data := []byte{
|
|
||||||
0x10, 0x00, // Little-endian image header length (16 bytes)
|
|
||||||
0x01, // Image header version 1
|
|
||||||
0x01, // JPEG
|
|
||||||
0, 0, 0, 0, // 12 reserved octets, must be all zero.
|
|
||||||
0, 0, 0, 0,
|
|
||||||
0, 0, 0, 0}
|
|
||||||
if _, err = buf.Write(data); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err = jpeg.Encode(&buf, photo, nil); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uat.Contents = append(uat.Contents, &OpaqueSubpacket{
|
|
||||||
SubType: UserAttrImageSubpacket,
|
|
||||||
Contents: buf.Bytes()})
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewUserAttribute creates a new user attribute packet containing the given subpackets.
|
|
||||||
func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
|
|
||||||
return &UserAttribute{Contents: contents}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (uat *UserAttribute) parse(r io.Reader) (err error) {
|
|
||||||
// RFC 4880, section 5.13
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uat.Contents, err = OpaqueSubpackets(b)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
|
|
||||||
// header.
|
|
||||||
func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
for _, sp := range uat.Contents {
|
|
||||||
sp.Serialize(&buf)
|
|
||||||
}
|
|
||||||
if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.Write(buf.Bytes())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ImageData returns zero or more byte slices, each containing
|
|
||||||
// JPEG File Interchange Format (JFIF), for each photo in the
|
|
||||||
// user attribute packet.
|
|
||||||
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
|
|
||||||
for _, sp := range uat.Contents {
|
|
||||||
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
|
|
||||||
imageData = append(imageData, sp.Contents[16:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
160
vendor/golang.org/x/crypto/openpgp/packet/userid.go
generated
vendored
160
vendor/golang.org/x/crypto/openpgp/packet/userid.go
generated
vendored
@ -1,160 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UserId contains text that is intended to represent the name and email
// address of the key holder. See RFC 4880, section 5.11. By convention, this
// takes the form "Full Name (Comment) <email@example.com>"
type UserId struct {
	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.

	// Name, Comment and Email are the conventional components parsed out
	// of Id; any of them may be empty.
	Name, Comment, Email string
}
|
|
||||||
|
|
||||||
// hasInvalidCharacters reports whether s contains any of the user id
// metacharacters '(', ')', '<', '>' or a NUL byte.
func hasInvalidCharacters(s string) bool {
	return strings.ContainsAny(s, "()<>\x00")
}
|
|
||||||
|
|
||||||
// NewUserId returns a UserId or nil if any of the arguments contain invalid
|
|
||||||
// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
|
|
||||||
func NewUserId(name, comment, email string) *UserId {
|
|
||||||
// RFC 4880 doesn't deal with the structure of userid strings; the
|
|
||||||
// name, comment and email form is just a convention. However, there's
|
|
||||||
// no convention about escaping the metacharacters and GPG just refuses
|
|
||||||
// to create user ids where, say, the name contains a '('. We mirror
|
|
||||||
// this behaviour.
|
|
||||||
|
|
||||||
if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
uid := new(UserId)
|
|
||||||
uid.Name, uid.Comment, uid.Email = name, comment, email
|
|
||||||
uid.Id = name
|
|
||||||
if len(comment) > 0 {
|
|
||||||
if len(uid.Id) > 0 {
|
|
||||||
uid.Id += " "
|
|
||||||
}
|
|
||||||
uid.Id += "("
|
|
||||||
uid.Id += comment
|
|
||||||
uid.Id += ")"
|
|
||||||
}
|
|
||||||
if len(email) > 0 {
|
|
||||||
if len(uid.Id) > 0 {
|
|
||||||
uid.Id += " "
|
|
||||||
}
|
|
||||||
uid.Id += "<"
|
|
||||||
uid.Id += email
|
|
||||||
uid.Id += ">"
|
|
||||||
}
|
|
||||||
return uid
|
|
||||||
}
|
|
||||||
|
|
||||||
func (uid *UserId) parse(r io.Reader) (err error) {
|
|
||||||
// RFC 4880, section 5.11
|
|
||||||
b, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
uid.Id = string(b)
|
|
||||||
uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Serialize marshals uid to w in the form of an OpenPGP packet, including
|
|
||||||
// header.
|
|
||||||
func (uid *UserId) Serialize(w io.Writer) error {
|
|
||||||
err := serializeHeader(w, packetTypeUserId, len(uid.Id))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = w.Write([]byte(uid.Id))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseUserId extracts the name, comment and email from a user id string that
// is formatted as "Full Name (Comment) <email@example.com>".
func parseUserId(id string) (name, comment, email string) {
	// Byte-offset spans of the three components within id.
	type span struct{ start, end int }
	var nameSpan, commentSpan, emailSpan span

	// Scanner states: 0 entering name, 1 in name, 2 entering comment,
	// 3 in comment, 4 between comment and email, 5 entering email,
	// 6 in email, 7+ after email.
	state := 0

	for offset, r := range id {
		switch state {
		case 0: // Entering name.
			nameSpan.start = offset
			state = 1
			fallthrough
		case 1: // In name.
			if r == '(' {
				state = 2
				nameSpan.end = offset
			} else if r == '<' {
				state = 5
				nameSpan.end = offset
			}
		case 2: // Entering comment.
			commentSpan.start = offset
			state = 3
			fallthrough
		case 3: // In comment.
			if r == ')' {
				state = 4
				commentSpan.end = offset
			}
		case 4: // Between comment and email.
			if r == '<' {
				state = 5
			}
		case 5: // Entering email.
			emailSpan.start = offset
			state = 6
			fallthrough
		case 6: // In email.
			if r == '>' {
				state = 7
				emailSpan.end = offset
			}
		default:
			// After email: trailing runes are ignored.
		}
	}

	// If the string ended in the middle of a component, close that
	// component at the end of the string.
	switch state {
	case 1:
		nameSpan.end = len(id)
	case 3:
		commentSpan.end = len(id)
	case 6:
		emailSpan.end = len(id)
	}

	name = strings.TrimSpace(id[nameSpan.start:nameSpan.end])
	comment = strings.TrimSpace(id[commentSpan.start:commentSpan.end])
	email = strings.TrimSpace(id[emailSpan.start:emailSpan.end])
	return
}
|
|
442
vendor/golang.org/x/crypto/openpgp/read.go
generated
vendored
442
vendor/golang.org/x/crypto/openpgp/read.go
generated
vendored
@ -1,442 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package openpgp implements high level operations on OpenPGP messages.
|
|
||||||
package openpgp // import "golang.org/x/crypto/openpgp"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
_ "crypto/sha256"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/armor"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/packet"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SignatureType is the armor type for a PGP signature.
|
|
||||||
var SignatureType = "PGP SIGNATURE"
|
|
||||||
|
|
||||||
// readArmored reads an armored block with the given type.
|
|
||||||
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
|
|
||||||
block, err := armor.Decode(r)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if block.Type != expectedType {
|
|
||||||
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
|
|
||||||
}
|
|
||||||
|
|
||||||
return block.Body, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
	IsEncrypted              bool                // true if the message was encrypted.
	EncryptedToKeyIds        []uint64            // the list of recipient key ids.
	IsSymmetricallyEncrypted bool                // true if a passphrase could have decrypted the message.
	DecryptedWith            Key                 // the private key used to decrypt the message, if any.
	IsSigned                 bool                // true if the message is signed.
	SignedByKeyId            uint64              // the key id of the signer, if any.
	SignedBy                 *Key                // the key of the signer, if available.
	LiteralData              *packet.LiteralData // the metadata of the contents
	UnverifiedBody           io.Reader           // the contents of the message.

	// If IsSigned is true and SignedBy is non-zero then the signature will
	// be verified as UnverifiedBody is read. The signature cannot be
	// checked until the whole of UnverifiedBody is read so UnverifiedBody
	// must be consumed until EOF before the data can be trusted. Even if a
	// message isn't signed (or the signer is unknown) the data may contain
	// an authentication code that is only checked once UnverifiedBody has
	// been consumed. Once EOF has been seen, the following fields are
	// valid. (An authentication code failure is reported as a
	// SignatureError error when reading from UnverifiedBody.)
	SignatureError error               // nil if the signature is good.
	Signature      *packet.Signature   // the signature packet itself, if v4 (default)
	SignatureV3    *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature

	// decrypted is the reader over the SymmetricallyEncrypted packet's
	// plaintext; closing it at EOF triggers the MDC integrity check.
	decrypted io.ReadCloser
}
|
|
||||||
|
|
||||||
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
|
|
||||||
|
|
||||||
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
	key          Key                  // candidate decryption key
	encryptedKey *packet.EncryptedKey // session key encrypted to key
}
|
|
||||||
|
|
||||||
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
	var p packet.Packet

	var symKeys []*packet.SymmetricKeyEncrypted
	var pubKeys []keyEnvelopePair
	var se *packet.SymmetricallyEncrypted

	packets := packet.NewReader(r)
	md = new(MessageDetails)
	// Assume encryption until a plaintext packet proves otherwise below.
	md.IsEncrypted = true

	// The message, if encrypted, starts with a number of packets
	// containing an encrypted decryption key. The decryption key is either
	// encrypted to a public key, or with a passphrase. This loop
	// collects these packets.
ParsePackets:
	for {
		p, err = packets.Next()
		if err != nil {
			return nil, err
		}
		switch p := p.(type) {
		case *packet.SymmetricKeyEncrypted:
			// This packet contains the decryption key encrypted with a passphrase.
			md.IsSymmetricallyEncrypted = true
			symKeys = append(symKeys, p)
		case *packet.EncryptedKey:
			// This packet contains the decryption key encrypted to a public key.
			md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
			// Only these public-key algorithms are supported for
			// session-key decryption; others are recorded but skipped.
			switch p.Algo {
			case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
				break
			default:
				continue
			}
			var keys []Key
			if p.KeyId == 0 {
				// A zero key id means "wildcard": try every decryption key.
				keys = keyring.DecryptionKeys()
			} else {
				keys = keyring.KeysById(p.KeyId)
			}
			for _, k := range keys {
				pubKeys = append(pubKeys, keyEnvelopePair{k, p})
			}
		case *packet.SymmetricallyEncrypted:
			se = p
			break ParsePackets
		case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
			// This message isn't encrypted.
			if len(symKeys) != 0 || len(pubKeys) != 0 {
				return nil, errors.StructuralError("key material not followed by encrypted message")
			}
			packets.Unread(p)
			return readSignedMessage(packets, nil, keyring)
		}
	}

	var candidates []Key
	var decrypted io.ReadCloser

	// Now that we have the list of encrypted keys we need to decrypt at
	// least one of them or, if we cannot, we need to call the prompt
	// function so that it can decrypt a key or give us a passphrase.
FindKey:
	for {
		// See if any of the keys already have a private key available
		candidates = candidates[:0]
		candidateFingerprints := make(map[string]bool)

		for _, pk := range pubKeys {
			if pk.key.PrivateKey == nil {
				continue
			}
			if !pk.key.PrivateKey.Encrypted {
				if len(pk.encryptedKey.Key) == 0 {
					// NOTE(review): the error from Decrypt is deliberately
					// ignored; failure leaves Key empty and is handled by
					// the length check below — confirm this is intended.
					pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
				}
				if len(pk.encryptedKey.Key) == 0 {
					continue
				}
				decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
				if err != nil && err != errors.ErrKeyIncorrect {
					return nil, err
				}
				if decrypted != nil {
					md.DecryptedWith = pk.key
					break FindKey
				}
			} else {
				// Encrypted private key: remember it (deduplicated by
				// fingerprint) so the prompt callback can decrypt it.
				fpr := string(pk.key.PublicKey.Fingerprint[:])
				if v := candidateFingerprints[fpr]; v {
					continue
				}
				candidates = append(candidates, pk.key)
				candidateFingerprints[fpr] = true
			}
		}

		if len(candidates) == 0 && len(symKeys) == 0 {
			return nil, errors.ErrKeyIncorrect
		}

		if prompt == nil {
			return nil, errors.ErrKeyIncorrect
		}

		passphrase, err := prompt(candidates, len(symKeys) != 0)
		if err != nil {
			return nil, err
		}

		// Try the symmetric passphrase first
		if len(symKeys) != 0 && passphrase != nil {
			for _, s := range symKeys {
				key, cipherFunc, err := s.Decrypt(passphrase)
				if err == nil {
					decrypted, err = se.Decrypt(cipherFunc, key)
					if err != nil && err != errors.ErrKeyIncorrect {
						return nil, err
					}
					if decrypted != nil {
						break FindKey
					}
				}

			}
		}
	}

	md.decrypted = decrypted
	if err := packets.Push(decrypted); err != nil {
		return nil, err
	}
	return readSignedMessage(packets, md, keyring)
}
|
|
||||||
|
|
||||||
// readSignedMessage reads a possibly signed message if mdin is non-zero then
// that structure is updated and returned. Otherwise a fresh MessageDetails is
// used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
	if mdin == nil {
		mdin = new(MessageDetails)
	}
	md = mdin

	var p packet.Packet
	var h hash.Hash
	var wrappedHash hash.Hash
FindLiteralData:
	for {
		p, err = packets.Next()
		if err != nil {
			return nil, err
		}
		switch p := p.(type) {
		case *packet.Compressed:
			// Descend into the compressed packet's contents.
			if err := packets.Push(p.Body); err != nil {
				return nil, err
			}
		case *packet.OnePassSignature:
			if !p.IsLast {
				return nil, errors.UnsupportedError("nested signatures")
			}

			h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
			if err != nil {
				md = nil
				return
			}

			md.IsSigned = true
			md.SignedByKeyId = p.KeyId
			keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
			if len(keys) > 0 {
				md.SignedBy = &keys[0]
			}
		case *packet.LiteralData:
			md.LiteralData = p
			break FindLiteralData
		}
	}

	// Choose the reader that will verify signature and/or MDC as the
	// caller consumes the body.
	if md.SignedBy != nil {
		md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
	} else if md.decrypted != nil {
		md.UnverifiedBody = checkReader{md}
	} else {
		md.UnverifiedBody = md.LiteralData.Body
	}

	return md, nil
}
|
|
||||||
|
|
||||||
// hashForSignature returns a pair of hashes that can be used to verify a
|
|
||||||
// signature. The signature may specify that the contents of the signed message
|
|
||||||
// should be preprocessed (i.e. to normalize line endings). Thus this function
|
|
||||||
// returns two hashes. The second should be used to hash the message itself and
|
|
||||||
// performs any needed preprocessing.
|
|
||||||
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
|
|
||||||
if !hashId.Available() {
|
|
||||||
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
|
|
||||||
}
|
|
||||||
h := hashId.New()
|
|
||||||
|
|
||||||
switch sigType {
|
|
||||||
case packet.SigTypeBinary:
|
|
||||||
return h, h, nil
|
|
||||||
case packet.SigTypeText:
|
|
||||||
return h, NewCanonicalTextHash(h), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
|
|
||||||
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
|
|
||||||
// MDC checks.
|
|
||||||
type checkReader struct {
|
|
||||||
md *MessageDetails
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cr checkReader) Read(buf []byte) (n int, err error) {
|
|
||||||
n, err = cr.md.LiteralData.Body.Read(buf)
|
|
||||||
if err == io.EOF {
|
|
||||||
mdcErr := cr.md.decrypted.Close()
|
|
||||||
if mdcErr != nil {
|
|
||||||
err = mdcErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
	packets        *packet.Reader // source of the trailing Signature packet
	h, wrappedHash hash.Hash      // raw hash and (possibly canonicalizing) wrapper
	md             *MessageDetails
}

// Read hashes the bytes as they flow through; on EOF it verifies the
// trailing signature, recording the outcome in md.SignatureError.
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
	n, err = scr.md.LiteralData.Body.Read(buf)
	// Hash only the bytes actually read.
	scr.wrappedHash.Write(buf[:n])
	if err == io.EOF {
		var p packet.Packet
		p, scr.md.SignatureError = scr.packets.Next()
		if scr.md.SignatureError != nil {
			return
		}

		// The packet after the literal data must be a v4 or v2/v3
		// signature; verify against the hash accumulated above.
		var ok bool
		if scr.md.Signature, ok = p.(*packet.Signature); ok {
			scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
		} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
			scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
		} else {
			scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
			return
		}

		// The SymmetricallyEncrypted packet, if any, might have an
		// unsigned hash of its own. In order to check this we need to
		// close that Reader.
		if scr.md.decrypted != nil {
			mdcErr := scr.md.decrypted.Close()
			if mdcErr != nil {
				err = mdcErr
			}
		}
	}
	return
}
|
|
||||||
|
|
||||||
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
	var issuerKeyId uint64
	var hashFunc crypto.Hash
	var sigType packet.SignatureType
	var keys []Key
	var p packet.Packet

	// Scan signature packets until one is found whose issuer we know.
	packets := packet.NewReader(signature)
	for {
		p, err = packets.Next()
		if err == io.EOF {
			return nil, errors.ErrUnknownIssuer
		}
		if err != nil {
			return nil, err
		}

		switch sig := p.(type) {
		case *packet.Signature:
			if sig.IssuerKeyId == nil {
				return nil, errors.StructuralError("signature doesn't have an issuer")
			}
			issuerKeyId = *sig.IssuerKeyId
			hashFunc = sig.Hash
			sigType = sig.SigType
		case *packet.SignatureV3:
			issuerKeyId = sig.IssuerKeyId
			hashFunc = sig.Hash
			sigType = sig.SigType
		default:
			return nil, errors.StructuralError("non signature packet found")
		}

		keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
		if len(keys) > 0 {
			break
		}
	}

	// The loop above only breaks with at least one key.
	if len(keys) == 0 {
		panic("unreachable")
	}

	h, wrappedHash, err := hashForSignature(hashFunc, sigType)
	if err != nil {
		return nil, err
	}

	// Hash the signed content (wrappedHash applies any text canonicalization).
	if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
		return nil, err
	}

	// Try each matching key; the first that verifies wins.
	for _, key := range keys {
		switch sig := p.(type) {
		case *packet.Signature:
			err = key.PublicKey.VerifySignature(h, sig)
		case *packet.SignatureV3:
			err = key.PublicKey.VerifySignatureV3(h, sig)
		default:
			panic("unreachable")
		}

		if err == nil {
			return key.Entity, nil
		}
	}

	return nil, err
}
|
|
||||||
|
|
||||||
// CheckArmoredDetachedSignature performs the same actions as
|
|
||||||
// CheckDetachedSignature but expects the signature to be armored.
|
|
||||||
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
|
|
||||||
body, err := readArmored(signature, SignatureType)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return CheckDetachedSignature(keyring, signed, body)
|
|
||||||
}
|
|
273
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
generated
vendored
273
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go
generated
vendored
@ -1,273 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package s2k implements the various OpenPGP string-to-key transforms as
|
|
||||||
// specified in RFC 4880 section 3.7.1.
|
|
||||||
package s2k // import "golang.org/x/crypto/openpgp/s2k"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Config collects configuration parameters for s2k key-stretching
// transformations. A nil *Config is valid and results in all default
// values. Currently, Config is used only by the Serialize function in
// this package.
type Config struct {
	// Hash is the default hash function to be used. If
	// nil, SHA1 is used.
	Hash crypto.Hash
	// S2KCount is only used for symmetric encryption. It
	// determines the strength of the passphrase stretching when
	// the said passphrase is hashed to produce a key. S2KCount
	// should be between 1024 and 65011712, inclusive. If Config
	// is nil or S2KCount is 0, the value 65536 is used. Not all
	// values in the above range can be represented. S2KCount will
	// be rounded up to the next representable value if it cannot
	// be encoded exactly. When set, it is strongly encouraged to
	// use a value that is at least 65536. See RFC 4880 Section
	// 3.7.1.3.
	S2KCount int
}
|
|
||||||
|
|
||||||
func (c *Config) hash() crypto.Hash {
|
|
||||||
if c == nil || uint(c.Hash) == 0 {
|
|
||||||
// SHA1 is the historical default in this package.
|
|
||||||
return crypto.SHA1
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.Hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodedCount returns the encoded S2K iteration-count octet for this
// configuration, clamping S2KCount into the representable range.
func (c *Config) encodedCount() uint8 {
	if c == nil || c.S2KCount == 0 {
		return 96 // The common case. Corresponding to 65536
	}

	i := c.S2KCount
	switch {
	// Behave like GPG. Should we make 65536 the lowest value used?
	case i < 1024:
		i = 1024
	case i > 65011712:
		i = 65011712
	}

	return encodeCount(i)
}
|
|
||||||
|
|
||||||
// encodeCount converts an iterative "count" in the range 1024 to
// 65011712, inclusive, to an encoded count. The return value is the
// octet that is actually stored in the GPG file. encodeCount panics
// if i is not in the above range (encodedCount above takes care to
// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
func encodeCount(i int) uint8 {
	if i < 1024 || i > 65011712 {
		panic("count arg i outside the required range")
	}

	// Scan the 256 encodable values for the smallest one whose decoded
	// count is >= i (i.e. round up to the next representable count).
	for encoded := 0; encoded < 256; encoded++ {
		count := decodeCount(uint8(encoded))
		if count >= i {
			return uint8(encoded)
		}
	}

	return 255
}
|
|
||||||
|
|
||||||
// decodeCount returns the s2k mode 3 iterative "count" corresponding to
// the encoded octet c: a 4-bit mantissa (low nibble) shifted by a
// 4-bit exponent (high nibble) biased by 6.
func decodeCount(c uint8) int {
	mantissa := 16 + int(c&15)
	exponent := uint32(c>>4) + 6
	return mantissa << exponent
}
|
|
||||||
|
|
||||||
// Simple writes to out the result of computing the Simple S2K function (RFC
// 4880, section 3.7.1.1) using the given hash and input passphrase.
// It is the Salted transform with a nil salt.
func Simple(out []byte, h hash.Hash, in []byte) {
	Salted(out, h, in, nil)
}
|
|
||||||
|
|
||||||
// zero is a single zero byte, written repeatedly to preload the hash in
// Salted and Iterated so each pass produces a distinct digest.
var zero [1]byte
|
|
||||||
|
|
||||||
// Salted writes to out the result of computing the Salted S2K function (RFC
|
|
||||||
// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
|
|
||||||
func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
|
|
||||||
done := 0
|
|
||||||
var digest []byte
|
|
||||||
|
|
||||||
for i := 0; done < len(out); i++ {
|
|
||||||
h.Reset()
|
|
||||||
for j := 0; j < i; j++ {
|
|
||||||
h.Write(zero[:])
|
|
||||||
}
|
|
||||||
h.Write(salt)
|
|
||||||
h.Write(in)
|
|
||||||
digest = h.Sum(digest[:0])
|
|
||||||
n := copy(out[done:], digest)
|
|
||||||
done += n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterated writes to out the result of computing the Iterated and Salted S2K
|
|
||||||
// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
|
|
||||||
// salt and iteration count.
|
|
||||||
func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
|
|
||||||
combined := make([]byte, len(in)+len(salt))
|
|
||||||
copy(combined, salt)
|
|
||||||
copy(combined[len(salt):], in)
|
|
||||||
|
|
||||||
if count < len(combined) {
|
|
||||||
count = len(combined)
|
|
||||||
}
|
|
||||||
|
|
||||||
done := 0
|
|
||||||
var digest []byte
|
|
||||||
for i := 0; done < len(out); i++ {
|
|
||||||
h.Reset()
|
|
||||||
for j := 0; j < i; j++ {
|
|
||||||
h.Write(zero[:])
|
|
||||||
}
|
|
||||||
written := 0
|
|
||||||
for written < count {
|
|
||||||
if written+len(combined) > count {
|
|
||||||
todo := count - written
|
|
||||||
h.Write(combined[:todo])
|
|
||||||
written = count
|
|
||||||
} else {
|
|
||||||
h.Write(combined)
|
|
||||||
written += len(combined)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
digest = h.Sum(digest[:0])
|
|
||||||
n := copy(out[done:], digest)
|
|
||||||
done += n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse reads a binary specification for a string-to-key transformation from r
// and returns a function which performs that transform.
func Parse(r io.Reader) (f func(out, in []byte), err error) {
	var buf [9]byte

	// buf[0] is the S2K mode octet, buf[1] the hash algorithm id.
	_, err = io.ReadFull(r, buf[:2])
	if err != nil {
		return
	}

	hash, ok := HashIdToHash(buf[1])
	if !ok {
		return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
	}
	if !hash.Available() {
		return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
	}
	h := hash.New()

	switch buf[0] {
	case 0:
		// Simple S2K: no salt, no iteration.
		f := func(out, in []byte) {
			Simple(out, h, in)
		}
		return f, nil
	case 1:
		// Salted S2K: an 8-byte salt follows.
		_, err = io.ReadFull(r, buf[:8])
		if err != nil {
			return
		}
		f := func(out, in []byte) {
			Salted(out, h, in, buf[:8])
		}
		return f, nil
	case 3:
		// Iterated and salted S2K: 8-byte salt plus an encoded count octet.
		_, err = io.ReadFull(r, buf[:9])
		if err != nil {
			return
		}
		count := decodeCount(buf[8])
		f := func(out, in []byte) {
			Iterated(out, h, in, buf[:8], count)
		}
		return f, nil
	}

	return nil, errors.UnsupportedError("S2K function")
}
|
|
||||||
|
|
||||||
// Serialize salts and stretches the given passphrase and writes the
// resulting key into key. It also serializes an S2K descriptor to
// w. The key stretching can be configured with c, which may be
// nil. In that case, sensible defaults will be used.
func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
	// Descriptor layout: mode(1) | hash id(1) | salt(8) | encoded count(1).
	var buf [11]byte
	buf[0] = 3 /* iterated and salted */
	// NOTE(review): the ok result is discarded; presumably c.hash() always
	// maps to a known id — confirm against hashToHashIdMapping.
	buf[1], _ = HashToHashId(c.hash())
	salt := buf[2:10]
	if _, err := io.ReadFull(rand, salt); err != nil {
		return err
	}
	encodedCount := c.encodedCount()
	// Decode back so the stretch uses exactly the count that was encoded.
	count := decodeCount(encodedCount)
	buf[10] = encodedCount
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}

	Iterated(key, c.hash().New(), passphrase, salt, count)
	return nil
}
|
|
||||||
|
|
||||||
// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
// Go's crypto.Hash type. See RFC 4880, section 9.4.
var hashToHashIdMapping = []struct {
	id   byte        // OpenPGP hash algorithm id
	hash crypto.Hash // corresponding Go hash
	name string      // OpenPGP text name of the hash
}{
	{1, crypto.MD5, "MD5"},
	{2, crypto.SHA1, "SHA1"},
	{3, crypto.RIPEMD160, "RIPEMD160"},
	{8, crypto.SHA256, "SHA256"},
	{9, crypto.SHA384, "SHA384"},
	{10, crypto.SHA512, "SHA512"},
	{11, crypto.SHA224, "SHA224"},
}
|
|
||||||
|
|
||||||
// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
|
|
||||||
// hash id.
|
|
||||||
func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
|
|
||||||
for _, m := range hashToHashIdMapping {
|
|
||||||
if m.id == id {
|
|
||||||
return m.hash, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashIdToString returns the name of the hash function corresponding to the
|
|
||||||
// given OpenPGP hash id.
|
|
||||||
func HashIdToString(id byte) (name string, ok bool) {
|
|
||||||
for _, m := range hashToHashIdMapping {
|
|
||||||
if m.id == id {
|
|
||||||
return m.name, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash.
|
|
||||||
func HashToHashId(h crypto.Hash) (id byte, ok bool) {
|
|
||||||
for _, m := range hashToHashIdMapping {
|
|
||||||
if m.hash == h {
|
|
||||||
return m.id, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
418
vendor/golang.org/x/crypto/openpgp/write.go
generated
vendored
418
vendor/golang.org/x/crypto/openpgp/write.go
generated
vendored
@ -1,418 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package openpgp
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"hash"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/crypto/openpgp/armor"
|
|
||||||
"golang.org/x/crypto/openpgp/errors"
|
|
||||||
"golang.org/x/crypto/openpgp/packet"
|
|
||||||
"golang.org/x/crypto/openpgp/s2k"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DetachSign signs message with the private key from signer (which must
|
|
||||||
// already have been decrypted) and writes the signature to w.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
|
|
||||||
return detachSign(w, signer, message, packet.SigTypeBinary, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArmoredDetachSign signs message with the private key from signer (which
|
|
||||||
// must already have been decrypted) and writes an armored signature to w.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
|
|
||||||
return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetachSignText signs message (after canonicalising the line endings) with
|
|
||||||
// the private key from signer (which must already have been decrypted) and
|
|
||||||
// writes the signature to w.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
|
|
||||||
return detachSign(w, signer, message, packet.SigTypeText, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ArmoredDetachSignText signs message (after canonicalising the line endings)
|
|
||||||
// with the private key from signer (which must already have been decrypted)
|
|
||||||
// and writes an armored signature to w.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
|
|
||||||
return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
|
|
||||||
out, err := armor.Encode(w, SignatureType, nil)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = detachSign(out, signer, message, sigType, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return out.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
|
|
||||||
if signer.PrivateKey == nil {
|
|
||||||
return errors.InvalidArgumentError("signing key doesn't have a private key")
|
|
||||||
}
|
|
||||||
if signer.PrivateKey.Encrypted {
|
|
||||||
return errors.InvalidArgumentError("signing key is encrypted")
|
|
||||||
}
|
|
||||||
|
|
||||||
sig := new(packet.Signature)
|
|
||||||
sig.SigType = sigType
|
|
||||||
sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
|
|
||||||
sig.Hash = config.Hash()
|
|
||||||
sig.CreationTime = config.Now()
|
|
||||||
sig.IssuerKeyId = &signer.PrivateKey.KeyId
|
|
||||||
|
|
||||||
h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
io.Copy(wrappedHash, message)
|
|
||||||
|
|
||||||
err = sig.Sign(h, signer.PrivateKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return sig.Serialize(w)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileHints contains metadata about encrypted files. This metadata is, itself,
|
|
||||||
// encrypted.
|
|
||||||
type FileHints struct {
|
|
||||||
// IsBinary can be set to hint that the contents are binary data.
|
|
||||||
IsBinary bool
|
|
||||||
// FileName hints at the name of the file that should be written. It's
|
|
||||||
// truncated to 255 bytes if longer. It may be empty to suggest that the
|
|
||||||
// file should not be written to disk. It may be equal to "_CONSOLE" to
|
|
||||||
// suggest the data should not be written to disk.
|
|
||||||
FileName string
|
|
||||||
// ModTime contains the modification time of the file, or the zero time if not applicable.
|
|
||||||
ModTime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
|
|
||||||
// The resulting WriteCloser must be closed after the contents of the file have
|
|
||||||
// been written.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
|
||||||
if hints == nil {
|
|
||||||
hints = &FileHints{}
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
literaldata := w
|
|
||||||
if algo := config.Compression(); algo != packet.CompressionNone {
|
|
||||||
var compConfig *packet.CompressionConfig
|
|
||||||
if config != nil {
|
|
||||||
compConfig = config.CompressionConfig
|
|
||||||
}
|
|
||||||
literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var epochSeconds uint32
|
|
||||||
if !hints.ModTime.IsZero() {
|
|
||||||
epochSeconds = uint32(hints.ModTime.Unix())
|
|
||||||
}
|
|
||||||
return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
|
|
||||||
}
|
|
||||||
|
|
||||||
// intersectPreferences mutates and returns a prefix of a that contains only
|
|
||||||
// the values in the intersection of a and b. The order of a is preserved.
|
|
||||||
func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
|
|
||||||
var j int
|
|
||||||
for _, v := range a {
|
|
||||||
for _, v2 := range b {
|
|
||||||
if v == v2 {
|
|
||||||
a[j] = v
|
|
||||||
j++
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return a[:j]
|
|
||||||
}
|
|
||||||
|
|
||||||
func hashToHashId(h crypto.Hash) uint8 {
|
|
||||||
v, ok := s2k.HashToHashId(h)
|
|
||||||
if !ok {
|
|
||||||
panic("tried to convert unknown hash")
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeAndSign writes the data as a payload package and, optionally, signs
|
|
||||||
// it. hints contains optional information, that is also encrypted,
|
|
||||||
// that aids the recipients in processing the message. The resulting
|
|
||||||
// WriteCloser must be closed after the contents of the file have been
|
|
||||||
// written. If config is nil, sensible defaults will be used.
|
|
||||||
func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
|
||||||
var signer *packet.PrivateKey
|
|
||||||
if signed != nil {
|
|
||||||
signKey, ok := signed.signingKey(config.Now())
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.InvalidArgumentError("no valid signing keys")
|
|
||||||
}
|
|
||||||
signer = signKey.PrivateKey
|
|
||||||
if signer == nil {
|
|
||||||
return nil, errors.InvalidArgumentError("no private key in signing key")
|
|
||||||
}
|
|
||||||
if signer.Encrypted {
|
|
||||||
return nil, errors.InvalidArgumentError("signing key must be decrypted")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var hash crypto.Hash
|
|
||||||
for _, hashId := range candidateHashes {
|
|
||||||
if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
|
|
||||||
hash = h
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the hash specified by config is a candidate, we'll use that.
|
|
||||||
if configuredHash := config.Hash(); configuredHash.Available() {
|
|
||||||
for _, hashId := range candidateHashes {
|
|
||||||
if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
|
|
||||||
hash = h
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hash == 0 {
|
|
||||||
hashId := candidateHashes[0]
|
|
||||||
name, ok := s2k.HashIdToString(hashId)
|
|
||||||
if !ok {
|
|
||||||
name = "#" + strconv.Itoa(int(hashId))
|
|
||||||
}
|
|
||||||
return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
|
|
||||||
}
|
|
||||||
|
|
||||||
if signer != nil {
|
|
||||||
ops := &packet.OnePassSignature{
|
|
||||||
SigType: packet.SigTypeBinary,
|
|
||||||
Hash: hash,
|
|
||||||
PubKeyAlgo: signer.PubKeyAlgo,
|
|
||||||
KeyId: signer.KeyId,
|
|
||||||
IsLast: true,
|
|
||||||
}
|
|
||||||
if err := ops.Serialize(payload); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hints == nil {
|
|
||||||
hints = &FileHints{}
|
|
||||||
}
|
|
||||||
|
|
||||||
w := payload
|
|
||||||
if signer != nil {
|
|
||||||
// If we need to write a signature packet after the literal
|
|
||||||
// data then we need to stop literalData from closing
|
|
||||||
// encryptedData.
|
|
||||||
w = noOpCloser{w}
|
|
||||||
|
|
||||||
}
|
|
||||||
var epochSeconds uint32
|
|
||||||
if !hints.ModTime.IsZero() {
|
|
||||||
epochSeconds = uint32(hints.ModTime.Unix())
|
|
||||||
}
|
|
||||||
literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if signer != nil {
|
|
||||||
return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil
|
|
||||||
}
|
|
||||||
return literalData, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encrypt encrypts a message to a number of recipients and, optionally, signs
|
|
||||||
// it. hints contains optional information, that is also encrypted, that aids
|
|
||||||
// the recipients in processing the message. The resulting WriteCloser must
|
|
||||||
// be closed after the contents of the file have been written.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
|
|
||||||
if len(to) == 0 {
|
|
||||||
return nil, errors.InvalidArgumentError("no encryption recipient provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are the possible ciphers that we'll use for the message.
|
|
||||||
candidateCiphers := []uint8{
|
|
||||||
uint8(packet.CipherAES128),
|
|
||||||
uint8(packet.CipherAES256),
|
|
||||||
uint8(packet.CipherCAST5),
|
|
||||||
}
|
|
||||||
// These are the possible hash functions that we'll use for the signature.
|
|
||||||
candidateHashes := []uint8{
|
|
||||||
hashToHashId(crypto.SHA256),
|
|
||||||
hashToHashId(crypto.SHA384),
|
|
||||||
hashToHashId(crypto.SHA512),
|
|
||||||
hashToHashId(crypto.SHA1),
|
|
||||||
hashToHashId(crypto.RIPEMD160),
|
|
||||||
}
|
|
||||||
// In the event that a recipient doesn't specify any supported ciphers
|
|
||||||
// or hash functions, these are the ones that we assume that every
|
|
||||||
// implementation supports.
|
|
||||||
defaultCiphers := candidateCiphers[len(candidateCiphers)-1:]
|
|
||||||
defaultHashes := candidateHashes[len(candidateHashes)-1:]
|
|
||||||
|
|
||||||
encryptKeys := make([]Key, len(to))
|
|
||||||
for i := range to {
|
|
||||||
var ok bool
|
|
||||||
encryptKeys[i], ok = to[i].encryptionKey(config.Now())
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
|
|
||||||
}
|
|
||||||
|
|
||||||
sig := to[i].primaryIdentity().SelfSignature
|
|
||||||
|
|
||||||
preferredSymmetric := sig.PreferredSymmetric
|
|
||||||
if len(preferredSymmetric) == 0 {
|
|
||||||
preferredSymmetric = defaultCiphers
|
|
||||||
}
|
|
||||||
preferredHashes := sig.PreferredHash
|
|
||||||
if len(preferredHashes) == 0 {
|
|
||||||
preferredHashes = defaultHashes
|
|
||||||
}
|
|
||||||
candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
|
|
||||||
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
|
|
||||||
return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
|
|
||||||
}
|
|
||||||
|
|
||||||
cipher := packet.CipherFunction(candidateCiphers[0])
|
|
||||||
// If the cipher specified by config is a candidate, we'll use that.
|
|
||||||
configuredCipher := config.Cipher()
|
|
||||||
for _, c := range candidateCiphers {
|
|
||||||
cipherFunc := packet.CipherFunction(c)
|
|
||||||
if cipherFunc == configuredCipher {
|
|
||||||
cipher = cipherFunc
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
symKey := make([]byte, cipher.KeySize())
|
|
||||||
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, key := range encryptKeys {
|
|
||||||
if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return writeAndSign(payload, candidateHashes, signed, hints, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sign signs a message. The resulting WriteCloser must be closed after the
|
|
||||||
// contents of the file have been written. hints contains optional information
|
|
||||||
// that aids the recipients in processing the message.
|
|
||||||
// If config is nil, sensible defaults will be used.
|
|
||||||
func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
|
|
||||||
if signed == nil {
|
|
||||||
return nil, errors.InvalidArgumentError("no signer provided")
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are the possible hash functions that we'll use for the signature.
|
|
||||||
candidateHashes := []uint8{
|
|
||||||
hashToHashId(crypto.SHA256),
|
|
||||||
hashToHashId(crypto.SHA384),
|
|
||||||
hashToHashId(crypto.SHA512),
|
|
||||||
hashToHashId(crypto.SHA1),
|
|
||||||
hashToHashId(crypto.RIPEMD160),
|
|
||||||
}
|
|
||||||
defaultHashes := candidateHashes[len(candidateHashes)-1:]
|
|
||||||
preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash
|
|
||||||
if len(preferredHashes) == 0 {
|
|
||||||
preferredHashes = defaultHashes
|
|
||||||
}
|
|
||||||
candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
|
|
||||||
return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config)
|
|
||||||
}
|
|
||||||
|
|
||||||
// signatureWriter hashes the contents of a message while passing it along to
|
|
||||||
// literalData. When closed, it closes literalData, writes a signature packet
|
|
||||||
// to encryptedData and then also closes encryptedData.
|
|
||||||
type signatureWriter struct {
|
|
||||||
encryptedData io.WriteCloser
|
|
||||||
literalData io.WriteCloser
|
|
||||||
hashType crypto.Hash
|
|
||||||
h hash.Hash
|
|
||||||
signer *packet.PrivateKey
|
|
||||||
config *packet.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s signatureWriter) Write(data []byte) (int, error) {
|
|
||||||
s.h.Write(data)
|
|
||||||
return s.literalData.Write(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s signatureWriter) Close() error {
|
|
||||||
sig := &packet.Signature{
|
|
||||||
SigType: packet.SigTypeBinary,
|
|
||||||
PubKeyAlgo: s.signer.PubKeyAlgo,
|
|
||||||
Hash: s.hashType,
|
|
||||||
CreationTime: s.config.Now(),
|
|
||||||
IssuerKeyId: &s.signer.KeyId,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := sig.Sign(s.h, s.signer, s.config); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := s.literalData.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := sig.Serialize(s.encryptedData); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return s.encryptedData.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
|
|
||||||
// TODO: we have two of these in OpenPGP packages alone. This probably needs
|
|
||||||
// to be promoted somewhere more common.
|
|
||||||
type noOpCloser struct {
|
|
||||||
w io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c noOpCloser) Write(data []byte) (n int, err error) {
|
|
||||||
return c.w.Write(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c noOpCloser) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
3
vendor/golang.org/x/net/AUTHORS
generated
vendored
3
vendor/golang.org/x/net/AUTHORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code refers to The Go Authors for copyright purposes.
|
|
||||||
# The master list of authors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/AUTHORS.
|
|
3
vendor/golang.org/x/net/CONTRIBUTORS
generated
vendored
3
vendor/golang.org/x/net/CONTRIBUTORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code was written by the Go contributors.
|
|
||||||
# The master list of contributors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
27
vendor/golang.org/x/net/LICENSE
generated
vendored
27
vendor/golang.org/x/net/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
|||||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
22
vendor/golang.org/x/net/PATENTS
generated
vendored
22
vendor/golang.org/x/net/PATENTS
generated
vendored
@ -1,22 +0,0 @@
|
|||||||
Additional IP Rights Grant (Patents)
|
|
||||||
|
|
||||||
"This implementation" means the copyrightable works distributed by
|
|
||||||
Google as part of the Go project.
|
|
||||||
|
|
||||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
|
||||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
|
||||||
patent license to make, have made, use, offer to sell, sell, import,
|
|
||||||
transfer and otherwise run, modify and propagate the contents of this
|
|
||||||
implementation of Go, where such license applies only to those patent
|
|
||||||
claims, both currently owned or controlled by Google and acquired in
|
|
||||||
the future, licensable by Google that are necessarily infringed by this
|
|
||||||
implementation of Go. This grant does not include claims that would be
|
|
||||||
infringed only as a consequence of further modification of this
|
|
||||||
implementation. If you or your agent or exclusive licensee institute or
|
|
||||||
order or agree to the institution of patent litigation against any
|
|
||||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
|
||||||
that this implementation of Go or any code incorporated within this
|
|
||||||
implementation of Go constitutes direct or contributory patent
|
|
||||||
infringement, or inducement of patent infringement, then any patent
|
|
||||||
rights granted to you under this License for this implementation of Go
|
|
||||||
shall terminate as of the date such litigation is filed.
|
|
56
vendor/golang.org/x/net/context/context.go
generated
vendored
56
vendor/golang.org/x/net/context/context.go
generated
vendored
@ -1,56 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package context defines the Context type, which carries deadlines,
|
|
||||||
// cancelation signals, and other request-scoped values across API boundaries
|
|
||||||
// and between processes.
|
|
||||||
// As of Go 1.7 this package is available in the standard library under the
|
|
||||||
// name context. https://golang.org/pkg/context.
|
|
||||||
//
|
|
||||||
// Incoming requests to a server should create a Context, and outgoing calls to
|
|
||||||
// servers should accept a Context. The chain of function calls between must
|
|
||||||
// propagate the Context, optionally replacing it with a modified copy created
|
|
||||||
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
|
|
||||||
//
|
|
||||||
// Programs that use Contexts should follow these rules to keep interfaces
|
|
||||||
// consistent across packages and enable static analysis tools to check context
|
|
||||||
// propagation:
|
|
||||||
//
|
|
||||||
// Do not store Contexts inside a struct type; instead, pass a Context
|
|
||||||
// explicitly to each function that needs it. The Context should be the first
|
|
||||||
// parameter, typically named ctx:
|
|
||||||
//
|
|
||||||
// func DoSomething(ctx context.Context, arg Arg) error {
|
|
||||||
// // ... use ctx ...
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
|
|
||||||
// if you are unsure about which Context to use.
|
|
||||||
//
|
|
||||||
// Use context Values only for request-scoped data that transits processes and
|
|
||||||
// APIs, not for passing optional parameters to functions.
|
|
||||||
//
|
|
||||||
// The same Context may be passed to functions running in different goroutines;
|
|
||||||
// Contexts are safe for simultaneous use by multiple goroutines.
|
|
||||||
//
|
|
||||||
// See http://blog.golang.org/context for example code for a server that uses
|
|
||||||
// Contexts.
|
|
||||||
package context // import "golang.org/x/net/context"
|
|
||||||
|
|
||||||
// Background returns a non-nil, empty Context. It is never canceled, has no
|
|
||||||
// values, and has no deadline. It is typically used by the main function,
|
|
||||||
// initialization, and tests, and as the top-level Context for incoming
|
|
||||||
// requests.
|
|
||||||
func Background() Context {
|
|
||||||
return background
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO returns a non-nil, empty Context. Code should use context.TODO when
|
|
||||||
// it's unclear which Context to use or it is not yet available (because the
|
|
||||||
// surrounding function has not yet been extended to accept a Context
|
|
||||||
// parameter). TODO is recognized by static analysis tools that determine
|
|
||||||
// whether Contexts are propagated correctly in a program.
|
|
||||||
func TODO() Context {
|
|
||||||
return todo
|
|
||||||
}
|
|
71
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
generated
vendored
71
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
generated
vendored
@ -1,71 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
|
|
||||||
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Do sends an HTTP request with the provided http.Client and returns
|
|
||||||
// an HTTP response.
|
|
||||||
//
|
|
||||||
// If the client is nil, http.DefaultClient is used.
|
|
||||||
//
|
|
||||||
// The provided ctx must be non-nil. If it is canceled or times out,
|
|
||||||
// ctx.Err() will be returned.
|
|
||||||
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
|
||||||
if client == nil {
|
|
||||||
client = http.DefaultClient
|
|
||||||
}
|
|
||||||
resp, err := client.Do(req.WithContext(ctx))
|
|
||||||
// If we got an error, and the context has been canceled,
|
|
||||||
// the context's error is probably more useful.
|
|
||||||
if err != nil {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
err = ctx.Err()
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get issues a GET request via the Do function.
|
|
||||||
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
|
||||||
req, err := http.NewRequest("GET", url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return Do(ctx, client, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Head issues a HEAD request via the Do function.
|
|
||||||
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
|
|
||||||
req, err := http.NewRequest("HEAD", url, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return Do(ctx, client, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Post issues a POST request via the Do function.
|
|
||||||
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
|
|
||||||
req, err := http.NewRequest("POST", url, body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", bodyType)
|
|
||||||
return Do(ctx, client, req)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PostForm issues a POST request via the Do function.
|
|
||||||
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
|
|
||||||
return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
|
|
||||||
}
|
|
72
vendor/golang.org/x/net/context/go17.go
generated
vendored
72
vendor/golang.org/x/net/context/go17.go
generated
vendored
@ -1,72 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context" // standard library's context, as of Go 1.7
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
todo = context.TODO()
|
|
||||||
background = context.Background()
|
|
||||||
)
|
|
||||||
|
|
||||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
|
||||||
var Canceled = context.Canceled
|
|
||||||
|
|
||||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
|
||||||
// deadline passes.
|
|
||||||
var DeadlineExceeded = context.DeadlineExceeded
|
|
||||||
|
|
||||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
|
||||||
// context's Done channel is closed when the returned cancel function is called
|
|
||||||
// or when the parent context's Done channel is closed, whichever happens first.
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete.
|
|
||||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
|
||||||
ctx, f := context.WithCancel(parent)
|
|
||||||
return ctx, CancelFunc(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
|
||||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
|
||||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
|
||||||
// context's Done channel is closed when the deadline expires, when the returned
|
|
||||||
// cancel function is called, or when the parent context's Done channel is
|
|
||||||
// closed, whichever happens first.
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete.
|
|
||||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
|
||||||
ctx, f := context.WithDeadline(parent, deadline)
|
|
||||||
return ctx, CancelFunc(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete:
|
|
||||||
//
|
|
||||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
|
||||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
|
||||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
|
||||||
// return slowOperation(ctx)
|
|
||||||
// }
|
|
||||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
|
||||||
return WithDeadline(parent, time.Now().Add(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValue returns a copy of parent in which the value associated with key is
|
|
||||||
// val.
|
|
||||||
//
|
|
||||||
// Use context Values only for request-scoped data that transits processes and
|
|
||||||
// APIs, not for passing optional parameters to functions.
|
|
||||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
|
||||||
return context.WithValue(parent, key, val)
|
|
||||||
}
|
|
20
vendor/golang.org/x/net/context/go19.go
generated
vendored
20
vendor/golang.org/x/net/context/go19.go
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.9
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import "context" // standard library's context, as of Go 1.7
|
|
||||||
|
|
||||||
// A Context carries a deadline, a cancelation signal, and other values across
|
|
||||||
// API boundaries.
|
|
||||||
//
|
|
||||||
// Context's methods may be called by multiple goroutines simultaneously.
|
|
||||||
type Context = context.Context
|
|
||||||
|
|
||||||
// A CancelFunc tells an operation to abandon its work.
|
|
||||||
// A CancelFunc does not wait for the work to stop.
|
|
||||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
|
||||||
type CancelFunc = context.CancelFunc
|
|
300
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
300
vendor/golang.org/x/net/context/pre_go17.go
generated
vendored
@ -1,300 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.7
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
|
||||||
// struct{}, since vars of this type must have distinct addresses.
|
|
||||||
type emptyCtx int
|
|
||||||
|
|
||||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*emptyCtx) Done() <-chan struct{} {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*emptyCtx) Err() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *emptyCtx) String() string {
|
|
||||||
switch e {
|
|
||||||
case background:
|
|
||||||
return "context.Background"
|
|
||||||
case todo:
|
|
||||||
return "context.TODO"
|
|
||||||
}
|
|
||||||
return "unknown empty Context"
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
background = new(emptyCtx)
|
|
||||||
todo = new(emptyCtx)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Canceled is the error returned by Context.Err when the context is canceled.
|
|
||||||
var Canceled = errors.New("context canceled")
|
|
||||||
|
|
||||||
// DeadlineExceeded is the error returned by Context.Err when the context's
|
|
||||||
// deadline passes.
|
|
||||||
var DeadlineExceeded = errors.New("context deadline exceeded")
|
|
||||||
|
|
||||||
// WithCancel returns a copy of parent with a new Done channel. The returned
|
|
||||||
// context's Done channel is closed when the returned cancel function is called
|
|
||||||
// or when the parent context's Done channel is closed, whichever happens first.
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete.
|
|
||||||
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
|
|
||||||
c := newCancelCtx(parent)
|
|
||||||
propagateCancel(parent, c)
|
|
||||||
return c, func() { c.cancel(true, Canceled) }
|
|
||||||
}
|
|
||||||
|
|
||||||
// newCancelCtx returns an initialized cancelCtx.
|
|
||||||
func newCancelCtx(parent Context) *cancelCtx {
|
|
||||||
return &cancelCtx{
|
|
||||||
Context: parent,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// propagateCancel arranges for child to be canceled when parent is.
|
|
||||||
func propagateCancel(parent Context, child canceler) {
|
|
||||||
if parent.Done() == nil {
|
|
||||||
return // parent is never canceled
|
|
||||||
}
|
|
||||||
if p, ok := parentCancelCtx(parent); ok {
|
|
||||||
p.mu.Lock()
|
|
||||||
if p.err != nil {
|
|
||||||
// parent has already been canceled
|
|
||||||
child.cancel(false, p.err)
|
|
||||||
} else {
|
|
||||||
if p.children == nil {
|
|
||||||
p.children = make(map[canceler]bool)
|
|
||||||
}
|
|
||||||
p.children[child] = true
|
|
||||||
}
|
|
||||||
p.mu.Unlock()
|
|
||||||
} else {
|
|
||||||
go func() {
|
|
||||||
select {
|
|
||||||
case <-parent.Done():
|
|
||||||
child.cancel(false, parent.Err())
|
|
||||||
case <-child.Done():
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parentCancelCtx follows a chain of parent references until it finds a
|
|
||||||
// *cancelCtx. This function understands how each of the concrete types in this
|
|
||||||
// package represents its parent.
|
|
||||||
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
|
|
||||||
for {
|
|
||||||
switch c := parent.(type) {
|
|
||||||
case *cancelCtx:
|
|
||||||
return c, true
|
|
||||||
case *timerCtx:
|
|
||||||
return c.cancelCtx, true
|
|
||||||
case *valueCtx:
|
|
||||||
parent = c.Context
|
|
||||||
default:
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeChild removes a context from its parent.
|
|
||||||
func removeChild(parent Context, child canceler) {
|
|
||||||
p, ok := parentCancelCtx(parent)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.mu.Lock()
|
|
||||||
if p.children != nil {
|
|
||||||
delete(p.children, child)
|
|
||||||
}
|
|
||||||
p.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A canceler is a context type that can be canceled directly. The
|
|
||||||
// implementations are *cancelCtx and *timerCtx.
|
|
||||||
type canceler interface {
|
|
||||||
cancel(removeFromParent bool, err error)
|
|
||||||
Done() <-chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A cancelCtx can be canceled. When canceled, it also cancels any children
|
|
||||||
// that implement canceler.
|
|
||||||
type cancelCtx struct {
|
|
||||||
Context
|
|
||||||
|
|
||||||
done chan struct{} // closed by the first cancel call.
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
children map[canceler]bool // set to nil by the first cancel call
|
|
||||||
err error // set to non-nil by the first cancel call
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cancelCtx) Done() <-chan struct{} {
|
|
||||||
return c.done
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cancelCtx) Err() error {
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
return c.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cancelCtx) String() string {
|
|
||||||
return fmt.Sprintf("%v.WithCancel", c.Context)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cancel closes c.done, cancels each of c's children, and, if
|
|
||||||
// removeFromParent is true, removes c from its parent's children.
|
|
||||||
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
|
|
||||||
if err == nil {
|
|
||||||
panic("context: internal error: missing cancel error")
|
|
||||||
}
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.err != nil {
|
|
||||||
c.mu.Unlock()
|
|
||||||
return // already canceled
|
|
||||||
}
|
|
||||||
c.err = err
|
|
||||||
close(c.done)
|
|
||||||
for child := range c.children {
|
|
||||||
// NOTE: acquiring the child's lock while holding parent's lock.
|
|
||||||
child.cancel(false, err)
|
|
||||||
}
|
|
||||||
c.children = nil
|
|
||||||
c.mu.Unlock()
|
|
||||||
|
|
||||||
if removeFromParent {
|
|
||||||
removeChild(c.Context, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithDeadline returns a copy of the parent context with the deadline adjusted
|
|
||||||
// to be no later than d. If the parent's deadline is already earlier than d,
|
|
||||||
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
|
|
||||||
// context's Done channel is closed when the deadline expires, when the returned
|
|
||||||
// cancel function is called, or when the parent context's Done channel is
|
|
||||||
// closed, whichever happens first.
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete.
|
|
||||||
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
|
|
||||||
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
|
|
||||||
// The current deadline is already sooner than the new one.
|
|
||||||
return WithCancel(parent)
|
|
||||||
}
|
|
||||||
c := &timerCtx{
|
|
||||||
cancelCtx: newCancelCtx(parent),
|
|
||||||
deadline: deadline,
|
|
||||||
}
|
|
||||||
propagateCancel(parent, c)
|
|
||||||
d := deadline.Sub(time.Now())
|
|
||||||
if d <= 0 {
|
|
||||||
c.cancel(true, DeadlineExceeded) // deadline has already passed
|
|
||||||
return c, func() { c.cancel(true, Canceled) }
|
|
||||||
}
|
|
||||||
c.mu.Lock()
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
if c.err == nil {
|
|
||||||
c.timer = time.AfterFunc(d, func() {
|
|
||||||
c.cancel(true, DeadlineExceeded)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return c, func() { c.cancel(true, Canceled) }
|
|
||||||
}
|
|
||||||
|
|
||||||
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
|
|
||||||
// implement Done and Err. It implements cancel by stopping its timer then
|
|
||||||
// delegating to cancelCtx.cancel.
|
|
||||||
type timerCtx struct {
|
|
||||||
*cancelCtx
|
|
||||||
timer *time.Timer // Under cancelCtx.mu.
|
|
||||||
|
|
||||||
deadline time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
|
|
||||||
return c.deadline, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *timerCtx) String() string {
|
|
||||||
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *timerCtx) cancel(removeFromParent bool, err error) {
|
|
||||||
c.cancelCtx.cancel(false, err)
|
|
||||||
if removeFromParent {
|
|
||||||
// Remove this timerCtx from its parent cancelCtx's children.
|
|
||||||
removeChild(c.cancelCtx.Context, c)
|
|
||||||
}
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.timer != nil {
|
|
||||||
c.timer.Stop()
|
|
||||||
c.timer = nil
|
|
||||||
}
|
|
||||||
c.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
|
|
||||||
//
|
|
||||||
// Canceling this context releases resources associated with it, so code should
|
|
||||||
// call cancel as soon as the operations running in this Context complete:
|
|
||||||
//
|
|
||||||
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
|
|
||||||
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
|
||||||
// defer cancel() // releases resources if slowOperation completes before timeout elapses
|
|
||||||
// return slowOperation(ctx)
|
|
||||||
// }
|
|
||||||
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
|
|
||||||
return WithDeadline(parent, time.Now().Add(timeout))
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithValue returns a copy of parent in which the value associated with key is
|
|
||||||
// val.
|
|
||||||
//
|
|
||||||
// Use context Values only for request-scoped data that transits processes and
|
|
||||||
// APIs, not for passing optional parameters to functions.
|
|
||||||
func WithValue(parent Context, key interface{}, val interface{}) Context {
|
|
||||||
return &valueCtx{parent, key, val}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A valueCtx carries a key-value pair. It implements Value for that key and
|
|
||||||
// delegates all other calls to the embedded Context.
|
|
||||||
type valueCtx struct {
|
|
||||||
Context
|
|
||||||
key, val interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *valueCtx) String() string {
|
|
||||||
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *valueCtx) Value(key interface{}) interface{} {
|
|
||||||
if c.key == key {
|
|
||||||
return c.val
|
|
||||||
}
|
|
||||||
return c.Context.Value(key)
|
|
||||||
}
|
|
109
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
109
vendor/golang.org/x/net/context/pre_go19.go
generated
vendored
@ -1,109 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.9
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
// A Context carries a deadline, a cancelation signal, and other values across
|
|
||||||
// API boundaries.
|
|
||||||
//
|
|
||||||
// Context's methods may be called by multiple goroutines simultaneously.
|
|
||||||
type Context interface {
|
|
||||||
// Deadline returns the time when work done on behalf of this context
|
|
||||||
// should be canceled. Deadline returns ok==false when no deadline is
|
|
||||||
// set. Successive calls to Deadline return the same results.
|
|
||||||
Deadline() (deadline time.Time, ok bool)
|
|
||||||
|
|
||||||
// Done returns a channel that's closed when work done on behalf of this
|
|
||||||
// context should be canceled. Done may return nil if this context can
|
|
||||||
// never be canceled. Successive calls to Done return the same value.
|
|
||||||
//
|
|
||||||
// WithCancel arranges for Done to be closed when cancel is called;
|
|
||||||
// WithDeadline arranges for Done to be closed when the deadline
|
|
||||||
// expires; WithTimeout arranges for Done to be closed when the timeout
|
|
||||||
// elapses.
|
|
||||||
//
|
|
||||||
// Done is provided for use in select statements:
|
|
||||||
//
|
|
||||||
// // Stream generates values with DoSomething and sends them to out
|
|
||||||
// // until DoSomething returns an error or ctx.Done is closed.
|
|
||||||
// func Stream(ctx context.Context, out chan<- Value) error {
|
|
||||||
// for {
|
|
||||||
// v, err := DoSomething(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
// select {
|
|
||||||
// case <-ctx.Done():
|
|
||||||
// return ctx.Err()
|
|
||||||
// case out <- v:
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// See http://blog.golang.org/pipelines for more examples of how to use
|
|
||||||
// a Done channel for cancelation.
|
|
||||||
Done() <-chan struct{}
|
|
||||||
|
|
||||||
// Err returns a non-nil error value after Done is closed. Err returns
|
|
||||||
// Canceled if the context was canceled or DeadlineExceeded if the
|
|
||||||
// context's deadline passed. No other values for Err are defined.
|
|
||||||
// After Done is closed, successive calls to Err return the same value.
|
|
||||||
Err() error
|
|
||||||
|
|
||||||
// Value returns the value associated with this context for key, or nil
|
|
||||||
// if no value is associated with key. Successive calls to Value with
|
|
||||||
// the same key returns the same result.
|
|
||||||
//
|
|
||||||
// Use context values only for request-scoped data that transits
|
|
||||||
// processes and API boundaries, not for passing optional parameters to
|
|
||||||
// functions.
|
|
||||||
//
|
|
||||||
// A key identifies a specific value in a Context. Functions that wish
|
|
||||||
// to store values in Context typically allocate a key in a global
|
|
||||||
// variable then use that key as the argument to context.WithValue and
|
|
||||||
// Context.Value. A key can be any type that supports equality;
|
|
||||||
// packages should define keys as an unexported type to avoid
|
|
||||||
// collisions.
|
|
||||||
//
|
|
||||||
// Packages that define a Context key should provide type-safe accessors
|
|
||||||
// for the values stores using that key:
|
|
||||||
//
|
|
||||||
// // Package user defines a User type that's stored in Contexts.
|
|
||||||
// package user
|
|
||||||
//
|
|
||||||
// import "golang.org/x/net/context"
|
|
||||||
//
|
|
||||||
// // User is the type of value stored in the Contexts.
|
|
||||||
// type User struct {...}
|
|
||||||
//
|
|
||||||
// // key is an unexported type for keys defined in this package.
|
|
||||||
// // This prevents collisions with keys defined in other packages.
|
|
||||||
// type key int
|
|
||||||
//
|
|
||||||
// // userKey is the key for user.User values in Contexts. It is
|
|
||||||
// // unexported; clients use user.NewContext and user.FromContext
|
|
||||||
// // instead of using this key directly.
|
|
||||||
// var userKey key = 0
|
|
||||||
//
|
|
||||||
// // NewContext returns a new Context that carries value u.
|
|
||||||
// func NewContext(ctx context.Context, u *User) context.Context {
|
|
||||||
// return context.WithValue(ctx, userKey, u)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// // FromContext returns the User value stored in ctx, if any.
|
|
||||||
// func FromContext(ctx context.Context) (*User, bool) {
|
|
||||||
// u, ok := ctx.Value(userKey).(*User)
|
|
||||||
// return u, ok
|
|
||||||
// }
|
|
||||||
Value(key interface{}) interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A CancelFunc tells an operation to abandon its work.
|
|
||||||
// A CancelFunc does not wait for the work to stop.
|
|
||||||
// After the first call, subsequent calls to a CancelFunc do nothing.
|
|
||||||
type CancelFunc func()
|
|
78
vendor/golang.org/x/net/html/atom/atom.go
generated
vendored
78
vendor/golang.org/x/net/html/atom/atom.go
generated
vendored
@ -1,78 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package atom provides integer codes (also known as atoms) for a fixed set of
|
|
||||||
// frequently occurring HTML strings: tag names and attribute keys such as "p"
|
|
||||||
// and "id".
|
|
||||||
//
|
|
||||||
// Sharing an atom's name between all elements with the same tag can result in
|
|
||||||
// fewer string allocations when tokenizing and parsing HTML. Integer
|
|
||||||
// comparisons are also generally faster than string comparisons.
|
|
||||||
//
|
|
||||||
// The value of an atom's particular code is not guaranteed to stay the same
|
|
||||||
// between versions of this package. Neither is any ordering guaranteed:
|
|
||||||
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
|
|
||||||
// be dense. The only guarantees are that e.g. looking up "div" will yield
|
|
||||||
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
|
|
||||||
package atom // import "golang.org/x/net/html/atom"
|
|
||||||
|
|
||||||
// Atom is an integer code for a string. The zero value maps to "".
|
|
||||||
type Atom uint32
|
|
||||||
|
|
||||||
// String returns the atom's name.
|
|
||||||
func (a Atom) String() string {
|
|
||||||
start := uint32(a >> 8)
|
|
||||||
n := uint32(a & 0xff)
|
|
||||||
if start+n > uint32(len(atomText)) {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return atomText[start : start+n]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a Atom) string() string {
|
|
||||||
return atomText[a>>8 : a>>8+a&0xff]
|
|
||||||
}
|
|
||||||
|
|
||||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
|
||||||
func fnv(h uint32, s []byte) uint32 {
|
|
||||||
for i := range s {
|
|
||||||
h ^= uint32(s[i])
|
|
||||||
h *= 16777619
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
func match(s string, t []byte) bool {
|
|
||||||
for i, c := range t {
|
|
||||||
if s[i] != c {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lookup returns the atom whose name is s. It returns zero if there is no
|
|
||||||
// such atom. The lookup is case sensitive.
|
|
||||||
func Lookup(s []byte) Atom {
|
|
||||||
if len(s) == 0 || len(s) > maxAtomLen {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
h := fnv(hash0, s)
|
|
||||||
if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string whose contents are equal to s. In that sense, it is
|
|
||||||
// equivalent to string(s) but may be more efficient.
|
|
||||||
func String(s []byte) string {
|
|
||||||
if a := Lookup(s); a != 0 {
|
|
||||||
return a.String()
|
|
||||||
}
|
|
||||||
return string(s)
|
|
||||||
}
|
|
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
712
vendor/golang.org/x/net/html/atom/gen.go
generated
vendored
@ -1,712 +0,0 @@
|
|||||||
// Copyright 2012 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build ignore
|
|
||||||
|
|
||||||
//go:generate go run gen.go
|
|
||||||
//go:generate go run gen.go -test
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"go/format"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// identifier converts s to a Go exported identifier.
|
|
||||||
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
|
|
||||||
func identifier(s string) string {
|
|
||||||
b := make([]byte, 0, len(s))
|
|
||||||
cap := true
|
|
||||||
for _, c := range s {
|
|
||||||
if c == '-' {
|
|
||||||
cap = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if cap && 'a' <= c && c <= 'z' {
|
|
||||||
c -= 'a' - 'A'
|
|
||||||
}
|
|
||||||
cap = false
|
|
||||||
b = append(b, byte(c))
|
|
||||||
}
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
var test = flag.Bool("test", false, "generate table_test.go")
|
|
||||||
|
|
||||||
func genFile(name string, buf *bytes.Buffer) {
|
|
||||||
b, err := format.Source(buf.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if err := ioutil.WriteFile(name, b, 0644); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
var all []string
|
|
||||||
all = append(all, elements...)
|
|
||||||
all = append(all, attributes...)
|
|
||||||
all = append(all, eventHandlers...)
|
|
||||||
all = append(all, extra...)
|
|
||||||
sort.Strings(all)
|
|
||||||
|
|
||||||
// uniq - lists have dups
|
|
||||||
w := 0
|
|
||||||
for _, s := range all {
|
|
||||||
if w == 0 || all[w-1] != s {
|
|
||||||
all[w] = s
|
|
||||||
w++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
all = all[:w]
|
|
||||||
|
|
||||||
if *test {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
|
||||||
fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n")
|
|
||||||
fmt.Fprintln(&buf, "package atom\n")
|
|
||||||
fmt.Fprintln(&buf, "var testAtomList = []string{")
|
|
||||||
for _, s := range all {
|
|
||||||
fmt.Fprintf(&buf, "\t%q,\n", s)
|
|
||||||
}
|
|
||||||
fmt.Fprintln(&buf, "}")
|
|
||||||
|
|
||||||
genFile("table_test.go", &buf)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find hash that minimizes table size.
|
|
||||||
var best *table
|
|
||||||
for i := 0; i < 1000000; i++ {
|
|
||||||
if best != nil && 1<<(best.k-1) < len(all) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
h := rand.Uint32()
|
|
||||||
for k := uint(0); k <= 16; k++ {
|
|
||||||
if best != nil && k >= best.k {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var t table
|
|
||||||
if t.init(h, k, all) {
|
|
||||||
best = &t
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if best == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Lay out strings, using overlaps when possible.
|
|
||||||
layout := append([]string{}, all...)
|
|
||||||
|
|
||||||
// Remove strings that are substrings of other strings
|
|
||||||
for changed := true; changed; {
|
|
||||||
changed = false
|
|
||||||
for i, s := range layout {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for j, t := range layout {
|
|
||||||
if i != j && t != "" && strings.Contains(s, t) {
|
|
||||||
changed = true
|
|
||||||
layout[j] = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Join strings where one suffix matches another prefix.
|
|
||||||
for {
|
|
||||||
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
|
||||||
// maximizing overlap length k.
|
|
||||||
besti := -1
|
|
||||||
bestj := -1
|
|
||||||
bestk := 0
|
|
||||||
for i, s := range layout {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for j, t := range layout {
|
|
||||||
if i == j {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
|
||||||
if s[len(s)-k:] == t[:k] {
|
|
||||||
besti = i
|
|
||||||
bestj = j
|
|
||||||
bestk = k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if bestk > 0 {
|
|
||||||
layout[besti] += layout[bestj][bestk:]
|
|
||||||
layout[bestj] = ""
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
text := strings.Join(layout, "")
|
|
||||||
|
|
||||||
atom := map[string]uint32{}
|
|
||||||
for _, s := range all {
|
|
||||||
off := strings.Index(text, s)
|
|
||||||
if off < 0 {
|
|
||||||
panic("lost string " + s)
|
|
||||||
}
|
|
||||||
atom[s] = uint32(off<<8 | len(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
// Generate the Go code.
|
|
||||||
fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n")
|
|
||||||
fmt.Fprintln(&buf, "//go:generate go run gen.go\n")
|
|
||||||
fmt.Fprintln(&buf, "package atom\n\nconst (")
|
|
||||||
|
|
||||||
// compute max len
|
|
||||||
maxLen := 0
|
|
||||||
for _, s := range all {
|
|
||||||
if maxLen < len(s) {
|
|
||||||
maxLen = len(s)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s])
|
|
||||||
}
|
|
||||||
fmt.Fprintln(&buf, ")\n")
|
|
||||||
|
|
||||||
fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0)
|
|
||||||
fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen)
|
|
||||||
|
|
||||||
fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k)
|
|
||||||
for i, s := range best.tab {
|
|
||||||
if s == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "}\n")
|
|
||||||
datasize := (1 << best.k) * 4
|
|
||||||
|
|
||||||
fmt.Fprintln(&buf, "const atomText =")
|
|
||||||
textsize := len(text)
|
|
||||||
for len(text) > 60 {
|
|
||||||
fmt.Fprintf(&buf, "\t%q +\n", text[:60])
|
|
||||||
text = text[60:]
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&buf, "\t%q\n\n", text)
|
|
||||||
|
|
||||||
genFile("table.go", &buf)
|
|
||||||
|
|
||||||
fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
|
||||||
}
|
|
||||||
|
|
||||||
type byLen []string
|
|
||||||
|
|
||||||
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
|
||||||
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
func (x byLen) Len() int { return len(x) }
|
|
||||||
|
|
||||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
|
||||||
func fnv(h uint32, s string) uint32 {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
h ^= uint32(s[i])
|
|
||||||
h *= 16777619
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// A table represents an attempt at constructing the lookup table.
|
|
||||||
// The lookup table uses cuckoo hashing, meaning that each string
|
|
||||||
// can be found in one of two positions.
|
|
||||||
type table struct {
|
|
||||||
h0 uint32
|
|
||||||
k uint
|
|
||||||
mask uint32
|
|
||||||
tab []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash returns the two hashes for s.
|
|
||||||
func (t *table) hash(s string) (h1, h2 uint32) {
|
|
||||||
h := fnv(t.h0, s)
|
|
||||||
h1 = h & t.mask
|
|
||||||
h2 = (h >> 16) & t.mask
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes the table with the given parameters.
|
|
||||||
// h0 is the initial hash value,
|
|
||||||
// k is the number of bits of hash value to use, and
|
|
||||||
// x is the list of strings to store in the table.
|
|
||||||
// init returns false if the table cannot be constructed.
|
|
||||||
func (t *table) init(h0 uint32, k uint, x []string) bool {
|
|
||||||
t.h0 = h0
|
|
||||||
t.k = k
|
|
||||||
t.tab = make([]string, 1<<k)
|
|
||||||
t.mask = 1<<k - 1
|
|
||||||
for _, s := range x {
|
|
||||||
if !t.insert(s) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert inserts s in the table.
|
|
||||||
func (t *table) insert(s string) bool {
|
|
||||||
h1, h2 := t.hash(s)
|
|
||||||
if t.tab[h1] == "" {
|
|
||||||
t.tab[h1] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.tab[h2] == "" {
|
|
||||||
t.tab[h2] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.push(h1, 0) {
|
|
||||||
t.tab[h1] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if t.push(h2, 0) {
|
|
||||||
t.tab[h2] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// push attempts to push aside the entry in slot i.
|
|
||||||
func (t *table) push(i uint32, depth int) bool {
|
|
||||||
if depth > len(t.tab) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
s := t.tab[i]
|
|
||||||
h1, h2 := t.hash(s)
|
|
||||||
j := h1 + h2 - i
|
|
||||||
if t.tab[j] != "" && !t.push(j, depth+1) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
t.tab[j] = s
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// The lists of element names and attribute keys were taken from
|
|
||||||
// https://html.spec.whatwg.org/multipage/indices.html#index
|
|
||||||
// as of the "HTML Living Standard - Last Updated 16 April 2018" version.
|
|
||||||
|
|
||||||
// "command", "keygen" and "menuitem" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var elements = []string{
|
|
||||||
"a",
|
|
||||||
"abbr",
|
|
||||||
"address",
|
|
||||||
"area",
|
|
||||||
"article",
|
|
||||||
"aside",
|
|
||||||
"audio",
|
|
||||||
"b",
|
|
||||||
"base",
|
|
||||||
"bdi",
|
|
||||||
"bdo",
|
|
||||||
"blockquote",
|
|
||||||
"body",
|
|
||||||
"br",
|
|
||||||
"button",
|
|
||||||
"canvas",
|
|
||||||
"caption",
|
|
||||||
"cite",
|
|
||||||
"code",
|
|
||||||
"col",
|
|
||||||
"colgroup",
|
|
||||||
"command",
|
|
||||||
"data",
|
|
||||||
"datalist",
|
|
||||||
"dd",
|
|
||||||
"del",
|
|
||||||
"details",
|
|
||||||
"dfn",
|
|
||||||
"dialog",
|
|
||||||
"div",
|
|
||||||
"dl",
|
|
||||||
"dt",
|
|
||||||
"em",
|
|
||||||
"embed",
|
|
||||||
"fieldset",
|
|
||||||
"figcaption",
|
|
||||||
"figure",
|
|
||||||
"footer",
|
|
||||||
"form",
|
|
||||||
"h1",
|
|
||||||
"h2",
|
|
||||||
"h3",
|
|
||||||
"h4",
|
|
||||||
"h5",
|
|
||||||
"h6",
|
|
||||||
"head",
|
|
||||||
"header",
|
|
||||||
"hgroup",
|
|
||||||
"hr",
|
|
||||||
"html",
|
|
||||||
"i",
|
|
||||||
"iframe",
|
|
||||||
"img",
|
|
||||||
"input",
|
|
||||||
"ins",
|
|
||||||
"kbd",
|
|
||||||
"keygen",
|
|
||||||
"label",
|
|
||||||
"legend",
|
|
||||||
"li",
|
|
||||||
"link",
|
|
||||||
"main",
|
|
||||||
"map",
|
|
||||||
"mark",
|
|
||||||
"menu",
|
|
||||||
"menuitem",
|
|
||||||
"meta",
|
|
||||||
"meter",
|
|
||||||
"nav",
|
|
||||||
"noscript",
|
|
||||||
"object",
|
|
||||||
"ol",
|
|
||||||
"optgroup",
|
|
||||||
"option",
|
|
||||||
"output",
|
|
||||||
"p",
|
|
||||||
"param",
|
|
||||||
"picture",
|
|
||||||
"pre",
|
|
||||||
"progress",
|
|
||||||
"q",
|
|
||||||
"rp",
|
|
||||||
"rt",
|
|
||||||
"ruby",
|
|
||||||
"s",
|
|
||||||
"samp",
|
|
||||||
"script",
|
|
||||||
"section",
|
|
||||||
"select",
|
|
||||||
"slot",
|
|
||||||
"small",
|
|
||||||
"source",
|
|
||||||
"span",
|
|
||||||
"strong",
|
|
||||||
"style",
|
|
||||||
"sub",
|
|
||||||
"summary",
|
|
||||||
"sup",
|
|
||||||
"table",
|
|
||||||
"tbody",
|
|
||||||
"td",
|
|
||||||
"template",
|
|
||||||
"textarea",
|
|
||||||
"tfoot",
|
|
||||||
"th",
|
|
||||||
"thead",
|
|
||||||
"time",
|
|
||||||
"title",
|
|
||||||
"tr",
|
|
||||||
"track",
|
|
||||||
"u",
|
|
||||||
"ul",
|
|
||||||
"var",
|
|
||||||
"video",
|
|
||||||
"wbr",
|
|
||||||
}
|
|
||||||
|
|
||||||
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
|
|
||||||
//
|
|
||||||
// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup",
|
|
||||||
// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var attributes = []string{
|
|
||||||
"abbr",
|
|
||||||
"accept",
|
|
||||||
"accept-charset",
|
|
||||||
"accesskey",
|
|
||||||
"action",
|
|
||||||
"allowfullscreen",
|
|
||||||
"allowpaymentrequest",
|
|
||||||
"allowusermedia",
|
|
||||||
"alt",
|
|
||||||
"as",
|
|
||||||
"async",
|
|
||||||
"autocomplete",
|
|
||||||
"autofocus",
|
|
||||||
"autoplay",
|
|
||||||
"challenge",
|
|
||||||
"charset",
|
|
||||||
"checked",
|
|
||||||
"cite",
|
|
||||||
"class",
|
|
||||||
"color",
|
|
||||||
"cols",
|
|
||||||
"colspan",
|
|
||||||
"command",
|
|
||||||
"content",
|
|
||||||
"contenteditable",
|
|
||||||
"contextmenu",
|
|
||||||
"controls",
|
|
||||||
"coords",
|
|
||||||
"crossorigin",
|
|
||||||
"data",
|
|
||||||
"datetime",
|
|
||||||
"default",
|
|
||||||
"defer",
|
|
||||||
"dir",
|
|
||||||
"dirname",
|
|
||||||
"disabled",
|
|
||||||
"download",
|
|
||||||
"draggable",
|
|
||||||
"dropzone",
|
|
||||||
"enctype",
|
|
||||||
"for",
|
|
||||||
"form",
|
|
||||||
"formaction",
|
|
||||||
"formenctype",
|
|
||||||
"formmethod",
|
|
||||||
"formnovalidate",
|
|
||||||
"formtarget",
|
|
||||||
"headers",
|
|
||||||
"height",
|
|
||||||
"hidden",
|
|
||||||
"high",
|
|
||||||
"href",
|
|
||||||
"hreflang",
|
|
||||||
"http-equiv",
|
|
||||||
"icon",
|
|
||||||
"id",
|
|
||||||
"inputmode",
|
|
||||||
"integrity",
|
|
||||||
"is",
|
|
||||||
"ismap",
|
|
||||||
"itemid",
|
|
||||||
"itemprop",
|
|
||||||
"itemref",
|
|
||||||
"itemscope",
|
|
||||||
"itemtype",
|
|
||||||
"keytype",
|
|
||||||
"kind",
|
|
||||||
"label",
|
|
||||||
"lang",
|
|
||||||
"list",
|
|
||||||
"loop",
|
|
||||||
"low",
|
|
||||||
"manifest",
|
|
||||||
"max",
|
|
||||||
"maxlength",
|
|
||||||
"media",
|
|
||||||
"mediagroup",
|
|
||||||
"method",
|
|
||||||
"min",
|
|
||||||
"minlength",
|
|
||||||
"multiple",
|
|
||||||
"muted",
|
|
||||||
"name",
|
|
||||||
"nomodule",
|
|
||||||
"nonce",
|
|
||||||
"novalidate",
|
|
||||||
"open",
|
|
||||||
"optimum",
|
|
||||||
"pattern",
|
|
||||||
"ping",
|
|
||||||
"placeholder",
|
|
||||||
"playsinline",
|
|
||||||
"poster",
|
|
||||||
"preload",
|
|
||||||
"radiogroup",
|
|
||||||
"readonly",
|
|
||||||
"referrerpolicy",
|
|
||||||
"rel",
|
|
||||||
"required",
|
|
||||||
"reversed",
|
|
||||||
"rows",
|
|
||||||
"rowspan",
|
|
||||||
"sandbox",
|
|
||||||
"spellcheck",
|
|
||||||
"scope",
|
|
||||||
"scoped",
|
|
||||||
"seamless",
|
|
||||||
"selected",
|
|
||||||
"shape",
|
|
||||||
"size",
|
|
||||||
"sizes",
|
|
||||||
"sortable",
|
|
||||||
"sorted",
|
|
||||||
"slot",
|
|
||||||
"span",
|
|
||||||
"spellcheck",
|
|
||||||
"src",
|
|
||||||
"srcdoc",
|
|
||||||
"srclang",
|
|
||||||
"srcset",
|
|
||||||
"start",
|
|
||||||
"step",
|
|
||||||
"style",
|
|
||||||
"tabindex",
|
|
||||||
"target",
|
|
||||||
"title",
|
|
||||||
"translate",
|
|
||||||
"type",
|
|
||||||
"typemustmatch",
|
|
||||||
"updateviacache",
|
|
||||||
"usemap",
|
|
||||||
"value",
|
|
||||||
"width",
|
|
||||||
"workertype",
|
|
||||||
"wrap",
|
|
||||||
}
|
|
||||||
|
|
||||||
// "onautocomplete", "onautocompleteerror", "onmousewheel",
|
|
||||||
// "onshow" and "onsort" have been removed from the spec,
|
|
||||||
// but are kept here for backwards compatibility.
|
|
||||||
var eventHandlers = []string{
|
|
||||||
"onabort",
|
|
||||||
"onautocomplete",
|
|
||||||
"onautocompleteerror",
|
|
||||||
"onauxclick",
|
|
||||||
"onafterprint",
|
|
||||||
"onbeforeprint",
|
|
||||||
"onbeforeunload",
|
|
||||||
"onblur",
|
|
||||||
"oncancel",
|
|
||||||
"oncanplay",
|
|
||||||
"oncanplaythrough",
|
|
||||||
"onchange",
|
|
||||||
"onclick",
|
|
||||||
"onclose",
|
|
||||||
"oncontextmenu",
|
|
||||||
"oncopy",
|
|
||||||
"oncuechange",
|
|
||||||
"oncut",
|
|
||||||
"ondblclick",
|
|
||||||
"ondrag",
|
|
||||||
"ondragend",
|
|
||||||
"ondragenter",
|
|
||||||
"ondragexit",
|
|
||||||
"ondragleave",
|
|
||||||
"ondragover",
|
|
||||||
"ondragstart",
|
|
||||||
"ondrop",
|
|
||||||
"ondurationchange",
|
|
||||||
"onemptied",
|
|
||||||
"onended",
|
|
||||||
"onerror",
|
|
||||||
"onfocus",
|
|
||||||
"onhashchange",
|
|
||||||
"oninput",
|
|
||||||
"oninvalid",
|
|
||||||
"onkeydown",
|
|
||||||
"onkeypress",
|
|
||||||
"onkeyup",
|
|
||||||
"onlanguagechange",
|
|
||||||
"onload",
|
|
||||||
"onloadeddata",
|
|
||||||
"onloadedmetadata",
|
|
||||||
"onloadend",
|
|
||||||
"onloadstart",
|
|
||||||
"onmessage",
|
|
||||||
"onmessageerror",
|
|
||||||
"onmousedown",
|
|
||||||
"onmouseenter",
|
|
||||||
"onmouseleave",
|
|
||||||
"onmousemove",
|
|
||||||
"onmouseout",
|
|
||||||
"onmouseover",
|
|
||||||
"onmouseup",
|
|
||||||
"onmousewheel",
|
|
||||||
"onwheel",
|
|
||||||
"onoffline",
|
|
||||||
"ononline",
|
|
||||||
"onpagehide",
|
|
||||||
"onpageshow",
|
|
||||||
"onpaste",
|
|
||||||
"onpause",
|
|
||||||
"onplay",
|
|
||||||
"onplaying",
|
|
||||||
"onpopstate",
|
|
||||||
"onprogress",
|
|
||||||
"onratechange",
|
|
||||||
"onreset",
|
|
||||||
"onresize",
|
|
||||||
"onrejectionhandled",
|
|
||||||
"onscroll",
|
|
||||||
"onsecuritypolicyviolation",
|
|
||||||
"onseeked",
|
|
||||||
"onseeking",
|
|
||||||
"onselect",
|
|
||||||
"onshow",
|
|
||||||
"onsort",
|
|
||||||
"onstalled",
|
|
||||||
"onstorage",
|
|
||||||
"onsubmit",
|
|
||||||
"onsuspend",
|
|
||||||
"ontimeupdate",
|
|
||||||
"ontoggle",
|
|
||||||
"onunhandledrejection",
|
|
||||||
"onunload",
|
|
||||||
"onvolumechange",
|
|
||||||
"onwaiting",
|
|
||||||
}
|
|
||||||
|
|
||||||
// extra are ad-hoc values not covered by any of the lists above.
|
|
||||||
var extra = []string{
|
|
||||||
"acronym",
|
|
||||||
"align",
|
|
||||||
"annotation",
|
|
||||||
"annotation-xml",
|
|
||||||
"applet",
|
|
||||||
"basefont",
|
|
||||||
"bgsound",
|
|
||||||
"big",
|
|
||||||
"blink",
|
|
||||||
"center",
|
|
||||||
"color",
|
|
||||||
"desc",
|
|
||||||
"face",
|
|
||||||
"font",
|
|
||||||
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
|
||||||
"foreignobject",
|
|
||||||
"frame",
|
|
||||||
"frameset",
|
|
||||||
"image",
|
|
||||||
"isindex",
|
|
||||||
"listing",
|
|
||||||
"malignmark",
|
|
||||||
"marquee",
|
|
||||||
"math",
|
|
||||||
"mglyph",
|
|
||||||
"mi",
|
|
||||||
"mn",
|
|
||||||
"mo",
|
|
||||||
"ms",
|
|
||||||
"mtext",
|
|
||||||
"nobr",
|
|
||||||
"noembed",
|
|
||||||
"noframes",
|
|
||||||
"plaintext",
|
|
||||||
"prompt",
|
|
||||||
"public",
|
|
||||||
"rb",
|
|
||||||
"rtc",
|
|
||||||
"spacer",
|
|
||||||
"strike",
|
|
||||||
"svg",
|
|
||||||
"system",
|
|
||||||
"tt",
|
|
||||||
"xmp",
|
|
||||||
}
|
|
783
vendor/golang.org/x/net/html/atom/table.go
generated
vendored
783
vendor/golang.org/x/net/html/atom/table.go
generated
vendored
@ -1,783 +0,0 @@
|
|||||||
// Code generated by go generate gen.go; DO NOT EDIT.
|
|
||||||
|
|
||||||
//go:generate go run gen.go
|
|
||||||
|
|
||||||
package atom
|
|
||||||
|
|
||||||
const (
|
|
||||||
A Atom = 0x1
|
|
||||||
Abbr Atom = 0x4
|
|
||||||
Accept Atom = 0x1a06
|
|
||||||
AcceptCharset Atom = 0x1a0e
|
|
||||||
Accesskey Atom = 0x2c09
|
|
||||||
Acronym Atom = 0xaa07
|
|
||||||
Action Atom = 0x27206
|
|
||||||
Address Atom = 0x6f307
|
|
||||||
Align Atom = 0xb105
|
|
||||||
Allowfullscreen Atom = 0x2080f
|
|
||||||
Allowpaymentrequest Atom = 0xc113
|
|
||||||
Allowusermedia Atom = 0xdd0e
|
|
||||||
Alt Atom = 0xf303
|
|
||||||
Annotation Atom = 0x1c90a
|
|
||||||
AnnotationXml Atom = 0x1c90e
|
|
||||||
Applet Atom = 0x31906
|
|
||||||
Area Atom = 0x35604
|
|
||||||
Article Atom = 0x3fc07
|
|
||||||
As Atom = 0x3c02
|
|
||||||
Aside Atom = 0x10705
|
|
||||||
Async Atom = 0xff05
|
|
||||||
Audio Atom = 0x11505
|
|
||||||
Autocomplete Atom = 0x2780c
|
|
||||||
Autofocus Atom = 0x12109
|
|
||||||
Autoplay Atom = 0x13c08
|
|
||||||
B Atom = 0x101
|
|
||||||
Base Atom = 0x3b04
|
|
||||||
Basefont Atom = 0x3b08
|
|
||||||
Bdi Atom = 0xba03
|
|
||||||
Bdo Atom = 0x14b03
|
|
||||||
Bgsound Atom = 0x15e07
|
|
||||||
Big Atom = 0x17003
|
|
||||||
Blink Atom = 0x17305
|
|
||||||
Blockquote Atom = 0x1870a
|
|
||||||
Body Atom = 0x2804
|
|
||||||
Br Atom = 0x202
|
|
||||||
Button Atom = 0x19106
|
|
||||||
Canvas Atom = 0x10306
|
|
||||||
Caption Atom = 0x23107
|
|
||||||
Center Atom = 0x22006
|
|
||||||
Challenge Atom = 0x29b09
|
|
||||||
Charset Atom = 0x2107
|
|
||||||
Checked Atom = 0x47907
|
|
||||||
Cite Atom = 0x19c04
|
|
||||||
Class Atom = 0x56405
|
|
||||||
Code Atom = 0x5c504
|
|
||||||
Col Atom = 0x1ab03
|
|
||||||
Colgroup Atom = 0x1ab08
|
|
||||||
Color Atom = 0x1bf05
|
|
||||||
Cols Atom = 0x1c404
|
|
||||||
Colspan Atom = 0x1c407
|
|
||||||
Command Atom = 0x1d707
|
|
||||||
Content Atom = 0x58b07
|
|
||||||
Contenteditable Atom = 0x58b0f
|
|
||||||
Contextmenu Atom = 0x3800b
|
|
||||||
Controls Atom = 0x1de08
|
|
||||||
Coords Atom = 0x1ea06
|
|
||||||
Crossorigin Atom = 0x1fb0b
|
|
||||||
Data Atom = 0x4a504
|
|
||||||
Datalist Atom = 0x4a508
|
|
||||||
Datetime Atom = 0x2b808
|
|
||||||
Dd Atom = 0x2d702
|
|
||||||
Default Atom = 0x10a07
|
|
||||||
Defer Atom = 0x5c705
|
|
||||||
Del Atom = 0x45203
|
|
||||||
Desc Atom = 0x56104
|
|
||||||
Details Atom = 0x7207
|
|
||||||
Dfn Atom = 0x8703
|
|
||||||
Dialog Atom = 0xbb06
|
|
||||||
Dir Atom = 0x9303
|
|
||||||
Dirname Atom = 0x9307
|
|
||||||
Disabled Atom = 0x16408
|
|
||||||
Div Atom = 0x16b03
|
|
||||||
Dl Atom = 0x5e602
|
|
||||||
Download Atom = 0x46308
|
|
||||||
Draggable Atom = 0x17a09
|
|
||||||
Dropzone Atom = 0x40508
|
|
||||||
Dt Atom = 0x64b02
|
|
||||||
Em Atom = 0x6e02
|
|
||||||
Embed Atom = 0x6e05
|
|
||||||
Enctype Atom = 0x28d07
|
|
||||||
Face Atom = 0x21e04
|
|
||||||
Fieldset Atom = 0x22608
|
|
||||||
Figcaption Atom = 0x22e0a
|
|
||||||
Figure Atom = 0x24806
|
|
||||||
Font Atom = 0x3f04
|
|
||||||
Footer Atom = 0xf606
|
|
||||||
For Atom = 0x25403
|
|
||||||
ForeignObject Atom = 0x2540d
|
|
||||||
Foreignobject Atom = 0x2610d
|
|
||||||
Form Atom = 0x26e04
|
|
||||||
Formaction Atom = 0x26e0a
|
|
||||||
Formenctype Atom = 0x2890b
|
|
||||||
Formmethod Atom = 0x2a40a
|
|
||||||
Formnovalidate Atom = 0x2ae0e
|
|
||||||
Formtarget Atom = 0x2c00a
|
|
||||||
Frame Atom = 0x8b05
|
|
||||||
Frameset Atom = 0x8b08
|
|
||||||
H1 Atom = 0x15c02
|
|
||||||
H2 Atom = 0x2de02
|
|
||||||
H3 Atom = 0x30d02
|
|
||||||
H4 Atom = 0x34502
|
|
||||||
H5 Atom = 0x34f02
|
|
||||||
H6 Atom = 0x64d02
|
|
||||||
Head Atom = 0x33104
|
|
||||||
Header Atom = 0x33106
|
|
||||||
Headers Atom = 0x33107
|
|
||||||
Height Atom = 0x5206
|
|
||||||
Hgroup Atom = 0x2ca06
|
|
||||||
Hidden Atom = 0x2d506
|
|
||||||
High Atom = 0x2db04
|
|
||||||
Hr Atom = 0x15702
|
|
||||||
Href Atom = 0x2e004
|
|
||||||
Hreflang Atom = 0x2e008
|
|
||||||
Html Atom = 0x5604
|
|
||||||
HttpEquiv Atom = 0x2e80a
|
|
||||||
I Atom = 0x601
|
|
||||||
Icon Atom = 0x58a04
|
|
||||||
Id Atom = 0x10902
|
|
||||||
Iframe Atom = 0x2fc06
|
|
||||||
Image Atom = 0x30205
|
|
||||||
Img Atom = 0x30703
|
|
||||||
Input Atom = 0x44b05
|
|
||||||
Inputmode Atom = 0x44b09
|
|
||||||
Ins Atom = 0x20403
|
|
||||||
Integrity Atom = 0x23f09
|
|
||||||
Is Atom = 0x16502
|
|
||||||
Isindex Atom = 0x30f07
|
|
||||||
Ismap Atom = 0x31605
|
|
||||||
Itemid Atom = 0x38b06
|
|
||||||
Itemprop Atom = 0x19d08
|
|
||||||
Itemref Atom = 0x3cd07
|
|
||||||
Itemscope Atom = 0x67109
|
|
||||||
Itemtype Atom = 0x31f08
|
|
||||||
Kbd Atom = 0xb903
|
|
||||||
Keygen Atom = 0x3206
|
|
||||||
Keytype Atom = 0xd607
|
|
||||||
Kind Atom = 0x17704
|
|
||||||
Label Atom = 0x5905
|
|
||||||
Lang Atom = 0x2e404
|
|
||||||
Legend Atom = 0x18106
|
|
||||||
Li Atom = 0xb202
|
|
||||||
Link Atom = 0x17404
|
|
||||||
List Atom = 0x4a904
|
|
||||||
Listing Atom = 0x4a907
|
|
||||||
Loop Atom = 0x5d04
|
|
||||||
Low Atom = 0xc303
|
|
||||||
Main Atom = 0x1004
|
|
||||||
Malignmark Atom = 0xb00a
|
|
||||||
Manifest Atom = 0x6d708
|
|
||||||
Map Atom = 0x31803
|
|
||||||
Mark Atom = 0xb604
|
|
||||||
Marquee Atom = 0x32707
|
|
||||||
Math Atom = 0x32e04
|
|
||||||
Max Atom = 0x33d03
|
|
||||||
Maxlength Atom = 0x33d09
|
|
||||||
Media Atom = 0xe605
|
|
||||||
Mediagroup Atom = 0xe60a
|
|
||||||
Menu Atom = 0x38704
|
|
||||||
Menuitem Atom = 0x38708
|
|
||||||
Meta Atom = 0x4b804
|
|
||||||
Meter Atom = 0x9805
|
|
||||||
Method Atom = 0x2a806
|
|
||||||
Mglyph Atom = 0x30806
|
|
||||||
Mi Atom = 0x34702
|
|
||||||
Min Atom = 0x34703
|
|
||||||
Minlength Atom = 0x34709
|
|
||||||
Mn Atom = 0x2b102
|
|
||||||
Mo Atom = 0xa402
|
|
||||||
Ms Atom = 0x67402
|
|
||||||
Mtext Atom = 0x35105
|
|
||||||
Multiple Atom = 0x35f08
|
|
||||||
Muted Atom = 0x36705
|
|
||||||
Name Atom = 0x9604
|
|
||||||
Nav Atom = 0x1303
|
|
||||||
Nobr Atom = 0x3704
|
|
||||||
Noembed Atom = 0x6c07
|
|
||||||
Noframes Atom = 0x8908
|
|
||||||
Nomodule Atom = 0xa208
|
|
||||||
Nonce Atom = 0x1a605
|
|
||||||
Noscript Atom = 0x21608
|
|
||||||
Novalidate Atom = 0x2b20a
|
|
||||||
Object Atom = 0x26806
|
|
||||||
Ol Atom = 0x13702
|
|
||||||
Onabort Atom = 0x19507
|
|
||||||
Onafterprint Atom = 0x2360c
|
|
||||||
Onautocomplete Atom = 0x2760e
|
|
||||||
Onautocompleteerror Atom = 0x27613
|
|
||||||
Onauxclick Atom = 0x61f0a
|
|
||||||
Onbeforeprint Atom = 0x69e0d
|
|
||||||
Onbeforeunload Atom = 0x6e70e
|
|
||||||
Onblur Atom = 0x56d06
|
|
||||||
Oncancel Atom = 0x11908
|
|
||||||
Oncanplay Atom = 0x14d09
|
|
||||||
Oncanplaythrough Atom = 0x14d10
|
|
||||||
Onchange Atom = 0x41b08
|
|
||||||
Onclick Atom = 0x2f507
|
|
||||||
Onclose Atom = 0x36c07
|
|
||||||
Oncontextmenu Atom = 0x37e0d
|
|
||||||
Oncopy Atom = 0x39106
|
|
||||||
Oncuechange Atom = 0x3970b
|
|
||||||
Oncut Atom = 0x3a205
|
|
||||||
Ondblclick Atom = 0x3a70a
|
|
||||||
Ondrag Atom = 0x3b106
|
|
||||||
Ondragend Atom = 0x3b109
|
|
||||||
Ondragenter Atom = 0x3ba0b
|
|
||||||
Ondragexit Atom = 0x3c50a
|
|
||||||
Ondragleave Atom = 0x3df0b
|
|
||||||
Ondragover Atom = 0x3ea0a
|
|
||||||
Ondragstart Atom = 0x3f40b
|
|
||||||
Ondrop Atom = 0x40306
|
|
||||||
Ondurationchange Atom = 0x41310
|
|
||||||
Onemptied Atom = 0x40a09
|
|
||||||
Onended Atom = 0x42307
|
|
||||||
Onerror Atom = 0x42a07
|
|
||||||
Onfocus Atom = 0x43107
|
|
||||||
Onhashchange Atom = 0x43d0c
|
|
||||||
Oninput Atom = 0x44907
|
|
||||||
Oninvalid Atom = 0x45509
|
|
||||||
Onkeydown Atom = 0x45e09
|
|
||||||
Onkeypress Atom = 0x46b0a
|
|
||||||
Onkeyup Atom = 0x48007
|
|
||||||
Onlanguagechange Atom = 0x48d10
|
|
||||||
Onload Atom = 0x49d06
|
|
||||||
Onloadeddata Atom = 0x49d0c
|
|
||||||
Onloadedmetadata Atom = 0x4b010
|
|
||||||
Onloadend Atom = 0x4c609
|
|
||||||
Onloadstart Atom = 0x4cf0b
|
|
||||||
Onmessage Atom = 0x4da09
|
|
||||||
Onmessageerror Atom = 0x4da0e
|
|
||||||
Onmousedown Atom = 0x4e80b
|
|
||||||
Onmouseenter Atom = 0x4f30c
|
|
||||||
Onmouseleave Atom = 0x4ff0c
|
|
||||||
Onmousemove Atom = 0x50b0b
|
|
||||||
Onmouseout Atom = 0x5160a
|
|
||||||
Onmouseover Atom = 0x5230b
|
|
||||||
Onmouseup Atom = 0x52e09
|
|
||||||
Onmousewheel Atom = 0x53c0c
|
|
||||||
Onoffline Atom = 0x54809
|
|
||||||
Ononline Atom = 0x55108
|
|
||||||
Onpagehide Atom = 0x5590a
|
|
||||||
Onpageshow Atom = 0x5730a
|
|
||||||
Onpaste Atom = 0x57f07
|
|
||||||
Onpause Atom = 0x59a07
|
|
||||||
Onplay Atom = 0x5a406
|
|
||||||
Onplaying Atom = 0x5a409
|
|
||||||
Onpopstate Atom = 0x5ad0a
|
|
||||||
Onprogress Atom = 0x5b70a
|
|
||||||
Onratechange Atom = 0x5cc0c
|
|
||||||
Onrejectionhandled Atom = 0x5d812
|
|
||||||
Onreset Atom = 0x5ea07
|
|
||||||
Onresize Atom = 0x5f108
|
|
||||||
Onscroll Atom = 0x60008
|
|
||||||
Onsecuritypolicyviolation Atom = 0x60819
|
|
||||||
Onseeked Atom = 0x62908
|
|
||||||
Onseeking Atom = 0x63109
|
|
||||||
Onselect Atom = 0x63a08
|
|
||||||
Onshow Atom = 0x64406
|
|
||||||
Onsort Atom = 0x64f06
|
|
||||||
Onstalled Atom = 0x65909
|
|
||||||
Onstorage Atom = 0x66209
|
|
||||||
Onsubmit Atom = 0x66b08
|
|
||||||
Onsuspend Atom = 0x67b09
|
|
||||||
Ontimeupdate Atom = 0x400c
|
|
||||||
Ontoggle Atom = 0x68408
|
|
||||||
Onunhandledrejection Atom = 0x68c14
|
|
||||||
Onunload Atom = 0x6ab08
|
|
||||||
Onvolumechange Atom = 0x6b30e
|
|
||||||
Onwaiting Atom = 0x6c109
|
|
||||||
Onwheel Atom = 0x6ca07
|
|
||||||
Open Atom = 0x1a304
|
|
||||||
Optgroup Atom = 0x5f08
|
|
||||||
Optimum Atom = 0x6d107
|
|
||||||
Option Atom = 0x6e306
|
|
||||||
Output Atom = 0x51d06
|
|
||||||
P Atom = 0xc01
|
|
||||||
Param Atom = 0xc05
|
|
||||||
Pattern Atom = 0x6607
|
|
||||||
Picture Atom = 0x7b07
|
|
||||||
Ping Atom = 0xef04
|
|
||||||
Placeholder Atom = 0x1310b
|
|
||||||
Plaintext Atom = 0x1b209
|
|
||||||
Playsinline Atom = 0x1400b
|
|
||||||
Poster Atom = 0x2cf06
|
|
||||||
Pre Atom = 0x47003
|
|
||||||
Preload Atom = 0x48607
|
|
||||||
Progress Atom = 0x5b908
|
|
||||||
Prompt Atom = 0x53606
|
|
||||||
Public Atom = 0x58606
|
|
||||||
Q Atom = 0xcf01
|
|
||||||
Radiogroup Atom = 0x30a
|
|
||||||
Rb Atom = 0x3a02
|
|
||||||
Readonly Atom = 0x35708
|
|
||||||
Referrerpolicy Atom = 0x3d10e
|
|
||||||
Rel Atom = 0x48703
|
|
||||||
Required Atom = 0x24c08
|
|
||||||
Reversed Atom = 0x8008
|
|
||||||
Rows Atom = 0x9c04
|
|
||||||
Rowspan Atom = 0x9c07
|
|
||||||
Rp Atom = 0x23c02
|
|
||||||
Rt Atom = 0x19a02
|
|
||||||
Rtc Atom = 0x19a03
|
|
||||||
Ruby Atom = 0xfb04
|
|
||||||
S Atom = 0x2501
|
|
||||||
Samp Atom = 0x7804
|
|
||||||
Sandbox Atom = 0x12907
|
|
||||||
Scope Atom = 0x67505
|
|
||||||
Scoped Atom = 0x67506
|
|
||||||
Script Atom = 0x21806
|
|
||||||
Seamless Atom = 0x37108
|
|
||||||
Section Atom = 0x56807
|
|
||||||
Select Atom = 0x63c06
|
|
||||||
Selected Atom = 0x63c08
|
|
||||||
Shape Atom = 0x1e505
|
|
||||||
Size Atom = 0x5f504
|
|
||||||
Sizes Atom = 0x5f505
|
|
||||||
Slot Atom = 0x1ef04
|
|
||||||
Small Atom = 0x20605
|
|
||||||
Sortable Atom = 0x65108
|
|
||||||
Sorted Atom = 0x33706
|
|
||||||
Source Atom = 0x37806
|
|
||||||
Spacer Atom = 0x43706
|
|
||||||
Span Atom = 0x9f04
|
|
||||||
Spellcheck Atom = 0x4740a
|
|
||||||
Src Atom = 0x5c003
|
|
||||||
Srcdoc Atom = 0x5c006
|
|
||||||
Srclang Atom = 0x5f907
|
|
||||||
Srcset Atom = 0x6f906
|
|
||||||
Start Atom = 0x3fa05
|
|
||||||
Step Atom = 0x58304
|
|
||||||
Strike Atom = 0xd206
|
|
||||||
Strong Atom = 0x6dd06
|
|
||||||
Style Atom = 0x6ff05
|
|
||||||
Sub Atom = 0x66d03
|
|
||||||
Summary Atom = 0x70407
|
|
||||||
Sup Atom = 0x70b03
|
|
||||||
Svg Atom = 0x70e03
|
|
||||||
System Atom = 0x71106
|
|
||||||
Tabindex Atom = 0x4be08
|
|
||||||
Table Atom = 0x59505
|
|
||||||
Target Atom = 0x2c406
|
|
||||||
Tbody Atom = 0x2705
|
|
||||||
Td Atom = 0x9202
|
|
||||||
Template Atom = 0x71408
|
|
||||||
Textarea Atom = 0x35208
|
|
||||||
Tfoot Atom = 0xf505
|
|
||||||
Th Atom = 0x15602
|
|
||||||
Thead Atom = 0x33005
|
|
||||||
Time Atom = 0x4204
|
|
||||||
Title Atom = 0x11005
|
|
||||||
Tr Atom = 0xcc02
|
|
||||||
Track Atom = 0x1ba05
|
|
||||||
Translate Atom = 0x1f209
|
|
||||||
Tt Atom = 0x6802
|
|
||||||
Type Atom = 0xd904
|
|
||||||
Typemustmatch Atom = 0x2900d
|
|
||||||
U Atom = 0xb01
|
|
||||||
Ul Atom = 0xa702
|
|
||||||
Updateviacache Atom = 0x460e
|
|
||||||
Usemap Atom = 0x59e06
|
|
||||||
Value Atom = 0x1505
|
|
||||||
Var Atom = 0x16d03
|
|
||||||
Video Atom = 0x2f105
|
|
||||||
Wbr Atom = 0x57c03
|
|
||||||
Width Atom = 0x64905
|
|
||||||
Workertype Atom = 0x71c0a
|
|
||||||
Wrap Atom = 0x72604
|
|
||||||
Xmp Atom = 0x12f03
|
|
||||||
)
|
|
||||||
|
|
||||||
const hash0 = 0x81cdf10e
|
|
||||||
|
|
||||||
const maxAtomLen = 25
|
|
||||||
|
|
||||||
var table = [1 << 9]Atom{
|
|
||||||
0x1: 0xe60a, // mediagroup
|
|
||||||
0x2: 0x2e404, // lang
|
|
||||||
0x4: 0x2c09, // accesskey
|
|
||||||
0x5: 0x8b08, // frameset
|
|
||||||
0x7: 0x63a08, // onselect
|
|
||||||
0x8: 0x71106, // system
|
|
||||||
0xa: 0x64905, // width
|
|
||||||
0xc: 0x2890b, // formenctype
|
|
||||||
0xd: 0x13702, // ol
|
|
||||||
0xe: 0x3970b, // oncuechange
|
|
||||||
0x10: 0x14b03, // bdo
|
|
||||||
0x11: 0x11505, // audio
|
|
||||||
0x12: 0x17a09, // draggable
|
|
||||||
0x14: 0x2f105, // video
|
|
||||||
0x15: 0x2b102, // mn
|
|
||||||
0x16: 0x38704, // menu
|
|
||||||
0x17: 0x2cf06, // poster
|
|
||||||
0x19: 0xf606, // footer
|
|
||||||
0x1a: 0x2a806, // method
|
|
||||||
0x1b: 0x2b808, // datetime
|
|
||||||
0x1c: 0x19507, // onabort
|
|
||||||
0x1d: 0x460e, // updateviacache
|
|
||||||
0x1e: 0xff05, // async
|
|
||||||
0x1f: 0x49d06, // onload
|
|
||||||
0x21: 0x11908, // oncancel
|
|
||||||
0x22: 0x62908, // onseeked
|
|
||||||
0x23: 0x30205, // image
|
|
||||||
0x24: 0x5d812, // onrejectionhandled
|
|
||||||
0x26: 0x17404, // link
|
|
||||||
0x27: 0x51d06, // output
|
|
||||||
0x28: 0x33104, // head
|
|
||||||
0x29: 0x4ff0c, // onmouseleave
|
|
||||||
0x2a: 0x57f07, // onpaste
|
|
||||||
0x2b: 0x5a409, // onplaying
|
|
||||||
0x2c: 0x1c407, // colspan
|
|
||||||
0x2f: 0x1bf05, // color
|
|
||||||
0x30: 0x5f504, // size
|
|
||||||
0x31: 0x2e80a, // http-equiv
|
|
||||||
0x33: 0x601, // i
|
|
||||||
0x34: 0x5590a, // onpagehide
|
|
||||||
0x35: 0x68c14, // onunhandledrejection
|
|
||||||
0x37: 0x42a07, // onerror
|
|
||||||
0x3a: 0x3b08, // basefont
|
|
||||||
0x3f: 0x1303, // nav
|
|
||||||
0x40: 0x17704, // kind
|
|
||||||
0x41: 0x35708, // readonly
|
|
||||||
0x42: 0x30806, // mglyph
|
|
||||||
0x44: 0xb202, // li
|
|
||||||
0x46: 0x2d506, // hidden
|
|
||||||
0x47: 0x70e03, // svg
|
|
||||||
0x48: 0x58304, // step
|
|
||||||
0x49: 0x23f09, // integrity
|
|
||||||
0x4a: 0x58606, // public
|
|
||||||
0x4c: 0x1ab03, // col
|
|
||||||
0x4d: 0x1870a, // blockquote
|
|
||||||
0x4e: 0x34f02, // h5
|
|
||||||
0x50: 0x5b908, // progress
|
|
||||||
0x51: 0x5f505, // sizes
|
|
||||||
0x52: 0x34502, // h4
|
|
||||||
0x56: 0x33005, // thead
|
|
||||||
0x57: 0xd607, // keytype
|
|
||||||
0x58: 0x5b70a, // onprogress
|
|
||||||
0x59: 0x44b09, // inputmode
|
|
||||||
0x5a: 0x3b109, // ondragend
|
|
||||||
0x5d: 0x3a205, // oncut
|
|
||||||
0x5e: 0x43706, // spacer
|
|
||||||
0x5f: 0x1ab08, // colgroup
|
|
||||||
0x62: 0x16502, // is
|
|
||||||
0x65: 0x3c02, // as
|
|
||||||
0x66: 0x54809, // onoffline
|
|
||||||
0x67: 0x33706, // sorted
|
|
||||||
0x69: 0x48d10, // onlanguagechange
|
|
||||||
0x6c: 0x43d0c, // onhashchange
|
|
||||||
0x6d: 0x9604, // name
|
|
||||||
0x6e: 0xf505, // tfoot
|
|
||||||
0x6f: 0x56104, // desc
|
|
||||||
0x70: 0x33d03, // max
|
|
||||||
0x72: 0x1ea06, // coords
|
|
||||||
0x73: 0x30d02, // h3
|
|
||||||
0x74: 0x6e70e, // onbeforeunload
|
|
||||||
0x75: 0x9c04, // rows
|
|
||||||
0x76: 0x63c06, // select
|
|
||||||
0x77: 0x9805, // meter
|
|
||||||
0x78: 0x38b06, // itemid
|
|
||||||
0x79: 0x53c0c, // onmousewheel
|
|
||||||
0x7a: 0x5c006, // srcdoc
|
|
||||||
0x7d: 0x1ba05, // track
|
|
||||||
0x7f: 0x31f08, // itemtype
|
|
||||||
0x82: 0xa402, // mo
|
|
||||||
0x83: 0x41b08, // onchange
|
|
||||||
0x84: 0x33107, // headers
|
|
||||||
0x85: 0x5cc0c, // onratechange
|
|
||||||
0x86: 0x60819, // onsecuritypolicyviolation
|
|
||||||
0x88: 0x4a508, // datalist
|
|
||||||
0x89: 0x4e80b, // onmousedown
|
|
||||||
0x8a: 0x1ef04, // slot
|
|
||||||
0x8b: 0x4b010, // onloadedmetadata
|
|
||||||
0x8c: 0x1a06, // accept
|
|
||||||
0x8d: 0x26806, // object
|
|
||||||
0x91: 0x6b30e, // onvolumechange
|
|
||||||
0x92: 0x2107, // charset
|
|
||||||
0x93: 0x27613, // onautocompleteerror
|
|
||||||
0x94: 0xc113, // allowpaymentrequest
|
|
||||||
0x95: 0x2804, // body
|
|
||||||
0x96: 0x10a07, // default
|
|
||||||
0x97: 0x63c08, // selected
|
|
||||||
0x98: 0x21e04, // face
|
|
||||||
0x99: 0x1e505, // shape
|
|
||||||
0x9b: 0x68408, // ontoggle
|
|
||||||
0x9e: 0x64b02, // dt
|
|
||||||
0x9f: 0xb604, // mark
|
|
||||||
0xa1: 0xb01, // u
|
|
||||||
0xa4: 0x6ab08, // onunload
|
|
||||||
0xa5: 0x5d04, // loop
|
|
||||||
0xa6: 0x16408, // disabled
|
|
||||||
0xaa: 0x42307, // onended
|
|
||||||
0xab: 0xb00a, // malignmark
|
|
||||||
0xad: 0x67b09, // onsuspend
|
|
||||||
0xae: 0x35105, // mtext
|
|
||||||
0xaf: 0x64f06, // onsort
|
|
||||||
0xb0: 0x19d08, // itemprop
|
|
||||||
0xb3: 0x67109, // itemscope
|
|
||||||
0xb4: 0x17305, // blink
|
|
||||||
0xb6: 0x3b106, // ondrag
|
|
||||||
0xb7: 0xa702, // ul
|
|
||||||
0xb8: 0x26e04, // form
|
|
||||||
0xb9: 0x12907, // sandbox
|
|
||||||
0xba: 0x8b05, // frame
|
|
||||||
0xbb: 0x1505, // value
|
|
||||||
0xbc: 0x66209, // onstorage
|
|
||||||
0xbf: 0xaa07, // acronym
|
|
||||||
0xc0: 0x19a02, // rt
|
|
||||||
0xc2: 0x202, // br
|
|
||||||
0xc3: 0x22608, // fieldset
|
|
||||||
0xc4: 0x2900d, // typemustmatch
|
|
||||||
0xc5: 0xa208, // nomodule
|
|
||||||
0xc6: 0x6c07, // noembed
|
|
||||||
0xc7: 0x69e0d, // onbeforeprint
|
|
||||||
0xc8: 0x19106, // button
|
|
||||||
0xc9: 0x2f507, // onclick
|
|
||||||
0xca: 0x70407, // summary
|
|
||||||
0xcd: 0xfb04, // ruby
|
|
||||||
0xce: 0x56405, // class
|
|
||||||
0xcf: 0x3f40b, // ondragstart
|
|
||||||
0xd0: 0x23107, // caption
|
|
||||||
0xd4: 0xdd0e, // allowusermedia
|
|
||||||
0xd5: 0x4cf0b, // onloadstart
|
|
||||||
0xd9: 0x16b03, // div
|
|
||||||
0xda: 0x4a904, // list
|
|
||||||
0xdb: 0x32e04, // math
|
|
||||||
0xdc: 0x44b05, // input
|
|
||||||
0xdf: 0x3ea0a, // ondragover
|
|
||||||
0xe0: 0x2de02, // h2
|
|
||||||
0xe2: 0x1b209, // plaintext
|
|
||||||
0xe4: 0x4f30c, // onmouseenter
|
|
||||||
0xe7: 0x47907, // checked
|
|
||||||
0xe8: 0x47003, // pre
|
|
||||||
0xea: 0x35f08, // multiple
|
|
||||||
0xeb: 0xba03, // bdi
|
|
||||||
0xec: 0x33d09, // maxlength
|
|
||||||
0xed: 0xcf01, // q
|
|
||||||
0xee: 0x61f0a, // onauxclick
|
|
||||||
0xf0: 0x57c03, // wbr
|
|
||||||
0xf2: 0x3b04, // base
|
|
||||||
0xf3: 0x6e306, // option
|
|
||||||
0xf5: 0x41310, // ondurationchange
|
|
||||||
0xf7: 0x8908, // noframes
|
|
||||||
0xf9: 0x40508, // dropzone
|
|
||||||
0xfb: 0x67505, // scope
|
|
||||||
0xfc: 0x8008, // reversed
|
|
||||||
0xfd: 0x3ba0b, // ondragenter
|
|
||||||
0xfe: 0x3fa05, // start
|
|
||||||
0xff: 0x12f03, // xmp
|
|
||||||
0x100: 0x5f907, // srclang
|
|
||||||
0x101: 0x30703, // img
|
|
||||||
0x104: 0x101, // b
|
|
||||||
0x105: 0x25403, // for
|
|
||||||
0x106: 0x10705, // aside
|
|
||||||
0x107: 0x44907, // oninput
|
|
||||||
0x108: 0x35604, // area
|
|
||||||
0x109: 0x2a40a, // formmethod
|
|
||||||
0x10a: 0x72604, // wrap
|
|
||||||
0x10c: 0x23c02, // rp
|
|
||||||
0x10d: 0x46b0a, // onkeypress
|
|
||||||
0x10e: 0x6802, // tt
|
|
||||||
0x110: 0x34702, // mi
|
|
||||||
0x111: 0x36705, // muted
|
|
||||||
0x112: 0xf303, // alt
|
|
||||||
0x113: 0x5c504, // code
|
|
||||||
0x114: 0x6e02, // em
|
|
||||||
0x115: 0x3c50a, // ondragexit
|
|
||||||
0x117: 0x9f04, // span
|
|
||||||
0x119: 0x6d708, // manifest
|
|
||||||
0x11a: 0x38708, // menuitem
|
|
||||||
0x11b: 0x58b07, // content
|
|
||||||
0x11d: 0x6c109, // onwaiting
|
|
||||||
0x11f: 0x4c609, // onloadend
|
|
||||||
0x121: 0x37e0d, // oncontextmenu
|
|
||||||
0x123: 0x56d06, // onblur
|
|
||||||
0x124: 0x3fc07, // article
|
|
||||||
0x125: 0x9303, // dir
|
|
||||||
0x126: 0xef04, // ping
|
|
||||||
0x127: 0x24c08, // required
|
|
||||||
0x128: 0x45509, // oninvalid
|
|
||||||
0x129: 0xb105, // align
|
|
||||||
0x12b: 0x58a04, // icon
|
|
||||||
0x12c: 0x64d02, // h6
|
|
||||||
0x12d: 0x1c404, // cols
|
|
||||||
0x12e: 0x22e0a, // figcaption
|
|
||||||
0x12f: 0x45e09, // onkeydown
|
|
||||||
0x130: 0x66b08, // onsubmit
|
|
||||||
0x131: 0x14d09, // oncanplay
|
|
||||||
0x132: 0x70b03, // sup
|
|
||||||
0x133: 0xc01, // p
|
|
||||||
0x135: 0x40a09, // onemptied
|
|
||||||
0x136: 0x39106, // oncopy
|
|
||||||
0x137: 0x19c04, // cite
|
|
||||||
0x138: 0x3a70a, // ondblclick
|
|
||||||
0x13a: 0x50b0b, // onmousemove
|
|
||||||
0x13c: 0x66d03, // sub
|
|
||||||
0x13d: 0x48703, // rel
|
|
||||||
0x13e: 0x5f08, // optgroup
|
|
||||||
0x142: 0x9c07, // rowspan
|
|
||||||
0x143: 0x37806, // source
|
|
||||||
0x144: 0x21608, // noscript
|
|
||||||
0x145: 0x1a304, // open
|
|
||||||
0x146: 0x20403, // ins
|
|
||||||
0x147: 0x2540d, // foreignObject
|
|
||||||
0x148: 0x5ad0a, // onpopstate
|
|
||||||
0x14a: 0x28d07, // enctype
|
|
||||||
0x14b: 0x2760e, // onautocomplete
|
|
||||||
0x14c: 0x35208, // textarea
|
|
||||||
0x14e: 0x2780c, // autocomplete
|
|
||||||
0x14f: 0x15702, // hr
|
|
||||||
0x150: 0x1de08, // controls
|
|
||||||
0x151: 0x10902, // id
|
|
||||||
0x153: 0x2360c, // onafterprint
|
|
||||||
0x155: 0x2610d, // foreignobject
|
|
||||||
0x156: 0x32707, // marquee
|
|
||||||
0x157: 0x59a07, // onpause
|
|
||||||
0x158: 0x5e602, // dl
|
|
||||||
0x159: 0x5206, // height
|
|
||||||
0x15a: 0x34703, // min
|
|
||||||
0x15b: 0x9307, // dirname
|
|
||||||
0x15c: 0x1f209, // translate
|
|
||||||
0x15d: 0x5604, // html
|
|
||||||
0x15e: 0x34709, // minlength
|
|
||||||
0x15f: 0x48607, // preload
|
|
||||||
0x160: 0x71408, // template
|
|
||||||
0x161: 0x3df0b, // ondragleave
|
|
||||||
0x162: 0x3a02, // rb
|
|
||||||
0x164: 0x5c003, // src
|
|
||||||
0x165: 0x6dd06, // strong
|
|
||||||
0x167: 0x7804, // samp
|
|
||||||
0x168: 0x6f307, // address
|
|
||||||
0x169: 0x55108, // ononline
|
|
||||||
0x16b: 0x1310b, // placeholder
|
|
||||||
0x16c: 0x2c406, // target
|
|
||||||
0x16d: 0x20605, // small
|
|
||||||
0x16e: 0x6ca07, // onwheel
|
|
||||||
0x16f: 0x1c90a, // annotation
|
|
||||||
0x170: 0x4740a, // spellcheck
|
|
||||||
0x171: 0x7207, // details
|
|
||||||
0x172: 0x10306, // canvas
|
|
||||||
0x173: 0x12109, // autofocus
|
|
||||||
0x174: 0xc05, // param
|
|
||||||
0x176: 0x46308, // download
|
|
||||||
0x177: 0x45203, // del
|
|
||||||
0x178: 0x36c07, // onclose
|
|
||||||
0x179: 0xb903, // kbd
|
|
||||||
0x17a: 0x31906, // applet
|
|
||||||
0x17b: 0x2e004, // href
|
|
||||||
0x17c: 0x5f108, // onresize
|
|
||||||
0x17e: 0x49d0c, // onloadeddata
|
|
||||||
0x180: 0xcc02, // tr
|
|
||||||
0x181: 0x2c00a, // formtarget
|
|
||||||
0x182: 0x11005, // title
|
|
||||||
0x183: 0x6ff05, // style
|
|
||||||
0x184: 0xd206, // strike
|
|
||||||
0x185: 0x59e06, // usemap
|
|
||||||
0x186: 0x2fc06, // iframe
|
|
||||||
0x187: 0x1004, // main
|
|
||||||
0x189: 0x7b07, // picture
|
|
||||||
0x18c: 0x31605, // ismap
|
|
||||||
0x18e: 0x4a504, // data
|
|
||||||
0x18f: 0x5905, // label
|
|
||||||
0x191: 0x3d10e, // referrerpolicy
|
|
||||||
0x192: 0x15602, // th
|
|
||||||
0x194: 0x53606, // prompt
|
|
||||||
0x195: 0x56807, // section
|
|
||||||
0x197: 0x6d107, // optimum
|
|
||||||
0x198: 0x2db04, // high
|
|
||||||
0x199: 0x15c02, // h1
|
|
||||||
0x19a: 0x65909, // onstalled
|
|
||||||
0x19b: 0x16d03, // var
|
|
||||||
0x19c: 0x4204, // time
|
|
||||||
0x19e: 0x67402, // ms
|
|
||||||
0x19f: 0x33106, // header
|
|
||||||
0x1a0: 0x4da09, // onmessage
|
|
||||||
0x1a1: 0x1a605, // nonce
|
|
||||||
0x1a2: 0x26e0a, // formaction
|
|
||||||
0x1a3: 0x22006, // center
|
|
||||||
0x1a4: 0x3704, // nobr
|
|
||||||
0x1a5: 0x59505, // table
|
|
||||||
0x1a6: 0x4a907, // listing
|
|
||||||
0x1a7: 0x18106, // legend
|
|
||||||
0x1a9: 0x29b09, // challenge
|
|
||||||
0x1aa: 0x24806, // figure
|
|
||||||
0x1ab: 0xe605, // media
|
|
||||||
0x1ae: 0xd904, // type
|
|
||||||
0x1af: 0x3f04, // font
|
|
||||||
0x1b0: 0x4da0e, // onmessageerror
|
|
||||||
0x1b1: 0x37108, // seamless
|
|
||||||
0x1b2: 0x8703, // dfn
|
|
||||||
0x1b3: 0x5c705, // defer
|
|
||||||
0x1b4: 0xc303, // low
|
|
||||||
0x1b5: 0x19a03, // rtc
|
|
||||||
0x1b6: 0x5230b, // onmouseover
|
|
||||||
0x1b7: 0x2b20a, // novalidate
|
|
||||||
0x1b8: 0x71c0a, // workertype
|
|
||||||
0x1ba: 0x3cd07, // itemref
|
|
||||||
0x1bd: 0x1, // a
|
|
||||||
0x1be: 0x31803, // map
|
|
||||||
0x1bf: 0x400c, // ontimeupdate
|
|
||||||
0x1c0: 0x15e07, // bgsound
|
|
||||||
0x1c1: 0x3206, // keygen
|
|
||||||
0x1c2: 0x2705, // tbody
|
|
||||||
0x1c5: 0x64406, // onshow
|
|
||||||
0x1c7: 0x2501, // s
|
|
||||||
0x1c8: 0x6607, // pattern
|
|
||||||
0x1cc: 0x14d10, // oncanplaythrough
|
|
||||||
0x1ce: 0x2d702, // dd
|
|
||||||
0x1cf: 0x6f906, // srcset
|
|
||||||
0x1d0: 0x17003, // big
|
|
||||||
0x1d2: 0x65108, // sortable
|
|
||||||
0x1d3: 0x48007, // onkeyup
|
|
||||||
0x1d5: 0x5a406, // onplay
|
|
||||||
0x1d7: 0x4b804, // meta
|
|
||||||
0x1d8: 0x40306, // ondrop
|
|
||||||
0x1da: 0x60008, // onscroll
|
|
||||||
0x1db: 0x1fb0b, // crossorigin
|
|
||||||
0x1dc: 0x5730a, // onpageshow
|
|
||||||
0x1dd: 0x4, // abbr
|
|
||||||
0x1de: 0x9202, // td
|
|
||||||
0x1df: 0x58b0f, // contenteditable
|
|
||||||
0x1e0: 0x27206, // action
|
|
||||||
0x1e1: 0x1400b, // playsinline
|
|
||||||
0x1e2: 0x43107, // onfocus
|
|
||||||
0x1e3: 0x2e008, // hreflang
|
|
||||||
0x1e5: 0x5160a, // onmouseout
|
|
||||||
0x1e6: 0x5ea07, // onreset
|
|
||||||
0x1e7: 0x13c08, // autoplay
|
|
||||||
0x1e8: 0x63109, // onseeking
|
|
||||||
0x1ea: 0x67506, // scoped
|
|
||||||
0x1ec: 0x30a, // radiogroup
|
|
||||||
0x1ee: 0x3800b, // contextmenu
|
|
||||||
0x1ef: 0x52e09, // onmouseup
|
|
||||||
0x1f1: 0x2ca06, // hgroup
|
|
||||||
0x1f2: 0x2080f, // allowfullscreen
|
|
||||||
0x1f3: 0x4be08, // tabindex
|
|
||||||
0x1f6: 0x30f07, // isindex
|
|
||||||
0x1f7: 0x1a0e, // accept-charset
|
|
||||||
0x1f8: 0x2ae0e, // formnovalidate
|
|
||||||
0x1fb: 0x1c90e, // annotation-xml
|
|
||||||
0x1fc: 0x6e05, // embed
|
|
||||||
0x1fd: 0x21806, // script
|
|
||||||
0x1fe: 0xbb06, // dialog
|
|
||||||
0x1ff: 0x1d707, // command
|
|
||||||
}
|
|
||||||
|
|
||||||
const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
|
|
||||||
"asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" +
|
|
||||||
"sampictureversedfnoframesetdirnameterowspanomoduleacronymali" +
|
|
||||||
"gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" +
|
|
||||||
"ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" +
|
|
||||||
"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
|
|
||||||
"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
|
|
||||||
"penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
|
|
||||||
"ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
|
|
||||||
"ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
|
|
||||||
"ignObjectforeignobjectformactionautocompleteerrorformenctype" +
|
|
||||||
"mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
|
|
||||||
"osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
|
|
||||||
"ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
|
|
||||||
"inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
|
|
||||||
"extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
|
|
||||||
"enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
|
|
||||||
"articleondropzonemptiedondurationchangeonendedonerroronfocus" +
|
|
||||||
"paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
|
|
||||||
"spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
|
|
||||||
"onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
|
|
||||||
"usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
|
|
||||||
"seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
|
|
||||||
"classectionbluronpageshowbronpastepublicontenteditableonpaus" +
|
|
||||||
"emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
|
|
||||||
"jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
|
|
||||||
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
|
|
||||||
"tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
|
|
||||||
"handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
|
|
||||||
"wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
|
|
||||||
"arysupsvgsystemplateworkertypewrap"
|
|
257
vendor/golang.org/x/net/html/charset/charset.go
generated
vendored
257
vendor/golang.org/x/net/html/charset/charset.go
generated
vendored
@ -1,257 +0,0 @@
|
|||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package charset provides common text encodings for HTML documents.
|
|
||||||
//
|
|
||||||
// The mapping from encoding labels to encodings is defined at
|
|
||||||
// https://encoding.spec.whatwg.org/.
|
|
||||||
package charset // import "golang.org/x/net/html/charset"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/net/html"
|
|
||||||
"golang.org/x/text/encoding"
|
|
||||||
"golang.org/x/text/encoding/charmap"
|
|
||||||
"golang.org/x/text/encoding/htmlindex"
|
|
||||||
"golang.org/x/text/transform"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Lookup returns the encoding with the specified label, and its canonical
|
|
||||||
// name. It returns nil and the empty string if label is not one of the
|
|
||||||
// standard encodings for HTML. Matching is case-insensitive and ignores
|
|
||||||
// leading and trailing whitespace. Encoders will use HTML escape sequences for
|
|
||||||
// runes that are not supported by the character set.
|
|
||||||
func Lookup(label string) (e encoding.Encoding, name string) {
|
|
||||||
e, err := htmlindex.Get(label)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ""
|
|
||||||
}
|
|
||||||
name, _ = htmlindex.Name(e)
|
|
||||||
return &htmlEncoding{e}, name
|
|
||||||
}
|
|
||||||
|
|
||||||
type htmlEncoding struct{ encoding.Encoding }
|
|
||||||
|
|
||||||
func (h *htmlEncoding) NewEncoder() *encoding.Encoder {
|
|
||||||
// HTML requires a non-terminating legacy encoder. We use HTML escapes to
|
|
||||||
// substitute unsupported code points.
|
|
||||||
return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder())
|
|
||||||
}
|
|
||||||
|
|
||||||
// DetermineEncoding determines the encoding of an HTML document by examining
|
|
||||||
// up to the first 1024 bytes of content and the declared Content-Type.
|
|
||||||
//
|
|
||||||
// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
|
|
||||||
func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
|
|
||||||
if len(content) > 1024 {
|
|
||||||
content = content[:1024]
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, b := range boms {
|
|
||||||
if bytes.HasPrefix(content, b.bom) {
|
|
||||||
e, name = Lookup(b.enc)
|
|
||||||
return e, name, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, params, err := mime.ParseMediaType(contentType); err == nil {
|
|
||||||
if cs, ok := params["charset"]; ok {
|
|
||||||
if e, name = Lookup(cs); e != nil {
|
|
||||||
return e, name, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(content) > 0 {
|
|
||||||
e, name = prescan(content)
|
|
||||||
if e != nil {
|
|
||||||
return e, name, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try to detect UTF-8.
|
|
||||||
// First eliminate any partial rune at the end.
|
|
||||||
for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
|
|
||||||
b := content[i]
|
|
||||||
if b < 0x80 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if utf8.RuneStart(b) {
|
|
||||||
content = content[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
hasHighBit := false
|
|
||||||
for _, c := range content {
|
|
||||||
if c >= 0x80 {
|
|
||||||
hasHighBit = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if hasHighBit && utf8.Valid(content) {
|
|
||||||
return encoding.Nop, "utf-8", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: change default depending on user's locale?
|
|
||||||
return charmap.Windows1252, "windows-1252", false
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader returns an io.Reader that converts the content of r to UTF-8.
|
|
||||||
// It calls DetermineEncoding to find out what r's encoding is.
|
|
||||||
func NewReader(r io.Reader, contentType string) (io.Reader, error) {
|
|
||||||
preview := make([]byte, 1024)
|
|
||||||
n, err := io.ReadFull(r, preview)
|
|
||||||
switch {
|
|
||||||
case err == io.ErrUnexpectedEOF:
|
|
||||||
preview = preview[:n]
|
|
||||||
r = bytes.NewReader(preview)
|
|
||||||
case err != nil:
|
|
||||||
return nil, err
|
|
||||||
default:
|
|
||||||
r = io.MultiReader(bytes.NewReader(preview), r)
|
|
||||||
}
|
|
||||||
|
|
||||||
if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
|
|
||||||
r = transform.NewReader(r, e.NewDecoder())
|
|
||||||
}
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReaderLabel returns a reader that converts from the specified charset to
|
|
||||||
// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
|
|
||||||
// returns an error if Lookup returns nil. It is suitable for use as
|
|
||||||
// encoding/xml.Decoder's CharsetReader function.
|
|
||||||
func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
|
|
||||||
e, _ := Lookup(label)
|
|
||||||
if e == nil {
|
|
||||||
return nil, fmt.Errorf("unsupported charset: %q", label)
|
|
||||||
}
|
|
||||||
return transform.NewReader(input, e.NewDecoder()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func prescan(content []byte) (e encoding.Encoding, name string) {
|
|
||||||
z := html.NewTokenizer(bytes.NewReader(content))
|
|
||||||
for {
|
|
||||||
switch z.Next() {
|
|
||||||
case html.ErrorToken:
|
|
||||||
return nil, ""
|
|
||||||
|
|
||||||
case html.StartTagToken, html.SelfClosingTagToken:
|
|
||||||
tagName, hasAttr := z.TagName()
|
|
||||||
if !bytes.Equal(tagName, []byte("meta")) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
attrList := make(map[string]bool)
|
|
||||||
gotPragma := false
|
|
||||||
|
|
||||||
const (
|
|
||||||
dontKnow = iota
|
|
||||||
doNeedPragma
|
|
||||||
doNotNeedPragma
|
|
||||||
)
|
|
||||||
needPragma := dontKnow
|
|
||||||
|
|
||||||
name = ""
|
|
||||||
e = nil
|
|
||||||
for hasAttr {
|
|
||||||
var key, val []byte
|
|
||||||
key, val, hasAttr = z.TagAttr()
|
|
||||||
ks := string(key)
|
|
||||||
if attrList[ks] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
attrList[ks] = true
|
|
||||||
for i, c := range val {
|
|
||||||
if 'A' <= c && c <= 'Z' {
|
|
||||||
val[i] = c + 0x20
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch ks {
|
|
||||||
case "http-equiv":
|
|
||||||
if bytes.Equal(val, []byte("content-type")) {
|
|
||||||
gotPragma = true
|
|
||||||
}
|
|
||||||
|
|
||||||
case "content":
|
|
||||||
if e == nil {
|
|
||||||
name = fromMetaElement(string(val))
|
|
||||||
if name != "" {
|
|
||||||
e, name = Lookup(name)
|
|
||||||
if e != nil {
|
|
||||||
needPragma = doNeedPragma
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case "charset":
|
|
||||||
e, name = Lookup(string(val))
|
|
||||||
needPragma = doNotNeedPragma
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(name, "utf-16") {
|
|
||||||
name = "utf-8"
|
|
||||||
e = encoding.Nop
|
|
||||||
}
|
|
||||||
|
|
||||||
if e != nil {
|
|
||||||
return e, name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func fromMetaElement(s string) string {
|
|
||||||
for s != "" {
|
|
||||||
csLoc := strings.Index(s, "charset")
|
|
||||||
if csLoc == -1 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
s = s[csLoc+len("charset"):]
|
|
||||||
s = strings.TrimLeft(s, " \t\n\f\r")
|
|
||||||
if !strings.HasPrefix(s, "=") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s = s[1:]
|
|
||||||
s = strings.TrimLeft(s, " \t\n\f\r")
|
|
||||||
if s == "" {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
if q := s[0]; q == '"' || q == '\'' {
|
|
||||||
s = s[1:]
|
|
||||||
closeQuote := strings.IndexRune(s, rune(q))
|
|
||||||
if closeQuote == -1 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return s[:closeQuote]
|
|
||||||
}
|
|
||||||
|
|
||||||
end := strings.IndexAny(s, "; \t\n\f\r")
|
|
||||||
if end == -1 {
|
|
||||||
end = len(s)
|
|
||||||
}
|
|
||||||
return s[:end]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
var boms = []struct {
|
|
||||||
bom []byte
|
|
||||||
enc string
|
|
||||||
}{
|
|
||||||
{[]byte{0xfe, 0xff}, "utf-16be"},
|
|
||||||
{[]byte{0xff, 0xfe}, "utf-16le"},
|
|
||||||
{[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
|
|
||||||
}
|
|
112
vendor/golang.org/x/net/html/const.go
generated
vendored
112
vendor/golang.org/x/net/html/const.go
generated
vendored
@ -1,112 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
// Section 12.2.4.2 of the HTML5 specification says "The following elements
|
|
||||||
// have varying levels of special parsing rules".
|
|
||||||
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
|
|
||||||
var isSpecialElementMap = map[string]bool{
|
|
||||||
"address": true,
|
|
||||||
"applet": true,
|
|
||||||
"area": true,
|
|
||||||
"article": true,
|
|
||||||
"aside": true,
|
|
||||||
"base": true,
|
|
||||||
"basefont": true,
|
|
||||||
"bgsound": true,
|
|
||||||
"blockquote": true,
|
|
||||||
"body": true,
|
|
||||||
"br": true,
|
|
||||||
"button": true,
|
|
||||||
"caption": true,
|
|
||||||
"center": true,
|
|
||||||
"col": true,
|
|
||||||
"colgroup": true,
|
|
||||||
"dd": true,
|
|
||||||
"details": true,
|
|
||||||
"dir": true,
|
|
||||||
"div": true,
|
|
||||||
"dl": true,
|
|
||||||
"dt": true,
|
|
||||||
"embed": true,
|
|
||||||
"fieldset": true,
|
|
||||||
"figcaption": true,
|
|
||||||
"figure": true,
|
|
||||||
"footer": true,
|
|
||||||
"form": true,
|
|
||||||
"frame": true,
|
|
||||||
"frameset": true,
|
|
||||||
"h1": true,
|
|
||||||
"h2": true,
|
|
||||||
"h3": true,
|
|
||||||
"h4": true,
|
|
||||||
"h5": true,
|
|
||||||
"h6": true,
|
|
||||||
"head": true,
|
|
||||||
"header": true,
|
|
||||||
"hgroup": true,
|
|
||||||
"hr": true,
|
|
||||||
"html": true,
|
|
||||||
"iframe": true,
|
|
||||||
"img": true,
|
|
||||||
"input": true,
|
|
||||||
"isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
|
|
||||||
"keygen": true,
|
|
||||||
"li": true,
|
|
||||||
"link": true,
|
|
||||||
"listing": true,
|
|
||||||
"main": true,
|
|
||||||
"marquee": true,
|
|
||||||
"menu": true,
|
|
||||||
"meta": true,
|
|
||||||
"nav": true,
|
|
||||||
"noembed": true,
|
|
||||||
"noframes": true,
|
|
||||||
"noscript": true,
|
|
||||||
"object": true,
|
|
||||||
"ol": true,
|
|
||||||
"p": true,
|
|
||||||
"param": true,
|
|
||||||
"plaintext": true,
|
|
||||||
"pre": true,
|
|
||||||
"script": true,
|
|
||||||
"section": true,
|
|
||||||
"select": true,
|
|
||||||
"source": true,
|
|
||||||
"style": true,
|
|
||||||
"summary": true,
|
|
||||||
"table": true,
|
|
||||||
"tbody": true,
|
|
||||||
"td": true,
|
|
||||||
"template": true,
|
|
||||||
"textarea": true,
|
|
||||||
"tfoot": true,
|
|
||||||
"th": true,
|
|
||||||
"thead": true,
|
|
||||||
"title": true,
|
|
||||||
"tr": true,
|
|
||||||
"track": true,
|
|
||||||
"ul": true,
|
|
||||||
"wbr": true,
|
|
||||||
"xmp": true,
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSpecialElement(element *Node) bool {
|
|
||||||
switch element.Namespace {
|
|
||||||
case "", "html":
|
|
||||||
return isSpecialElementMap[element.Data]
|
|
||||||
case "math":
|
|
||||||
switch element.Data {
|
|
||||||
case "mi", "mo", "mn", "ms", "mtext", "annotation-xml":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case "svg":
|
|
||||||
switch element.Data {
|
|
||||||
case "foreignObject", "desc", "title":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
106
vendor/golang.org/x/net/html/doc.go
generated
vendored
106
vendor/golang.org/x/net/html/doc.go
generated
vendored
@ -1,106 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package html implements an HTML5-compliant tokenizer and parser.
|
|
||||||
|
|
||||||
Tokenization is done by creating a Tokenizer for an io.Reader r. It is the
|
|
||||||
caller's responsibility to ensure that r provides UTF-8 encoded HTML.
|
|
||||||
|
|
||||||
z := html.NewTokenizer(r)
|
|
||||||
|
|
||||||
Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
|
|
||||||
which parses the next token and returns its type, or an error:
|
|
||||||
|
|
||||||
for {
|
|
||||||
tt := z.Next()
|
|
||||||
if tt == html.ErrorToken {
|
|
||||||
// ...
|
|
||||||
return ...
|
|
||||||
}
|
|
||||||
// Process the current token.
|
|
||||||
}
|
|
||||||
|
|
||||||
There are two APIs for retrieving the current token. The high-level API is to
|
|
||||||
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
|
|
||||||
allow optionally calling Raw after Next but before Token, Text, TagName, or
|
|
||||||
TagAttr. In EBNF notation, the valid call sequence per token is:
|
|
||||||
|
|
||||||
Next {Raw} [ Token | Text | TagName {TagAttr} ]
|
|
||||||
|
|
||||||
Token returns an independent data structure that completely describes a token.
|
|
||||||
Entities (such as "<") are unescaped, tag names and attribute keys are
|
|
||||||
lower-cased, and attributes are collected into a []Attribute. For example:
|
|
||||||
|
|
||||||
for {
|
|
||||||
if z.Next() == html.ErrorToken {
|
|
||||||
// Returning io.EOF indicates success.
|
|
||||||
return z.Err()
|
|
||||||
}
|
|
||||||
emitToken(z.Token())
|
|
||||||
}
|
|
||||||
|
|
||||||
The low-level API performs fewer allocations and copies, but the contents of
|
|
||||||
the []byte values returned by Text, TagName and TagAttr may change on the next
|
|
||||||
call to Next. For example, to extract an HTML page's anchor text:
|
|
||||||
|
|
||||||
depth := 0
|
|
||||||
for {
|
|
||||||
tt := z.Next()
|
|
||||||
switch tt {
|
|
||||||
case html.ErrorToken:
|
|
||||||
return z.Err()
|
|
||||||
case html.TextToken:
|
|
||||||
if depth > 0 {
|
|
||||||
// emitBytes should copy the []byte it receives,
|
|
||||||
// if it doesn't process it immediately.
|
|
||||||
emitBytes(z.Text())
|
|
||||||
}
|
|
||||||
case html.StartTagToken, html.EndTagToken:
|
|
||||||
tn, _ := z.TagName()
|
|
||||||
if len(tn) == 1 && tn[0] == 'a' {
|
|
||||||
if tt == html.StartTagToken {
|
|
||||||
depth++
|
|
||||||
} else {
|
|
||||||
depth--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Parsing is done by calling Parse with an io.Reader, which returns the root of
|
|
||||||
the parse tree (the document element) as a *Node. It is the caller's
|
|
||||||
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
|
|
||||||
example, to process each anchor node in depth-first order:
|
|
||||||
|
|
||||||
doc, err := html.Parse(r)
|
|
||||||
if err != nil {
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
var f func(*html.Node)
|
|
||||||
f = func(n *html.Node) {
|
|
||||||
if n.Type == html.ElementNode && n.Data == "a" {
|
|
||||||
// Do something with n...
|
|
||||||
}
|
|
||||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
|
||||||
f(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
f(doc)
|
|
||||||
|
|
||||||
The relevant specifications include:
|
|
||||||
https://html.spec.whatwg.org/multipage/syntax.html and
|
|
||||||
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
|
|
||||||
*/
|
|
||||||
package html // import "golang.org/x/net/html"
|
|
||||||
|
|
||||||
// The tokenization algorithm implemented by this package is not a line-by-line
|
|
||||||
// transliteration of the relatively verbose state-machine in the WHATWG
|
|
||||||
// specification. A more direct approach is used instead, where the program
|
|
||||||
// counter implies the state, such as whether it is tokenizing a tag or a text
|
|
||||||
// node. Specification compliance is verified by checking expected and actual
|
|
||||||
// outputs over a test suite rather than aiming for algorithmic fidelity.
|
|
||||||
|
|
||||||
// TODO(nigeltao): Does a DOM API belong in this package or a separate one?
|
|
||||||
// TODO(nigeltao): How does parsing interact with a JavaScript engine?
|
|
156
vendor/golang.org/x/net/html/doctype.go
generated
vendored
156
vendor/golang.org/x/net/html/doctype.go
generated
vendored
@ -1,156 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// parseDoctype parses the data from a DoctypeToken into a name,
|
|
||||||
// public identifier, and system identifier. It returns a Node whose Type
|
|
||||||
// is DoctypeNode, whose Data is the name, and which has attributes
|
|
||||||
// named "system" and "public" for the two identifiers if they were present.
|
|
||||||
// quirks is whether the document should be parsed in "quirks mode".
|
|
||||||
func parseDoctype(s string) (n *Node, quirks bool) {
|
|
||||||
n = &Node{Type: DoctypeNode}
|
|
||||||
|
|
||||||
// Find the name.
|
|
||||||
space := strings.IndexAny(s, whitespace)
|
|
||||||
if space == -1 {
|
|
||||||
space = len(s)
|
|
||||||
}
|
|
||||||
n.Data = s[:space]
|
|
||||||
// The comparison to "html" is case-sensitive.
|
|
||||||
if n.Data != "html" {
|
|
||||||
quirks = true
|
|
||||||
}
|
|
||||||
n.Data = strings.ToLower(n.Data)
|
|
||||||
s = strings.TrimLeft(s[space:], whitespace)
|
|
||||||
|
|
||||||
if len(s) < 6 {
|
|
||||||
// It can't start with "PUBLIC" or "SYSTEM".
|
|
||||||
// Ignore the rest of the string.
|
|
||||||
return n, quirks || s != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.ToLower(s[:6])
|
|
||||||
s = s[6:]
|
|
||||||
for key == "public" || key == "system" {
|
|
||||||
s = strings.TrimLeft(s, whitespace)
|
|
||||||
if s == "" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
quote := s[0]
|
|
||||||
if quote != '"' && quote != '\'' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s = s[1:]
|
|
||||||
q := strings.IndexRune(s, rune(quote))
|
|
||||||
var id string
|
|
||||||
if q == -1 {
|
|
||||||
id = s
|
|
||||||
s = ""
|
|
||||||
} else {
|
|
||||||
id = s[:q]
|
|
||||||
s = s[q+1:]
|
|
||||||
}
|
|
||||||
n.Attr = append(n.Attr, Attribute{Key: key, Val: id})
|
|
||||||
if key == "public" {
|
|
||||||
key = "system"
|
|
||||||
} else {
|
|
||||||
key = ""
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if key != "" || s != "" {
|
|
||||||
quirks = true
|
|
||||||
} else if len(n.Attr) > 0 {
|
|
||||||
if n.Attr[0].Key == "public" {
|
|
||||||
public := strings.ToLower(n.Attr[0].Val)
|
|
||||||
switch public {
|
|
||||||
case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html":
|
|
||||||
quirks = true
|
|
||||||
default:
|
|
||||||
for _, q := range quirkyIDs {
|
|
||||||
if strings.HasPrefix(public, q) {
|
|
||||||
quirks = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The following two public IDs only cause quirks mode if there is no system ID.
|
|
||||||
if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") ||
|
|
||||||
strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) {
|
|
||||||
quirks = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
|
|
||||||
strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
|
|
||||||
quirks = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, quirks
|
|
||||||
}
|
|
||||||
|
|
||||||
// quirkyIDs is a list of public doctype identifiers that cause a document
|
|
||||||
// to be interpreted in quirks mode. The identifiers should be in lower case.
|
|
||||||
var quirkyIDs = []string{
|
|
||||||
"+//silmaril//dtd html pro v0r11 19970101//",
|
|
||||||
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
|
|
||||||
"-//as//dtd html 3.0 aswedit + extensions//",
|
|
||||||
"-//ietf//dtd html 2.0 level 1//",
|
|
||||||
"-//ietf//dtd html 2.0 level 2//",
|
|
||||||
"-//ietf//dtd html 2.0 strict level 1//",
|
|
||||||
"-//ietf//dtd html 2.0 strict level 2//",
|
|
||||||
"-//ietf//dtd html 2.0 strict//",
|
|
||||||
"-//ietf//dtd html 2.0//",
|
|
||||||
"-//ietf//dtd html 2.1e//",
|
|
||||||
"-//ietf//dtd html 3.0//",
|
|
||||||
"-//ietf//dtd html 3.2 final//",
|
|
||||||
"-//ietf//dtd html 3.2//",
|
|
||||||
"-//ietf//dtd html 3//",
|
|
||||||
"-//ietf//dtd html level 0//",
|
|
||||||
"-//ietf//dtd html level 1//",
|
|
||||||
"-//ietf//dtd html level 2//",
|
|
||||||
"-//ietf//dtd html level 3//",
|
|
||||||
"-//ietf//dtd html strict level 0//",
|
|
||||||
"-//ietf//dtd html strict level 1//",
|
|
||||||
"-//ietf//dtd html strict level 2//",
|
|
||||||
"-//ietf//dtd html strict level 3//",
|
|
||||||
"-//ietf//dtd html strict//",
|
|
||||||
"-//ietf//dtd html//",
|
|
||||||
"-//metrius//dtd metrius presentational//",
|
|
||||||
"-//microsoft//dtd internet explorer 2.0 html strict//",
|
|
||||||
"-//microsoft//dtd internet explorer 2.0 html//",
|
|
||||||
"-//microsoft//dtd internet explorer 2.0 tables//",
|
|
||||||
"-//microsoft//dtd internet explorer 3.0 html strict//",
|
|
||||||
"-//microsoft//dtd internet explorer 3.0 html//",
|
|
||||||
"-//microsoft//dtd internet explorer 3.0 tables//",
|
|
||||||
"-//netscape comm. corp.//dtd html//",
|
|
||||||
"-//netscape comm. corp.//dtd strict html//",
|
|
||||||
"-//o'reilly and associates//dtd html 2.0//",
|
|
||||||
"-//o'reilly and associates//dtd html extended 1.0//",
|
|
||||||
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
|
|
||||||
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
|
|
||||||
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
|
|
||||||
"-//spyglass//dtd html 2.0 extended//",
|
|
||||||
"-//sq//dtd html 2.0 hotmetal + extensions//",
|
|
||||||
"-//sun microsystems corp.//dtd hotjava html//",
|
|
||||||
"-//sun microsystems corp.//dtd hotjava strict html//",
|
|
||||||
"-//w3c//dtd html 3 1995-03-24//",
|
|
||||||
"-//w3c//dtd html 3.2 draft//",
|
|
||||||
"-//w3c//dtd html 3.2 final//",
|
|
||||||
"-//w3c//dtd html 3.2//",
|
|
||||||
"-//w3c//dtd html 3.2s draft//",
|
|
||||||
"-//w3c//dtd html 4.0 frameset//",
|
|
||||||
"-//w3c//dtd html 4.0 transitional//",
|
|
||||||
"-//w3c//dtd html experimental 19960712//",
|
|
||||||
"-//w3c//dtd html experimental 970421//",
|
|
||||||
"-//w3c//dtd w3 html//",
|
|
||||||
"-//w3o//dtd w3 html 3.0//",
|
|
||||||
"-//webtechs//dtd mozilla html 2.0//",
|
|
||||||
"-//webtechs//dtd mozilla html//",
|
|
||||||
}
|
|
2253
vendor/golang.org/x/net/html/entity.go
generated
vendored
2253
vendor/golang.org/x/net/html/entity.go
generated
vendored
File diff suppressed because it is too large
Load Diff
258
vendor/golang.org/x/net/html/escape.go
generated
vendored
258
vendor/golang.org/x/net/html/escape.go
generated
vendored
@ -1,258 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These replacements permit compatibility with old numeric entities that
|
|
||||||
// assumed Windows-1252 encoding.
|
|
||||||
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
|
||||||
var replacementTable = [...]rune{
|
|
||||||
'\u20AC', // First entry is what 0x80 should be replaced with.
|
|
||||||
'\u0081',
|
|
||||||
'\u201A',
|
|
||||||
'\u0192',
|
|
||||||
'\u201E',
|
|
||||||
'\u2026',
|
|
||||||
'\u2020',
|
|
||||||
'\u2021',
|
|
||||||
'\u02C6',
|
|
||||||
'\u2030',
|
|
||||||
'\u0160',
|
|
||||||
'\u2039',
|
|
||||||
'\u0152',
|
|
||||||
'\u008D',
|
|
||||||
'\u017D',
|
|
||||||
'\u008F',
|
|
||||||
'\u0090',
|
|
||||||
'\u2018',
|
|
||||||
'\u2019',
|
|
||||||
'\u201C',
|
|
||||||
'\u201D',
|
|
||||||
'\u2022',
|
|
||||||
'\u2013',
|
|
||||||
'\u2014',
|
|
||||||
'\u02DC',
|
|
||||||
'\u2122',
|
|
||||||
'\u0161',
|
|
||||||
'\u203A',
|
|
||||||
'\u0153',
|
|
||||||
'\u009D',
|
|
||||||
'\u017E',
|
|
||||||
'\u0178', // Last entry is 0x9F.
|
|
||||||
// 0x00->'\uFFFD' is handled programmatically.
|
|
||||||
// 0x0D->'\u000D' is a no-op.
|
|
||||||
}
|
|
||||||
|
|
||||||
// unescapeEntity reads an entity like "<" from b[src:] and writes the
|
|
||||||
// corresponding "<" to b[dst:], returning the incremented dst and src cursors.
|
|
||||||
// Precondition: b[src] == '&' && dst <= src.
|
|
||||||
// attribute should be true if parsing an attribute value.
|
|
||||||
func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) {
|
|
||||||
// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference
|
|
||||||
|
|
||||||
// i starts at 1 because we already know that s[0] == '&'.
|
|
||||||
i, s := 1, b[src:]
|
|
||||||
|
|
||||||
if len(s) <= 1 {
|
|
||||||
b[dst] = b[src]
|
|
||||||
return dst + 1, src + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if s[i] == '#' {
|
|
||||||
if len(s) <= 3 { // We need to have at least "&#.".
|
|
||||||
b[dst] = b[src]
|
|
||||||
return dst + 1, src + 1
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
c := s[i]
|
|
||||||
hex := false
|
|
||||||
if c == 'x' || c == 'X' {
|
|
||||||
hex = true
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
|
|
||||||
x := '\x00'
|
|
||||||
for i < len(s) {
|
|
||||||
c = s[i]
|
|
||||||
i++
|
|
||||||
if hex {
|
|
||||||
if '0' <= c && c <= '9' {
|
|
||||||
x = 16*x + rune(c) - '0'
|
|
||||||
continue
|
|
||||||
} else if 'a' <= c && c <= 'f' {
|
|
||||||
x = 16*x + rune(c) - 'a' + 10
|
|
||||||
continue
|
|
||||||
} else if 'A' <= c && c <= 'F' {
|
|
||||||
x = 16*x + rune(c) - 'A' + 10
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if '0' <= c && c <= '9' {
|
|
||||||
x = 10*x + rune(c) - '0'
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if c != ';' {
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if i <= 3 { // No characters matched.
|
|
||||||
b[dst] = b[src]
|
|
||||||
return dst + 1, src + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if 0x80 <= x && x <= 0x9F {
|
|
||||||
// Replace characters from Windows-1252 with UTF-8 equivalents.
|
|
||||||
x = replacementTable[x-0x80]
|
|
||||||
} else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF {
|
|
||||||
// Replace invalid characters with the replacement character.
|
|
||||||
x = '\uFFFD'
|
|
||||||
}
|
|
||||||
|
|
||||||
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume the maximum number of characters possible, with the
|
|
||||||
// consumed characters matching one of the named references.
|
|
||||||
|
|
||||||
for i < len(s) {
|
|
||||||
c := s[i]
|
|
||||||
i++
|
|
||||||
// Lower-cased characters are more common in entities, so we check for them first.
|
|
||||||
if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if c != ';' {
|
|
||||||
i--
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
entityName := string(s[1:i])
|
|
||||||
if entityName == "" {
|
|
||||||
// No-op.
|
|
||||||
} else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' {
|
|
||||||
// No-op.
|
|
||||||
} else if x := entity[entityName]; x != 0 {
|
|
||||||
return dst + utf8.EncodeRune(b[dst:], x), src + i
|
|
||||||
} else if x := entity2[entityName]; x[0] != 0 {
|
|
||||||
dst1 := dst + utf8.EncodeRune(b[dst:], x[0])
|
|
||||||
return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i
|
|
||||||
} else if !attribute {
|
|
||||||
maxLen := len(entityName) - 1
|
|
||||||
if maxLen > longestEntityWithoutSemicolon {
|
|
||||||
maxLen = longestEntityWithoutSemicolon
|
|
||||||
}
|
|
||||||
for j := maxLen; j > 1; j-- {
|
|
||||||
if x := entity[entityName[:j]]; x != 0 {
|
|
||||||
return dst + utf8.EncodeRune(b[dst:], x), src + j + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dst1, src1 = dst+i, src+i
|
|
||||||
copy(b[dst:dst1], b[src:src1])
|
|
||||||
return dst1, src1
|
|
||||||
}
|
|
||||||
|
|
||||||
// unescape unescapes b's entities in-place, so that "a<b" becomes "a<b".
|
|
||||||
// attribute should be true if parsing an attribute value.
|
|
||||||
func unescape(b []byte, attribute bool) []byte {
|
|
||||||
for i, c := range b {
|
|
||||||
if c == '&' {
|
|
||||||
dst, src := unescapeEntity(b, i, i, attribute)
|
|
||||||
for src < len(b) {
|
|
||||||
c := b[src]
|
|
||||||
if c == '&' {
|
|
||||||
dst, src = unescapeEntity(b, dst, src, attribute)
|
|
||||||
} else {
|
|
||||||
b[dst] = c
|
|
||||||
dst, src = dst+1, src+1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b[0:dst]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// lower lower-cases the A-Z bytes in b in-place, so that "aBc" becomes "abc".
func lower(b []byte) []byte {
	for i := range b {
		if b[i] >= 'A' && b[i] <= 'Z' {
			b[i] += 'a' - 'A'
		}
	}
	return b
}
|
|
||||||
|
|
||||||
// escapedChars lists the characters that escape replaces with entities.
const escapedChars = "&'<>\"\r"
|
|
||||||
|
|
||||||
func escape(w writer, s string) error {
|
|
||||||
i := strings.IndexAny(s, escapedChars)
|
|
||||||
for i != -1 {
|
|
||||||
if _, err := w.WriteString(s[:i]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var esc string
|
|
||||||
switch s[i] {
|
|
||||||
case '&':
|
|
||||||
esc = "&"
|
|
||||||
case '\'':
|
|
||||||
// "'" is shorter than "'" and apos was not in HTML until HTML5.
|
|
||||||
esc = "'"
|
|
||||||
case '<':
|
|
||||||
esc = "<"
|
|
||||||
case '>':
|
|
||||||
esc = ">"
|
|
||||||
case '"':
|
|
||||||
// """ is shorter than """.
|
|
||||||
esc = """
|
|
||||||
case '\r':
|
|
||||||
esc = " "
|
|
||||||
default:
|
|
||||||
panic("unrecognized escape character")
|
|
||||||
}
|
|
||||||
s = s[i+1:]
|
|
||||||
if _, err := w.WriteString(esc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
i = strings.IndexAny(s, escapedChars)
|
|
||||||
}
|
|
||||||
_, err := w.WriteString(s)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// EscapeString escapes special characters like "<" to become "&lt;". It
// escapes only five such characters: <, >, &, ' and ".
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
// always true.
func EscapeString(s string) string {
	// Fast path: nothing to escape.
	if strings.IndexAny(s, escapedChars) == -1 {
		return s
	}
	var buf bytes.Buffer
	// escape's only error source is the writer; bytes.Buffer writes cannot
	// fail, so the error is deliberately ignored.
	escape(&buf, s)
	return buf.String()
}
|
|
||||||
|
|
||||||
// UnescapeString unescapes entities like "<" to become "<". It unescapes a
|
|
||||||
// larger range of entities than EscapeString escapes. For example, "á"
|
|
||||||
// unescapes to "á", as does "á" and "&xE1;".
|
|
||||||
// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't
|
|
||||||
// always true.
|
|
||||||
func UnescapeString(s string) string {
|
|
||||||
for _, c := range s {
|
|
||||||
if c == '&' {
|
|
||||||
return string(unescape([]byte(s), false))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
226
vendor/golang.org/x/net/html/foreign.go
generated
vendored
226
vendor/golang.org/x/net/html/foreign.go
generated
vendored
@ -1,226 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func adjustAttributeNames(aa []Attribute, nameMap map[string]string) {
|
|
||||||
for i := range aa {
|
|
||||||
if newName, ok := nameMap[aa[i].Key]; ok {
|
|
||||||
aa[i].Key = newName
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func adjustForeignAttributes(aa []Attribute) {
|
|
||||||
for i, a := range aa {
|
|
||||||
if a.Key == "" || a.Key[0] != 'x' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
switch a.Key {
|
|
||||||
case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show",
|
|
||||||
"xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink":
|
|
||||||
j := strings.Index(a.Key, ":")
|
|
||||||
aa[i].Namespace = a.Key[:j]
|
|
||||||
aa[i].Key = a.Key[j+1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func htmlIntegrationPoint(n *Node) bool {
|
|
||||||
if n.Type != ElementNode {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
switch n.Namespace {
|
|
||||||
case "math":
|
|
||||||
if n.Data == "annotation-xml" {
|
|
||||||
for _, a := range n.Attr {
|
|
||||||
if a.Key == "encoding" {
|
|
||||||
val := strings.ToLower(a.Val)
|
|
||||||
if val == "text/html" || val == "application/xhtml+xml" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "svg":
|
|
||||||
switch n.Data {
|
|
||||||
case "desc", "foreignObject", "title":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func mathMLTextIntegrationPoint(n *Node) bool {
|
|
||||||
if n.Namespace != "math" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
switch n.Data {
|
|
||||||
case "mi", "mo", "mn", "ms", "mtext":
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section 12.2.6.5.
// breakout lists the HTML tag names that, when encountered inside foreign
// (SVG/MathML) content, break the parser back out into the HTML namespace.
var breakout = map[string]bool{
	"b": true, "big": true, "blockquote": true, "body": true, "br": true,
	"center": true, "code": true, "dd": true, "div": true, "dl": true,
	"dt": true, "em": true, "embed": true, "h1": true, "h2": true,
	"h3": true, "h4": true, "h5": true, "h6": true, "head": true,
	"hr": true, "i": true, "img": true, "li": true, "listing": true,
	"menu": true, "meta": true, "nobr": true, "ol": true, "p": true,
	"pre": true, "ruby": true, "s": true, "small": true, "span": true,
	"strong": true, "strike": true, "sub": true, "sup": true, "table": true,
	"tt": true, "u": true, "ul": true, "var": true,
}
|
|
||||||
|
|
||||||
// Section 12.2.6.5.
// svgTagNameAdjustments maps lower-cased SVG tag names to their canonical
// mixed-case spellings.
var svgTagNameAdjustments = map[string]string{
	"altglyph":            "altGlyph",
	"altglyphdef":         "altGlyphDef",
	"altglyphitem":        "altGlyphItem",
	"animatecolor":        "animateColor",
	"animatemotion":       "animateMotion",
	"animatetransform":    "animateTransform",
	"clippath":            "clipPath",
	"feblend":             "feBlend",
	"fecolormatrix":       "feColorMatrix",
	"fecomponenttransfer": "feComponentTransfer",
	"fecomposite":         "feComposite",
	"feconvolvematrix":    "feConvolveMatrix",
	"fediffuselighting":   "feDiffuseLighting",
	"fedisplacementmap":   "feDisplacementMap",
	"fedistantlight":      "feDistantLight",
	"feflood":             "feFlood",
	"fefunca":             "feFuncA",
	"fefuncb":             "feFuncB",
	"fefuncg":             "feFuncG",
	"fefuncr":             "feFuncR",
	"fegaussianblur":      "feGaussianBlur",
	"feimage":             "feImage",
	"femerge":             "feMerge",
	"femergenode":         "feMergeNode",
	"femorphology":        "feMorphology",
	"feoffset":            "feOffset",
	"fepointlight":        "fePointLight",
	"fespecularlighting":  "feSpecularLighting",
	"fespotlight":         "feSpotLight",
	"fetile":              "feTile",
	"feturbulence":        "feTurbulence",
	"foreignobject":       "foreignObject",
	"glyphref":            "glyphRef",
	"lineargradient":      "linearGradient",
	"radialgradient":      "radialGradient",
	"textpath":            "textPath",
}
|
|
||||||
|
|
||||||
// Section 12.2.6.1
// mathMLAttributeAdjustments maps lower-cased MathML attribute names to
// their canonical mixed-case spellings.
var mathMLAttributeAdjustments = map[string]string{
	"definitionurl": "definitionURL",
}
|
|
||||||
|
|
||||||
// svgAttributeAdjustments maps lower-cased SVG attribute names to their
// canonical mixed-case spellings.
var svgAttributeAdjustments = map[string]string{
	"attributename":             "attributeName",
	"attributetype":             "attributeType",
	"basefrequency":             "baseFrequency",
	"baseprofile":               "baseProfile",
	"calcmode":                  "calcMode",
	"clippathunits":             "clipPathUnits",
	"contentscripttype":         "contentScriptType",
	"contentstyletype":          "contentStyleType",
	"diffuseconstant":           "diffuseConstant",
	"edgemode":                  "edgeMode",
	"externalresourcesrequired": "externalResourcesRequired",
	"filterres":                 "filterRes",
	"filterunits":               "filterUnits",
	"glyphref":                  "glyphRef",
	"gradienttransform":         "gradientTransform",
	"gradientunits":             "gradientUnits",
	"kernelmatrix":              "kernelMatrix",
	"kernelunitlength":          "kernelUnitLength",
	"keypoints":                 "keyPoints",
	"keysplines":                "keySplines",
	"keytimes":                  "keyTimes",
	"lengthadjust":              "lengthAdjust",
	"limitingconeangle":         "limitingConeAngle",
	"markerheight":              "markerHeight",
	"markerunits":               "markerUnits",
	"markerwidth":               "markerWidth",
	"maskcontentunits":          "maskContentUnits",
	"maskunits":                 "maskUnits",
	"numoctaves":                "numOctaves",
	"pathlength":                "pathLength",
	"patterncontentunits":       "patternContentUnits",
	"patterntransform":          "patternTransform",
	"patternunits":              "patternUnits",
	"pointsatx":                 "pointsAtX",
	"pointsaty":                 "pointsAtY",
	"pointsatz":                 "pointsAtZ",
	"preservealpha":             "preserveAlpha",
	"preserveaspectratio":       "preserveAspectRatio",
	"primitiveunits":            "primitiveUnits",
	"refx":                      "refX",
	"refy":                      "refY",
	"repeatcount":               "repeatCount",
	"repeatdur":                 "repeatDur",
	"requiredextensions":        "requiredExtensions",
	"requiredfeatures":          "requiredFeatures",
	"specularconstant":          "specularConstant",
	"specularexponent":          "specularExponent",
	"spreadmethod":              "spreadMethod",
	"startoffset":               "startOffset",
	"stddeviation":              "stdDeviation",
	"stitchtiles":               "stitchTiles",
	"surfacescale":              "surfaceScale",
	"systemlanguage":            "systemLanguage",
	"tablevalues":               "tableValues",
	"targetx":                   "targetX",
	"targety":                   "targetY",
	"textlength":                "textLength",
	"viewbox":                   "viewBox",
	"viewtarget":                "viewTarget",
	"xchannelselector":          "xChannelSelector",
	"ychannelselector":          "yChannelSelector",
	"zoomandpan":                "zoomAndPan",
}
|
|
220
vendor/golang.org/x/net/html/node.go
generated
vendored
220
vendor/golang.org/x/net/html/node.go
generated
vendored
@ -1,220 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/net/html/atom"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A NodeType is the type of a Node.
type NodeType uint32

// The defined node types. scopeMarkerNode is unexported and used only
// internally by the parser.
const (
	ErrorNode NodeType = iota
	TextNode
	DocumentNode
	ElementNode
	CommentNode
	DoctypeNode
	scopeMarkerNode
)
|
|
||||||
|
|
||||||
// Section 12.2.4.3 says "The markers are inserted when entering applet,
// object, marquee, template, td, th, and caption elements, and are used
// to prevent formatting from "leaking" into applet, object, marquee,
// template, td, th, and caption elements".
var scopeMarker = Node{Type: scopeMarkerNode}
|
|
||||||
|
|
||||||
// A Node consists of a NodeType and some Data (tag name for element nodes,
// content for text) and are part of a tree of Nodes. Element nodes may also
// have a Namespace and contain a slice of Attributes. Data is unescaped, so
// that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
// is the atom for Data, or zero if Data is not a known tag name.
//
// An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
// Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
// "svg" is short for "http://www.w3.org/2000/svg".
type Node struct {
	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node

	Type      NodeType
	DataAtom  atom.Atom
	Data      string
	Namespace string
	Attr      []Attribute
}
|
|
||||||
|
|
||||||
// InsertBefore inserts newChild as a child of n, immediately before oldChild
|
|
||||||
// in the sequence of n's children. oldChild may be nil, in which case newChild
|
|
||||||
// is appended to the end of n's children.
|
|
||||||
//
|
|
||||||
// It will panic if newChild already has a parent or siblings.
|
|
||||||
func (n *Node) InsertBefore(newChild, oldChild *Node) {
|
|
||||||
if newChild.Parent != nil || newChild.PrevSibling != nil || newChild.NextSibling != nil {
|
|
||||||
panic("html: InsertBefore called for an attached child Node")
|
|
||||||
}
|
|
||||||
var prev, next *Node
|
|
||||||
if oldChild != nil {
|
|
||||||
prev, next = oldChild.PrevSibling, oldChild
|
|
||||||
} else {
|
|
||||||
prev = n.LastChild
|
|
||||||
}
|
|
||||||
if prev != nil {
|
|
||||||
prev.NextSibling = newChild
|
|
||||||
} else {
|
|
||||||
n.FirstChild = newChild
|
|
||||||
}
|
|
||||||
if next != nil {
|
|
||||||
next.PrevSibling = newChild
|
|
||||||
} else {
|
|
||||||
n.LastChild = newChild
|
|
||||||
}
|
|
||||||
newChild.Parent = n
|
|
||||||
newChild.PrevSibling = prev
|
|
||||||
newChild.NextSibling = next
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendChild adds a node c as a child of n.
|
|
||||||
//
|
|
||||||
// It will panic if c already has a parent or siblings.
|
|
||||||
func (n *Node) AppendChild(c *Node) {
|
|
||||||
if c.Parent != nil || c.PrevSibling != nil || c.NextSibling != nil {
|
|
||||||
panic("html: AppendChild called for an attached child Node")
|
|
||||||
}
|
|
||||||
last := n.LastChild
|
|
||||||
if last != nil {
|
|
||||||
last.NextSibling = c
|
|
||||||
} else {
|
|
||||||
n.FirstChild = c
|
|
||||||
}
|
|
||||||
n.LastChild = c
|
|
||||||
c.Parent = n
|
|
||||||
c.PrevSibling = last
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveChild removes a node c that is a child of n. Afterwards, c will have
|
|
||||||
// no parent and no siblings.
|
|
||||||
//
|
|
||||||
// It will panic if c's parent is not n.
|
|
||||||
func (n *Node) RemoveChild(c *Node) {
|
|
||||||
if c.Parent != n {
|
|
||||||
panic("html: RemoveChild called for a non-child Node")
|
|
||||||
}
|
|
||||||
if n.FirstChild == c {
|
|
||||||
n.FirstChild = c.NextSibling
|
|
||||||
}
|
|
||||||
if c.NextSibling != nil {
|
|
||||||
c.NextSibling.PrevSibling = c.PrevSibling
|
|
||||||
}
|
|
||||||
if n.LastChild == c {
|
|
||||||
n.LastChild = c.PrevSibling
|
|
||||||
}
|
|
||||||
if c.PrevSibling != nil {
|
|
||||||
c.PrevSibling.NextSibling = c.NextSibling
|
|
||||||
}
|
|
||||||
c.Parent = nil
|
|
||||||
c.PrevSibling = nil
|
|
||||||
c.NextSibling = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// reparentChildren reparents all of src's child nodes to dst.
|
|
||||||
func reparentChildren(dst, src *Node) {
|
|
||||||
for {
|
|
||||||
child := src.FirstChild
|
|
||||||
if child == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
src.RemoveChild(child)
|
|
||||||
dst.AppendChild(child)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// clone returns a new node with the same type, data and attributes.
|
|
||||||
// The clone has no parent, no siblings and no children.
|
|
||||||
func (n *Node) clone() *Node {
|
|
||||||
m := &Node{
|
|
||||||
Type: n.Type,
|
|
||||||
DataAtom: n.DataAtom,
|
|
||||||
Data: n.Data,
|
|
||||||
Attr: make([]Attribute, len(n.Attr)),
|
|
||||||
}
|
|
||||||
copy(m.Attr, n.Attr)
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// nodeStack is a stack of nodes.
|
|
||||||
type nodeStack []*Node
|
|
||||||
|
|
||||||
// pop pops the stack. It will panic if s is empty.
|
|
||||||
func (s *nodeStack) pop() *Node {
|
|
||||||
i := len(*s)
|
|
||||||
n := (*s)[i-1]
|
|
||||||
*s = (*s)[:i-1]
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
// top returns the most recently pushed node, or nil if s is empty.
|
|
||||||
func (s *nodeStack) top() *Node {
|
|
||||||
if i := len(*s); i > 0 {
|
|
||||||
return (*s)[i-1]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// index returns the index of the top-most occurrence of n in the stack, or -1
|
|
||||||
// if n is not present.
|
|
||||||
func (s *nodeStack) index(n *Node) int {
|
|
||||||
for i := len(*s) - 1; i >= 0; i-- {
|
|
||||||
if (*s)[i] == n {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// contains returns whether a is within s.
|
|
||||||
func (s *nodeStack) contains(a atom.Atom) bool {
|
|
||||||
for _, n := range *s {
|
|
||||||
if n.DataAtom == a && n.Namespace == "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// insert inserts a node at the given index.
|
|
||||||
func (s *nodeStack) insert(i int, n *Node) {
|
|
||||||
(*s) = append(*s, nil)
|
|
||||||
copy((*s)[i+1:], (*s)[i:])
|
|
||||||
(*s)[i] = n
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove removes a node from the stack. It is a no-op if n is not present.
|
|
||||||
func (s *nodeStack) remove(n *Node) {
|
|
||||||
i := s.index(n)
|
|
||||||
if i == -1 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
copy((*s)[i:], (*s)[i+1:])
|
|
||||||
j := len(*s) - 1
|
|
||||||
(*s)[j] = nil
|
|
||||||
*s = (*s)[:j]
|
|
||||||
}
|
|
||||||
|
|
||||||
type insertionModeStack []insertionMode
|
|
||||||
|
|
||||||
func (s *insertionModeStack) pop() (im insertionMode) {
|
|
||||||
i := len(*s)
|
|
||||||
im = (*s)[i-1]
|
|
||||||
*s = (*s)[:i-1]
|
|
||||||
return im
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *insertionModeStack) top() insertionMode {
|
|
||||||
if i := len(*s); i > 0 {
|
|
||||||
return (*s)[i-1]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
2417
vendor/golang.org/x/net/html/parse.go
generated
vendored
2417
vendor/golang.org/x/net/html/parse.go
generated
vendored
File diff suppressed because it is too large
Load Diff
271
vendor/golang.org/x/net/html/render.go
generated
vendored
271
vendor/golang.org/x/net/html/render.go
generated
vendored
@ -1,271 +0,0 @@
|
|||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package html
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writer is the combined output interface used by render: byte, byte-slice
// and string writes. bufio.Writer and bytes.Buffer both satisfy it.
type writer interface {
	io.Writer
	io.ByteWriter
	WriteString(string) (int, error)
}
|
|
||||||
|
|
||||||
// Render renders the parse tree n to the given writer.
//
// Rendering is done on a 'best effort' basis: calling Parse on the output of
// Render will always result in something similar to the original tree, but it
// is not necessarily an exact clone unless the original tree was 'well-formed'.
// 'Well-formed' is not easily specified; the HTML5 specification is
// complicated.
//
// Calling Parse on arbitrary input typically results in a 'well-formed' parse
// tree. However, it is possible for Parse to yield a 'badly-formed' parse tree.
// For example, in a 'well-formed' parse tree, no <a> element is a child of
// another <a> element: parsing "<a><a>" results in two sibling elements.
// Similarly, in a 'well-formed' parse tree, no <a> element is a child of a
// <table> element: parsing "<p><table><a>" results in a <p> with two sibling
// children; the <a> is reparented to the <table>'s parent. However, calling
// Parse on "<a><table><a>" does not return an error, but the result has an <a>
// element with an <a> child, and is therefore not 'well-formed'.
//
// Programmatically constructed trees are typically also 'well-formed', but it
// is possible to construct a tree that looks innocuous but, when rendered and
// re-parsed, results in a different tree. A simple example is that a solitary
// text node would become a tree containing <html>, <head> and <body> elements.
// Another example is that the programmatic equivalent of "a<head>b</head>c"
// becomes "<html><head><head/><body>abc</body></html>".
func Render(w io.Writer, n *Node) error {
	// Fast path: w already provides the byte/string writes render needs
	// (e.g. *bufio.Writer or *bytes.Buffer).
	if x, ok := w.(writer); ok {
		return render(x, n)
	}
	// Otherwise buffer the many small writes and flush once at the end.
	buf := bufio.NewWriter(w)
	if err := render(buf, n); err != nil {
		return err
	}
	return buf.Flush()
}
|
|
||||||
|
|
||||||
// plaintextAbort is returned from render1 when a <plaintext> element
// has been rendered. No more end tags should be rendered after that.
// It is a sentinel, filtered out by render, never returned to callers.
var plaintextAbort = errors.New("html: internal error (plaintext abort)")
|
|
||||||
|
|
||||||
func render(w writer, n *Node) error {
|
|
||||||
err := render1(w, n)
|
|
||||||
if err == plaintextAbort {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// render1 writes the serialization of n and its subtree to w. It returns
// the plaintextAbort sentinel after rendering a <plaintext> element, since
// nothing may follow one; render translates that into a nil error.
func render1(w writer, n *Node) error {
	// Render non-element nodes; these are the easy cases.
	switch n.Type {
	case ErrorNode:
		return errors.New("html: cannot render an ErrorNode node")
	case TextNode:
		return escape(w, n.Data)
	case DocumentNode:
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if err := render1(w, c); err != nil {
				return err
			}
		}
		return nil
	case ElementNode:
		// No-op: falls through to the tag-rendering code below.
	case CommentNode:
		// Comment data is written verbatim, not escaped.
		if _, err := w.WriteString("<!--"); err != nil {
			return err
		}
		if _, err := w.WriteString(n.Data); err != nil {
			return err
		}
		if _, err := w.WriteString("-->"); err != nil {
			return err
		}
		return nil
	case DoctypeNode:
		if _, err := w.WriteString("<!DOCTYPE "); err != nil {
			return err
		}
		if _, err := w.WriteString(n.Data); err != nil {
			return err
		}
		if n.Attr != nil {
			// "public" and "system" attributes become the PUBLIC/SYSTEM
			// identifiers of the doctype declaration.
			var p, s string
			for _, a := range n.Attr {
				switch a.Key {
				case "public":
					p = a.Val
				case "system":
					s = a.Val
				}
			}
			if p != "" {
				if _, err := w.WriteString(" PUBLIC "); err != nil {
					return err
				}
				if err := writeQuoted(w, p); err != nil {
					return err
				}
				if s != "" {
					if err := w.WriteByte(' '); err != nil {
						return err
					}
					if err := writeQuoted(w, s); err != nil {
						return err
					}
				}
			} else if s != "" {
				if _, err := w.WriteString(" SYSTEM "); err != nil {
					return err
				}
				if err := writeQuoted(w, s); err != nil {
					return err
				}
			}
		}
		return w.WriteByte('>')
	default:
		return errors.New("html: unknown node type")
	}

	// Render the <xxx> opening tag.
	if err := w.WriteByte('<'); err != nil {
		return err
	}
	if _, err := w.WriteString(n.Data); err != nil {
		return err
	}
	for _, a := range n.Attr {
		if err := w.WriteByte(' '); err != nil {
			return err
		}
		if a.Namespace != "" {
			// Namespaced attributes render as "ns:key".
			if _, err := w.WriteString(a.Namespace); err != nil {
				return err
			}
			if err := w.WriteByte(':'); err != nil {
				return err
			}
		}
		if _, err := w.WriteString(a.Key); err != nil {
			return err
		}
		if _, err := w.WriteString(`="`); err != nil {
			return err
		}
		if err := escape(w, a.Val); err != nil {
			return err
		}
		if err := w.WriteByte('"'); err != nil {
			return err
		}
	}
	if voidElements[n.Data] {
		// Void elements self-close and must not have children.
		if n.FirstChild != nil {
			return fmt.Errorf("html: void element <%s> has child nodes", n.Data)
		}
		_, err := w.WriteString("/>")
		return err
	}
	if err := w.WriteByte('>'); err != nil {
		return err
	}

	// Add initial newline where there is danger of a newline being ignored.
	if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
		switch n.Data {
		case "pre", "listing", "textarea":
			if err := w.WriteByte('\n'); err != nil {
				return err
			}
		}
	}

	// Render any child nodes.
	switch n.Data {
	case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
		// Raw-text elements: child text is written verbatim, not escaped.
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if c.Type == TextNode {
				if _, err := w.WriteString(c.Data); err != nil {
					return err
				}
			} else {
				if err := render1(w, c); err != nil {
					return err
				}
			}
		}
		if n.Data == "plaintext" {
			// Don't render anything else. <plaintext> must be the
			// last element in the file, with no closing tag.
			return plaintextAbort
		}
	default:
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			if err := render1(w, c); err != nil {
				return err
			}
		}
	}

	// Render the </xxx> closing tag.
	if _, err := w.WriteString("</"); err != nil {
		return err
	}
	if _, err := w.WriteString(n.Data); err != nil {
		return err
	}
	return w.WriteByte('>')
}
|
|
||||||
|
|
||||||
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
|
|
||||||
// quotes, but if s contains a double quote, it will use single quotes.
|
|
||||||
// It is used for writing the identifiers in a doctype declaration.
|
|
||||||
// In valid HTML, they can't contain both types of quotes.
|
|
||||||
func writeQuoted(w writer, s string) error {
|
|
||||||
var q byte = '"'
|
|
||||||
if strings.Contains(s, `"`) {
|
|
||||||
q = '\''
|
|
||||||
}
|
|
||||||
if err := w.WriteByte(q); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if _, err := w.WriteString(s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := w.WriteByte(q); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents.
var voidElements = map[string]bool{
	"area": true, "base": true, "br": true, "col": true,
	"command": true, "embed": true, "hr": true, "img": true,
	"input": true, "keygen": true, "link": true, "meta": true,
	"param": true, "source": true, "track": true, "wbr": true,
}
|
|
1219
vendor/golang.org/x/net/html/token.go
generated
vendored
1219
vendor/golang.org/x/net/html/token.go
generated
vendored
File diff suppressed because it is too large
Load Diff
50
vendor/golang.org/x/net/http/httpguts/guts.go
generated
vendored
50
vendor/golang.org/x/net/http/httpguts/guts.go
generated
vendored
@ -1,50 +0,0 @@
|
|||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package httpguts provides functions implementing various details
|
|
||||||
// of the HTTP specification.
|
|
||||||
//
|
|
||||||
// This package is shared by the standard library (which vendors it)
|
|
||||||
// and x/net/http2. It comes with no API stability promise.
|
|
||||||
package httpguts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/textproto"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidTrailerHeader reports whether name is a valid header field name to appear
|
|
||||||
// in trailers.
|
|
||||||
// See RFC 7230, Section 4.1.2
|
|
||||||
func ValidTrailerHeader(name string) bool {
|
|
||||||
name = textproto.CanonicalMIMEHeaderKey(name)
|
|
||||||
if strings.HasPrefix(name, "If-") || badTrailer[name] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// badTrailer lists canonical header names that must not appear in trailers
// (keys are in textproto.CanonicalMIMEHeaderKey form).
var badTrailer = map[string]bool{
	"Authorization": true, "Cache-Control": true, "Connection": true,
	"Content-Encoding": true, "Content-Length": true, "Content-Range": true,
	"Content-Type": true, "Expect": true, "Host": true, "Keep-Alive": true,
	"Max-Forwards": true, "Pragma": true, "Proxy-Authenticate": true,
	"Proxy-Authorization": true, "Proxy-Connection": true, "Range": true,
	"Realm": true, "Te": true, "Trailer": true, "Transfer-Encoding": true,
	"Www-Authenticate": true,
}
|
|
346
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
346
vendor/golang.org/x/net/http/httpguts/httplex.go
generated
vendored
@ -1,346 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package httpguts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/net/idna"
|
|
||||||
)
|
|
||||||
|
|
||||||
// isTokenTable marks the ASCII bytes that are valid token (tchar) characters
// per RFC 7230: letters, digits, and a fixed set of punctuation.
var isTokenTable = [127]bool{
	'!': true, '#': true, '$': true, '%': true, '&': true, '\'': true,
	'*': true, '+': true, '-': true, '.': true,
	'0': true, '1': true, '2': true, '3': true, '4': true,
	'5': true, '6': true, '7': true, '8': true, '9': true,
	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true,
	'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true,
	'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true,
	'S': true, 'T': true, 'U': true, 'W': true, 'V': true, 'X': true,
	'Y': true, 'Z': true,
	'^': true, '_': true, '`': true,
	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true,
	'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true,
	'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true,
	's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,
	'|': true, '~': true,
}
|
|
||||||
|
|
||||||
func IsTokenRune(r rune) bool {
|
|
||||||
i := int(r)
|
|
||||||
return i < len(isTokenTable) && isTokenTable[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNotToken(r rune) bool {
|
|
||||||
return !IsTokenRune(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeaderValuesContainsToken reports whether any string in values
|
|
||||||
// contains the provided token, ASCII case-insensitively.
|
|
||||||
func HeaderValuesContainsToken(values []string, token string) bool {
|
|
||||||
for _, v := range values {
|
|
||||||
if headerValueContainsToken(v, token) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// isOWS reports whether b is an optional whitespace byte, as defined
// by RFC 7230 section 3.2.3: a space or a horizontal tab.
func isOWS(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// trimOWS returns x with all optional whitespace (space and horizontal
// tab) removed from the beginning and end.
func trimOWS(x string) string {
	// TODO: consider using strings.Trim(x, " \t") instead,
	// if and when it's fast enough. See issue 10292.
	// But this ASCII-only code will probably always beat UTF-8
	// aware code.
	lo, hi := 0, len(x)
	for lo < hi && (x[lo] == ' ' || x[lo] == '\t') {
		lo++
	}
	for hi > lo && (x[hi-1] == ' ' || x[hi-1] == '\t') {
		hi--
	}
	return x[lo:hi]
}
|
|
||||||
|
|
||||||
// headerValueContainsToken reports whether v (assumed to be a
|
|
||||||
// 0#element, in the ABNF extension described in RFC 7230 section 7)
|
|
||||||
// contains token amongst its comma-separated tokens, ASCII
|
|
||||||
// case-insensitively.
|
|
||||||
func headerValueContainsToken(v string, token string) bool {
|
|
||||||
v = trimOWS(v)
|
|
||||||
if comma := strings.IndexByte(v, ','); comma != -1 {
|
|
||||||
return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
|
|
||||||
}
|
|
||||||
return tokenEqual(v, token)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lowerASCII returns the ASCII lowercase version of b; bytes outside
// 'A'..'Z' are returned unchanged.
func lowerASCII(b byte) byte {
	if b < 'A' || b > 'Z' {
		return b
	}
	return b - 'A' + 'a'
}
|
|
||||||
|
|
||||||
// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
|
|
||||||
func tokenEqual(t1, t2 string) bool {
|
|
||||||
if len(t1) != len(t2) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i, b := range t1 {
|
|
||||||
if b >= utf8.RuneSelf {
|
|
||||||
// No UTF-8 or non-ASCII allowed in tokens.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// isLWS reports whether b is linear white space, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
// (the SP / HT portion of LWS = [CRLF] 1*( SP | HT )).
func isLWS(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	default:
		return false
	}
}
|
|
||||||
|
|
||||||
// isCTL reports whether b is a control byte, according
// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2:
// any octet in 0..31, plus DEL (127).
func isCTL(b byte) bool {
	return b < 0x20 || b == 0x7f
}
|
|
||||||
|
|
||||||
// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
|
|
||||||
// HTTP/2 imposes the additional restriction that uppercase ASCII
|
|
||||||
// letters are not allowed.
|
|
||||||
//
|
|
||||||
// RFC 7230 says:
|
|
||||||
// header-field = field-name ":" OWS field-value OWS
|
|
||||||
// field-name = token
|
|
||||||
// token = 1*tchar
|
|
||||||
// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
|
|
||||||
// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
|
|
||||||
func ValidHeaderFieldName(v string) bool {
|
|
||||||
if len(v) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, r := range v {
|
|
||||||
if !IsTokenRune(r) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidHostHeader reports whether h is a valid host header.
|
|
||||||
func ValidHostHeader(h string) bool {
|
|
||||||
// The latest spec is actually this:
|
|
||||||
//
|
|
||||||
// http://tools.ietf.org/html/rfc7230#section-5.4
|
|
||||||
// Host = uri-host [ ":" port ]
|
|
||||||
//
|
|
||||||
// Where uri-host is:
|
|
||||||
// http://tools.ietf.org/html/rfc3986#section-3.2.2
|
|
||||||
//
|
|
||||||
// But we're going to be much more lenient for now and just
|
|
||||||
// search for any byte that's not a valid byte in any of those
|
|
||||||
// expressions.
|
|
||||||
for i := 0; i < len(h); i++ {
|
|
||||||
if !validHostByte[h[i]] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// validHostByte marks, by index, the bytes permitted in a Host header
// value. See the ValidHostHeader comment.
var validHostByte = [256]bool{
	'0': true, '1': true, '2': true, '3': true, '4': true,
	'5': true, '6': true, '7': true, '8': true, '9': true,

	'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true,
	'g': true, 'h': true, 'i': true, 'j': true, 'k': true, 'l': true,
	'm': true, 'n': true, 'o': true, 'p': true, 'q': true, 'r': true,
	's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
	'y': true, 'z': true,

	'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true,
	'G': true, 'H': true, 'I': true, 'J': true, 'K': true, 'L': true,
	'M': true, 'N': true, 'O': true, 'P': true, 'Q': true, 'R': true,
	'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
	'Y': true, 'Z': true,

	// unreserved (besides ALPHA and DIGIT)
	'-': true, '.': true, '_': true, '~': true,

	// sub-delims
	'!': true, '$': true, '&': true, '\'': true, '(': true, ')': true,
	'*': true, '+': true, ',': true, ';': true, '=': true,

	'%': true, // pct-encoded (and used in IPv6 zones)
	':': true, // IPv6address + Host expression's optional port
	'[': true, ']': true, // IP-literal brackets
}
|
|
||||||
|
|
||||||
// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
|
|
||||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
|
|
||||||
//
|
|
||||||
// message-header = field-name ":" [ field-value ]
|
|
||||||
// field-value = *( field-content | LWS )
|
|
||||||
// field-content = <the OCTETs making up the field-value
|
|
||||||
// and consisting of either *TEXT or combinations
|
|
||||||
// of token, separators, and quoted-string>
|
|
||||||
//
|
|
||||||
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
|
|
||||||
//
|
|
||||||
// TEXT = <any OCTET except CTLs,
|
|
||||||
// but including LWS>
|
|
||||||
// LWS = [CRLF] 1*( SP | HT )
|
|
||||||
// CTL = <any US-ASCII control character
|
|
||||||
// (octets 0 - 31) and DEL (127)>
|
|
||||||
//
|
|
||||||
// RFC 7230 says:
|
|
||||||
// field-value = *( field-content / obs-fold )
|
|
||||||
// obj-fold = N/A to http2, and deprecated
|
|
||||||
// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
|
|
||||||
// field-vchar = VCHAR / obs-text
|
|
||||||
// obs-text = %x80-FF
|
|
||||||
// VCHAR = "any visible [USASCII] character"
|
|
||||||
//
|
|
||||||
// http2 further says: "Similarly, HTTP/2 allows header field values
|
|
||||||
// that are not valid. While most of the values that can be encoded
|
|
||||||
// will not alter header field parsing, carriage return (CR, ASCII
|
|
||||||
// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
|
|
||||||
// 0x0) might be exploited by an attacker if they are translated
|
|
||||||
// verbatim. Any request or response that contains a character not
|
|
||||||
// permitted in a header field value MUST be treated as malformed
|
|
||||||
// (Section 8.1.2.6). Valid characters are defined by the
|
|
||||||
// field-content ABNF rule in Section 3.2 of [RFC7230]."
|
|
||||||
//
|
|
||||||
// This function does not (yet?) properly handle the rejection of
|
|
||||||
// strings that begin or end with SP or HTAB.
|
|
||||||
func ValidHeaderFieldValue(v string) bool {
|
|
||||||
for i := 0; i < len(v); i++ {
|
|
||||||
b := v[i]
|
|
||||||
if isCTL(b) && !isLWS(b) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// isASCII reports whether every byte of s is below utf8.RuneSelf,
// i.e. s is pure 7-bit ASCII.
func isASCII(s string) bool {
	for _, c := range []byte(s) {
		if c >= utf8.RuneSelf {
			return false
		}
	}
	return true
}
|
|
||||||
|
|
||||||
// PunycodeHostPort returns the IDNA Punycode version
|
|
||||||
// of the provided "host" or "host:port" string.
|
|
||||||
func PunycodeHostPort(v string) (string, error) {
|
|
||||||
if isASCII(v) {
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(v)
|
|
||||||
if err != nil {
|
|
||||||
// The input 'v' argument was just a "host" argument,
|
|
||||||
// without a port. This error should not be returned
|
|
||||||
// to the caller.
|
|
||||||
host = v
|
|
||||||
port = ""
|
|
||||||
}
|
|
||||||
host, err = idna.ToASCII(host)
|
|
||||||
if err != nil {
|
|
||||||
// Non-UTF-8? Not representable in Punycode, in any
|
|
||||||
// case.
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if port == "" {
|
|
||||||
return host, nil
|
|
||||||
}
|
|
||||||
return net.JoinHostPort(host, port), nil
|
|
||||||
}
|
|
2
vendor/golang.org/x/net/http2/.gitignore
generated
vendored
2
vendor/golang.org/x/net/http2/.gitignore
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
*~
|
|
||||||
h2i/h2i
|
|
51
vendor/golang.org/x/net/http2/Dockerfile
generated
vendored
51
vendor/golang.org/x/net/http2/Dockerfile
generated
vendored
@ -1,51 +0,0 @@
|
|||||||
#
|
|
||||||
# This Dockerfile builds a recent curl with HTTP/2 client support, using
|
|
||||||
# a recent nghttp2 build.
|
|
||||||
#
|
|
||||||
# See the Makefile for how to tag it. If Docker and that image is found, the
|
|
||||||
# Go tests use this curl binary for integration tests.
|
|
||||||
#
|
|
||||||
|
|
||||||
FROM ubuntu:trusty
|
|
||||||
|
|
||||||
RUN apt-get update && \
|
|
||||||
apt-get upgrade -y && \
|
|
||||||
apt-get install -y git-core build-essential wget
|
|
||||||
|
|
||||||
RUN apt-get install -y --no-install-recommends \
|
|
||||||
autotools-dev libtool pkg-config zlib1g-dev \
|
|
||||||
libcunit1-dev libssl-dev libxml2-dev libevent-dev \
|
|
||||||
automake autoconf
|
|
||||||
|
|
||||||
# The list of packages nghttp2 recommends for h2load:
|
|
||||||
RUN apt-get install -y --no-install-recommends make binutils \
|
|
||||||
autoconf automake autotools-dev \
|
|
||||||
libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
|
|
||||||
libev-dev libevent-dev libjansson-dev libjemalloc-dev \
|
|
||||||
cython python3.4-dev python-setuptools
|
|
||||||
|
|
||||||
# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
|
|
||||||
ENV NGHTTP2_VER 895da9a
|
|
||||||
RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
|
|
||||||
|
|
||||||
WORKDIR /root/nghttp2
|
|
||||||
RUN git reset --hard $NGHTTP2_VER
|
|
||||||
RUN autoreconf -i
|
|
||||||
RUN automake
|
|
||||||
RUN autoconf
|
|
||||||
RUN ./configure
|
|
||||||
RUN make
|
|
||||||
RUN make install
|
|
||||||
|
|
||||||
WORKDIR /root
|
|
||||||
RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
|
|
||||||
RUN tar -zxvf curl-7.45.0.tar.gz
|
|
||||||
WORKDIR /root/curl-7.45.0
|
|
||||||
RUN ./configure --with-ssl --with-nghttp2=/usr/local
|
|
||||||
RUN make
|
|
||||||
RUN make install
|
|
||||||
RUN ldconfig
|
|
||||||
|
|
||||||
CMD ["-h"]
|
|
||||||
ENTRYPOINT ["/usr/local/bin/curl"]
|
|
||||||
|
|
3
vendor/golang.org/x/net/http2/Makefile
generated
vendored
3
vendor/golang.org/x/net/http2/Makefile
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
curlimage:
|
|
||||||
docker build -t gohttp2/curl .
|
|
||||||
|
|
20
vendor/golang.org/x/net/http2/README
generated
vendored
20
vendor/golang.org/x/net/http2/README
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
This is a work-in-progress HTTP/2 implementation for Go.
|
|
||||||
|
|
||||||
It will eventually live in the Go standard library and won't require
|
|
||||||
any changes to your code to use. It will just be automatic.
|
|
||||||
|
|
||||||
Status:
|
|
||||||
|
|
||||||
* The server support is pretty good. A few things are missing
|
|
||||||
but are being worked on.
|
|
||||||
* The client work has just started but, since it shares a lot of code
  with the server, is coming along much quicker.
|
|
||||||
|
|
||||||
Docs are at https://godoc.org/golang.org/x/net/http2
|
|
||||||
|
|
||||||
Demo test server at https://http2.golang.org/
|
|
||||||
|
|
||||||
Help & bug reports welcome!
|
|
||||||
|
|
||||||
Contributing: https://golang.org/doc/contribute.html
|
|
||||||
Bugs: https://golang.org/issue/new?title=x/net/http2:+
|
|
641
vendor/golang.org/x/net/http2/ciphers.go
generated
vendored
641
vendor/golang.org/x/net/http2/ciphers.go
generated
vendored
@ -1,641 +0,0 @@
|
|||||||
// Copyright 2017 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
// A list of the possible cipher suite ids. Taken from
|
|
||||||
// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
|
|
||||||
|
|
||||||
const (
|
|
||||||
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
|
|
||||||
cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
|
|
||||||
cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
|
|
||||||
cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
|
|
||||||
cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
|
|
||||||
cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
|
|
||||||
cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
|
|
||||||
cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
|
|
||||||
cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
|
|
||||||
cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
|
|
||||||
cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
|
|
||||||
cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
|
|
||||||
cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
|
|
||||||
cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
|
|
||||||
cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
|
|
||||||
cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
|
|
||||||
cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
|
|
||||||
cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
|
|
||||||
cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
|
|
||||||
cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
|
|
||||||
cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
|
|
||||||
cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
|
|
||||||
cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
|
|
||||||
cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
|
|
||||||
cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
|
|
||||||
cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
|
|
||||||
// Reserved uint16 = 0x001C-1D
|
|
||||||
cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
|
|
||||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
|
|
||||||
cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
|
|
||||||
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
|
|
||||||
cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
|
|
||||||
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
|
|
||||||
cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
|
|
||||||
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
|
|
||||||
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
|
|
||||||
cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
|
|
||||||
// Reserved uint16 = 0x0047-4F
|
|
||||||
// Reserved uint16 = 0x0050-58
|
|
||||||
// Reserved uint16 = 0x0059-5C
|
|
||||||
// Unassigned uint16 = 0x005D-5F
|
|
||||||
// Reserved uint16 = 0x0060-66
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
|
|
||||||
// Unassigned uint16 = 0x006E-83
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
|
|
||||||
cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
|
|
||||||
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
|
|
||||||
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
|
|
||||||
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
|
|
||||||
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
|
|
||||||
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
|
|
||||||
cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
|
|
||||||
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
|
|
||||||
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
|
|
||||||
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
|
|
||||||
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
|
|
||||||
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
|
|
||||||
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
|
|
||||||
cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
|
|
||||||
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
|
|
||||||
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
|
|
||||||
cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
|
|
||||||
cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
|
|
||||||
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
|
|
||||||
cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
|
|
||||||
// Unassigned uint16 = 0x00C6-FE
|
|
||||||
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
|
|
||||||
// Unassigned uint16 = 0x01-55,*
|
|
||||||
cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
|
|
||||||
// Unassigned uint16 = 0x5601 - 0xC000
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
|
|
||||||
cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
|
|
||||||
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
|
|
||||||
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
|
|
||||||
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
|
|
||||||
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
|
|
||||||
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
|
|
||||||
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
|
|
||||||
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
|
|
||||||
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
|
|
||||||
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
|
|
||||||
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
|
|
||||||
cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
|
|
||||||
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
|
|
||||||
cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
|
|
||||||
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
|
|
||||||
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
|
|
||||||
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
|
|
||||||
cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
|
|
||||||
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
|
|
||||||
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
|
|
||||||
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
|
|
||||||
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
|
|
||||||
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
|
|
||||||
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
|
|
||||||
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
|
|
||||||
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
|
|
||||||
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
|
|
||||||
cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
|
|
||||||
cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
|
|
||||||
cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
|
|
||||||
cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
|
|
||||||
cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
|
|
||||||
cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
|
|
||||||
cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
|
|
||||||
cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
|
|
||||||
// Unassigned uint16 = 0xC0B0-FF
|
|
||||||
// Unassigned uint16 = 0xC1-CB,*
|
|
||||||
// Unassigned uint16 = 0xCC00-A7
|
|
||||||
cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
|
|
||||||
cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
|
|
||||||
cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
|
|
||||||
cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
|
|
||||||
cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
|
|
||||||
cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
|
|
||||||
cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
|
|
||||||
)
|
|
||||||
|
|
||||||
// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
	switch cipher {
	// Every suite below appears in RFC 7540 Appendix A's blacklist.
	case cipher_TLS_NULL_WITH_NULL_NULL,
		cipher_TLS_RSA_WITH_NULL_MD5,
		cipher_TLS_RSA_WITH_NULL_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_RSA_WITH_RC4_128_MD5,
		cipher_TLS_RSA_WITH_RC4_128_SHA,
		cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
		cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_DH_anon_WITH_RC4_128_MD5,
		cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
		cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
		cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_SHA,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_KRB5_WITH_RC4_128_SHA,
		cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
		cipher_TLS_KRB5_WITH_DES_CBC_MD5,
		cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
		cipher_TLS_KRB5_WITH_RC4_128_MD5,
		cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
		cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
		cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
		cipher_TLS_PSK_WITH_NULL_SHA,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_NULL_SHA256,
		cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
		cipher_TLS_PSK_WITH_RC4_128_SHA,
		cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
		cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
		cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
		cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
		cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_NULL_SHA256,
		cipher_TLS_PSK_WITH_NULL_SHA384,
		cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
		cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
		cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
		cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
		cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_NULL_SHA,
		cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
		cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
		cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
		cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
		cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
		cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
		cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
		cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
		cipher_TLS_RSA_WITH_AES_128_CCM,
		cipher_TLS_RSA_WITH_AES_256_CCM,
		cipher_TLS_RSA_WITH_AES_128_CCM_8,
		cipher_TLS_RSA_WITH_AES_256_CCM_8,
		cipher_TLS_PSK_WITH_AES_128_CCM,
		cipher_TLS_PSK_WITH_AES_256_CCM,
		cipher_TLS_PSK_WITH_AES_128_CCM_8,
		cipher_TLS_PSK_WITH_AES_256_CCM_8:
		return true
	default:
		// Any suite not explicitly blacklisted is acceptable for HTTP/2.
		return false
	}
}
|
|
282
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
282
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
@ -1,282 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Transport code's client connection pooling.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ClientConnPool manages a pool of HTTP/2 client connections.
type ClientConnPool interface {
	// GetClientConn returns a ClientConn to addr that can carry req,
	// or an error if none is available.
	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
	// MarkDead tells the pool to stop handing out the given connection.
	MarkDead(*ClientConn)
}
|
|
||||||
|
|
||||||
// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
// implementations which can close their idle connections.
type clientConnPoolIdleCloser interface {
	ClientConnPool
	// closeIdleConnections closes connections that are currently idle.
	closeIdleConnections()
}
|
|
||||||
|
|
||||||
// Compile-time checks that both pool implementations satisfy
// clientConnPoolIdleCloser.
var (
	_ clientConnPoolIdleCloser = (*clientConnPool)(nil)
	_ clientConnPoolIdleCloser = noDialClientConnPool{}
)
|
|
||||||
|
|
||||||
// TODO: use singleflight for dialing and addConnCalls?

// clientConnPool is the default ClientConnPool implementation: it caches
// connections per host:port and coalesces concurrent dials.
type clientConnPool struct {
	t *Transport

	mu sync.Mutex // TODO: maybe switch to RWMutex
	// TODO: add support for sharing conns based on cert names
	// (e.g. share conn for googleapis.com and appspot.com)
	conns        map[string][]*ClientConn // key is host:port
	dialing      map[string]*dialCall     // currently in-flight dials
	keys         map[*ClientConn][]string // reverse index: conn -> keys it serves
	addConnCalls map[string]*addConnCall  // in-flight addConnIfNeeded calls
}
|
|
||||||
|
|
||||||
// GetClientConn implements ClientConnPool, dialing a new connection on a
// pool miss.
func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, dialOnMiss)
}
|
|
||||||
|
|
||||||
// Values for getClientConn's dialOnMiss parameter.
const (
	dialOnMiss   = true
	noDialOnMiss = false
)
|
|
||||||
|
|
||||||
// shouldTraceGetConn reports whether getClientConn should call any
|
|
||||||
// ClientTrace.GetConn hook associated with the http.Request.
|
|
||||||
//
|
|
||||||
// This complexity is needed to avoid double calls of the GetConn hook
|
|
||||||
// during the back-and-forth between net/http and x/net/http2 (when the
|
|
||||||
// net/http.Transport is upgraded to also speak http2), as well as support
|
|
||||||
// the case where x/net/http2 is being used directly.
|
|
||||||
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
|
|
||||||
// If our Transport wasn't made via ConfigureTransport, always
|
|
||||||
// trace the GetConn hook if provided, because that means the
|
|
||||||
// http2 package is being used directly and it's the one
|
|
||||||
// dialing, as opposed to net/http.
|
|
||||||
if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Otherwise, only use the GetConn hook if this connection has
|
|
||||||
// been used previously for other requests. For fresh
|
|
||||||
// connections, the net/http package does the dialing.
|
|
||||||
return !st.freshConn
|
|
||||||
}
|
|
||||||
|
|
||||||
// getClientConn returns a connection to addr able to carry req. When
// dialOnMiss is true a pool miss starts (or joins) a dial; otherwise a
// miss returns ErrNoCachedConn.
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
	if isConnectionCloseRequest(req) && dialOnMiss {
		// It gets its own connection.
		traceGetConn(req, addr)
		const singleUse = true
		cc, err := p.t.dialClientConn(addr, singleUse)
		if err != nil {
			return nil, err
		}
		return cc, nil
	}
	p.mu.Lock()
	// Reuse any cached connection that can still accept a request.
	for _, cc := range p.conns[addr] {
		if st := cc.idleState(); st.canTakeNewRequest {
			if p.shouldTraceGetConn(st) {
				traceGetConn(req, addr)
			}
			p.mu.Unlock()
			return cc, nil
		}
	}
	if !dialOnMiss {
		p.mu.Unlock()
		return nil, ErrNoCachedConn
	}
	traceGetConn(req, addr)
	// Start (or join) the single in-flight dial for addr, then wait for
	// it outside the lock.
	call := p.getStartDialLocked(addr)
	p.mu.Unlock()
	<-call.done
	return call.res, call.err
}
|
|
||||||
|
|
||||||
// dialCall is an in-flight Transport dial call to a host.
type dialCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	res  *ClientConn   // valid after done is closed
	err  error         // valid after done is closed
}
|
|
||||||
|
|
||||||
// requires p.mu is held.
|
|
||||||
func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
|
|
||||||
if call, ok := p.dialing[addr]; ok {
|
|
||||||
// A dial is already in-flight. Don't start another.
|
|
||||||
return call
|
|
||||||
}
|
|
||||||
call := &dialCall{p: p, done: make(chan struct{})}
|
|
||||||
if p.dialing == nil {
|
|
||||||
p.dialing = make(map[string]*dialCall)
|
|
||||||
}
|
|
||||||
p.dialing[addr] = call
|
|
||||||
go call.dial(addr)
|
|
||||||
return call
|
|
||||||
}
|
|
||||||
|
|
||||||
// dial performs the dial for addr and publishes the result.
// run in its own goroutine.
func (c *dialCall) dial(addr string) {
	const singleUse = false // shared conn
	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
	// Wake waiters before re-taking the pool lock; res/err are set above.
	close(c.done)

	c.p.mu.Lock()
	delete(c.p.dialing, addr)
	if c.err == nil {
		// Register the new connection so later lookups can reuse it.
		c.p.addConnLocked(addr, c.res)
	}
	c.p.mu.Unlock()
}
|
|
||||||
|
|
||||||
// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
// already exist. It coalesces concurrent calls with the same key.
// This is used by the http1 Transport code when it creates a new connection. Because
// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
// the protocol), it can get into a situation where it has multiple TLS connections.
// This code decides which ones live or die.
// The return value used is whether c was used.
// c is never closed.
func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
	p.mu.Lock()
	for _, cc := range p.conns[key] {
		if cc.CanTakeNewRequest() {
			// An existing usable conn wins; c is left unused.
			p.mu.Unlock()
			return false, nil
		}
	}
	call, dup := p.addConnCalls[key]
	if !dup {
		if p.addConnCalls == nil {
			p.addConnCalls = make(map[string]*addConnCall)
		}
		call = &addConnCall{
			p:    p,
			done: make(chan struct{}),
		}
		p.addConnCalls[key] = call
		go call.run(t, key, c)
	}
	p.mu.Unlock()

	// Wait for the (possibly shared) setup to finish outside the lock.
	<-call.done
	if call.err != nil {
		return false, call.err
	}
	// Only the caller that started the run actually consumed c.
	return !dup, nil
}
|
|
||||||
|
|
||||||
// addConnCall is an in-flight addConnIfNeeded operation for a single key.
type addConnCall struct {
	p    *clientConnPool
	done chan struct{} // closed when done
	err  error         // valid after done is closed
}
|
|
||||||
|
|
||||||
// run builds a ClientConn from tc and records the outcome; run in its own
// goroutine on behalf of addConnIfNeeded.
func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
	cc, err := t.NewClientConn(tc)

	p := c.p
	p.mu.Lock()
	if err != nil {
		c.err = err
	} else {
		p.addConnLocked(key, cc)
	}
	delete(p.addConnCalls, key)
	p.mu.Unlock()
	// Signal waiters only after the result is published under the lock.
	close(c.done)
}
|
|
||||||
|
|
||||||
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
|
|
||||||
p.mu.Lock()
|
|
||||||
p.addConnLocked(key, cc)
|
|
||||||
p.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// p.mu must be held
|
|
||||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
|
||||||
for _, v := range p.conns[key] {
|
|
||||||
if v == cc {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.conns == nil {
|
|
||||||
p.conns = make(map[string][]*ClientConn)
|
|
||||||
}
|
|
||||||
if p.keys == nil {
|
|
||||||
p.keys = make(map[*ClientConn][]string)
|
|
||||||
}
|
|
||||||
p.conns[key] = append(p.conns[key], cc)
|
|
||||||
p.keys[cc] = append(p.keys[cc], key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *clientConnPool) MarkDead(cc *ClientConn) {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
for _, key := range p.keys[cc] {
|
|
||||||
vv, ok := p.conns[key]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
newList := filterOutClientConn(vv, cc)
|
|
||||||
if len(newList) > 0 {
|
|
||||||
p.conns[key] = newList
|
|
||||||
} else {
|
|
||||||
delete(p.conns, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(p.keys, cc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *clientConnPool) closeIdleConnections() {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
// TODO: don't close a cc if it was just added to the pool
|
|
||||||
// milliseconds ago and has never been used. There's currently
|
|
||||||
// a small race window with the HTTP/1 Transport's integration
|
|
||||||
// where it can add an idle conn just before using it, and
|
|
||||||
// somebody else can concurrently call CloseIdleConns and
|
|
||||||
// break some caller's RoundTrip.
|
|
||||||
for _, vv := range p.conns {
|
|
||||||
for _, cc := range vv {
|
|
||||||
cc.closeIfIdle()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
|
|
||||||
out := in[:0]
|
|
||||||
for _, v := range in {
|
|
||||||
if v != exclude {
|
|
||||||
out = append(out, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If we filtered it out, zero out the last item to prevent
|
|
||||||
// the GC from seeing it.
|
|
||||||
if len(in) != len(out) {
|
|
||||||
in[len(in)-1] = nil
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// noDialClientConnPool is an implementation of http2.ClientConnPool
// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }
|
|
||||||
|
|
||||||
// GetClientConn implements ClientConnPool without ever dialing: it only
// returns a cached connection (or ErrNoCachedConn on a miss).
func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
	return p.getClientConn(req, addr, noDialOnMiss)
}
|
|
146
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
146
vendor/golang.org/x/net/http2/databuffer.go
generated
vendored
@ -1,146 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
	// dataChunkSizeClasses lists the available chunk sizes, smallest first;
	// dataChunkPools is index-aligned with it.
	dataChunkSizeClasses = []int{
		1 << 10,
		2 << 10,
		4 << 10,
		8 << 10,
		16 << 10,
	}
	dataChunkPools = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 2<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 8<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)
|
|
||||||
|
|
||||||
func getDataBufferChunk(size int64) []byte {
|
|
||||||
i := 0
|
|
||||||
for ; i < len(dataChunkSizeClasses)-1; i++ {
|
|
||||||
if size <= int64(dataChunkSizeClasses[i]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return dataChunkPools[i].Get().([]byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putDataBufferChunk(p []byte) {
|
|
||||||
for i, n := range dataChunkSizeClasses {
|
|
||||||
if len(p) == n {
|
|
||||||
dataChunkPools[i].Put(p)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

// errReadEmpty is returned by Read when no unread bytes are buffered.
var errReadEmpty = errors.New("read from empty dataBuffer")
|
|
||||||
|
|
||||||
// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			// Recycle the drained chunk, then shift the remaining
			// chunks down one slot; nil the vacated tail entry so the
			// recycled chunk is not retained via the slice.
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}
|
|
||||||
|
|
||||||
func (b *dataBuffer) bytesFromFirstChunk() []byte {
|
|
||||||
if len(b.chunks) == 1 {
|
|
||||||
return b.chunks[0][b.r:b.w]
|
|
||||||
}
|
|
||||||
return b.chunks[0][b.r:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of bytes of the unread portion of the buffer.
// It is O(1): size is maintained incrementally by Read and Write.
func (b *dataBuffer) Len() int {
	return b.size
}
|
|
||||||
|
|
||||||
// Write appends p to the buffer.
// It always consumes all of p and returns (len(p), nil).
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		// Track the remaining expected bytes so future allocations
		// can be sized appropriately (may go <= 0, which is ignored).
		b.expected -= int64(n)
	}
	return ntotal, nil
}
|
|
||||||
|
|
||||||
func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
|
|
||||||
if len(b.chunks) != 0 {
|
|
||||||
last := b.chunks[len(b.chunks)-1]
|
|
||||||
if b.w < len(last) {
|
|
||||||
return last
|
|
||||||
}
|
|
||||||
}
|
|
||||||
chunk := getDataBufferChunk(want)
|
|
||||||
b.chunks = append(b.chunks, chunk)
|
|
||||||
b.w = 0
|
|
||||||
return chunk
|
|
||||||
}
|
|
133
vendor/golang.org/x/net/http2/errors.go
generated
vendored
133
vendor/golang.org/x/net/http2/errors.go
generated
vendored
@ -1,133 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32

// Error codes defined by RFC 7540 section 7.
const (
	ErrCodeNo                 ErrCode = 0x0
	ErrCodeProtocol           ErrCode = 0x1
	ErrCodeInternal           ErrCode = 0x2
	ErrCodeFlowControl        ErrCode = 0x3
	ErrCodeSettingsTimeout    ErrCode = 0x4
	ErrCodeStreamClosed       ErrCode = 0x5
	ErrCodeFrameSize          ErrCode = 0x6
	ErrCodeRefusedStream      ErrCode = 0x7
	ErrCodeCancel             ErrCode = 0x8
	ErrCodeCompression        ErrCode = 0x9
	ErrCodeConnect            ErrCode = 0xa
	ErrCodeEnhanceYourCalm    ErrCode = 0xb
	ErrCodeInadequateSecurity ErrCode = 0xc
	ErrCodeHTTP11Required     ErrCode = 0xd
)

// errCodeName maps each known ErrCode to its RFC 7540 mnemonic.
var errCodeName = map[ErrCode]string{
	ErrCodeNo:                 "NO_ERROR",
	ErrCodeProtocol:           "PROTOCOL_ERROR",
	ErrCodeInternal:           "INTERNAL_ERROR",
	ErrCodeFlowControl:        "FLOW_CONTROL_ERROR",
	ErrCodeSettingsTimeout:    "SETTINGS_TIMEOUT",
	ErrCodeStreamClosed:       "STREAM_CLOSED",
	ErrCodeFrameSize:          "FRAME_SIZE_ERROR",
	ErrCodeRefusedStream:      "REFUSED_STREAM",
	ErrCodeCancel:             "CANCEL",
	ErrCodeCompression:        "COMPRESSION_ERROR",
	ErrCodeConnect:            "CONNECT_ERROR",
	ErrCodeEnhanceYourCalm:    "ENHANCE_YOUR_CALM",
	ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
	ErrCodeHTTP11Required:     "HTTP_1_1_REQUIRED",
}

// String returns the RFC 7540 name for e, or a hex description for
// codes this package does not know about.
func (e ErrCode) String() string {
	if s, ok := errCodeName[e]; ok {
		return s
	}
	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
|
|
||||||
|
|
||||||
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

// Error implements the error interface using the underlying ErrCode's name.
func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }

// StreamError is an error that only affects one stream within an
// HTTP/2 connection.
type StreamError struct {
	StreamID uint32
	Code     ErrCode
	Cause    error // optional additional detail
}

// streamError constructs a StreamError for stream id with no Cause.
func streamError(id uint32, code ErrCode) StreamError {
	return StreamError{StreamID: id, Code: code}
}

// Error includes the optional Cause only when one was recorded.
func (e StreamError) Error() string {
	if e.Cause != nil {
		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
	}
	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
|
|
||||||
|
|
||||||
// 6.9.1 The Flow Control Window
// "If a sender receives a WINDOW_UPDATE that causes a flow control
// window to exceed this maximum it MUST terminate either the stream
// or the connection, as appropriate. For streams, [...]; for the
// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
type goAwayFlowError struct{}

// Error implements the error interface.
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }

// connError represents an HTTP/2 ConnectionError error code, along
// with a string (for debugging) explaining why.
//
// Errors of this type are only returned by the frame parser functions
// and converted into ConnectionError(Code), after stashing away
// the Reason into the Framer's errDetail field, accessible via
// the (*Framer).ErrorDetail method.
type connError struct {
	Code   ErrCode // the ConnectionError error code
	Reason string  // additional reason
}

// Error implements the error interface.
func (e connError) Error() string {
	return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
}
|
|
||||||
|
|
||||||
// pseudoHeaderError is reported for a pseudo-header field whose name
// is invalid.
type pseudoHeaderError string

func (e pseudoHeaderError) Error() string { return fmt.Sprintf("invalid pseudo-header %q", string(e)) }

// duplicatePseudoHeaderError is reported when the same pseudo-header
// appears more than once in a header block.
type duplicatePseudoHeaderError string

func (e duplicatePseudoHeaderError) Error() string {
	return fmt.Sprintf("duplicate pseudo-header %q", string(e))
}

// headerFieldNameError is reported for an invalid header field name.
type headerFieldNameError string

func (e headerFieldNameError) Error() string {
	return fmt.Sprintf("invalid header field name %q", string(e))
}

// headerFieldValueError is reported for an invalid header field value.
type headerFieldValueError string

func (e headerFieldValueError) Error() string {
	return fmt.Sprintf("invalid header field value %q", string(e))
}
|
|
||||||
|
|
||||||
// Pseudo-header ordering/typing violations detected while reading
// header blocks.
var (
	errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
	errPseudoAfterRegular   = errors.New("pseudo header field after regular")
)
|
|
50
vendor/golang.org/x/net/http2/flow.go
generated
vendored
50
vendor/golang.org/x/net/http2/flow.go
generated
vendored
@ -1,50 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Flow control
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
// flow is the flow control window's size.
type flow struct {
	// n is the number of DATA bytes we're allowed to send.
	// A flow is kept both on a conn and a per-stream.
	n int32

	// conn points to the shared connection-level flow that is
	// shared by all streams on that conn. It is nil for the flow
	// that's on the conn directly.
	conn *flow
}

// setConnFlow links f to the shared connection-level window cf.
func (f *flow) setConnFlow(cf *flow) { f.conn = cf }

// available reports how many bytes may be sent right now: the local
// window, further capped by the connection window when one is linked.
func (f *flow) available() int32 {
	if f.conn != nil && f.conn.n < f.n {
		return f.conn.n
	}
	return f.n
}

// take consumes n bytes from this window (and from the linked
// connection window, if any). Taking more than available returns is a
// programming error and panics.
func (f *flow) take(n int32) {
	if n > f.available() {
		panic("internal error: took too much")
	}
	f.n -= n
	if f.conn != nil {
		f.conn.n -= n
	}
}

// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
	sum := f.n + n
	// Signed-overflow check: the sum is valid exactly when its
	// relation to n matches the sign of the previous window.
	if (sum > n) == (f.n > 0) {
		f.n = sum
		return true
	}
	return false
}
|
|
1614
vendor/golang.org/x/net/http2/frame.go
generated
vendored
1614
vendor/golang.org/x/net/http2/frame.go
generated
vendored
File diff suppressed because it is too large
Load Diff
29
vendor/golang.org/x/net/http2/go111.go
generated
vendored
29
vendor/golang.org/x/net/http2/go111.go
generated
vendored
@ -1,29 +0,0 @@
|
|||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.11
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http/httptrace"
|
|
||||||
"net/textproto"
|
|
||||||
)
|
|
||||||
|
|
||||||
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
|
|
||||||
return trace != nil && trace.WroteHeaderField != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
|
|
||||||
if trace != nil && trace.WroteHeaderField != nil {
|
|
||||||
trace.WroteHeaderField(k, []string{v})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
|
|
||||||
if trace != nil {
|
|
||||||
return trace.Got1xxResponse
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
170
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
170
vendor/golang.org/x/net/http2/gotrack.go
generated
vendored
@ -1,170 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Defensive debug-only utility to track that functions run on the
|
|
||||||
// goroutine that they're supposed to.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DebugGoroutines enables goroutine-ownership assertions when the
// DEBUG_HTTP2_GOROUTINES=1 environment variable is set.
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"

// goroutineLock records the ID of the goroutine that created it so
// later calls can assert they run on (or off) that goroutine.
type goroutineLock uint64

// newGoroutineLock captures the current goroutine's ID. When
// DebugGoroutines is off it returns a zero lock and all checks are
// no-ops.
func newGoroutineLock() goroutineLock {
	if !DebugGoroutines {
		return 0
	}
	return goroutineLock(curGoroutineID())
}

// check panics if called from a goroutine other than the one that
// created g. No-op unless DebugGoroutines is enabled.
func (g goroutineLock) check() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() != uint64(g) {
		panic("running on the wrong goroutine")
	}
}

// checkNotOn panics if called from the very goroutine that created g.
// No-op unless DebugGoroutines is enabled.
func (g goroutineLock) checkNotOn() {
	if !DebugGoroutines {
		return
	}
	if curGoroutineID() == uint64(g) {
		panic("running on the wrong goroutine")
	}
}

// goroutineSpace is the prefix of the first line of a runtime.Stack
// dump: "goroutine <id> [...".
var goroutineSpace = []byte("goroutine ")

// curGoroutineID extracts the current goroutine's numeric ID by
// parsing the first line of a runtime.Stack dump. Debug-only; panics
// if the stack header does not look as expected.
func curGoroutineID() uint64 {
	bp := littleBuf.Get().(*[]byte)
	defer littleBuf.Put(bp)
	b := *bp
	b = b[:runtime.Stack(b, false)]
	// Parse the 4707 out of "goroutine 4707 ["
	b = bytes.TrimPrefix(b, goroutineSpace)
	i := bytes.IndexByte(b, ' ')
	if i < 0 {
		panic(fmt.Sprintf("No space found in %q", b))
	}
	b = b[:i]
	n, err := parseUintBytes(b, 10, 64)
	if err != nil {
		panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
	}
	return n
}

// littleBuf pools small scratch buffers for curGoroutineID; 64 bytes
// is enough for the first line of a stack dump.
var littleBuf = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 64)
		return &buf
	},
}
|
|
||||||
|
|
||||||
// parseUintBytes is like strconv.ParseUint, but using a []byte.
// It avoids the string allocation strconv.ParseUint would force on
// callers who already hold the digits as bytes.
func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
	var cutoff, maxVal uint64

	// bitSize 0 means the native int size, mirroring strconv.
	if bitSize == 0 {
		bitSize = int(strconv.IntSize)
	}

	s0 := s // keep original operand for error reporting
	switch {
	case len(s) < 1:
		err = strconv.ErrSyntax
		goto Error

	case 2 <= base && base <= 36:
		// valid base; nothing to do

	case base == 0:
		// Look for octal, hex prefix.
		switch {
		case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
			base = 16
			s = s[2:]
			if len(s) < 1 {
				err = strconv.ErrSyntax
				goto Error
			}
		case s[0] == '0':
			base = 8
		default:
			base = 10
		}

	default:
		err = errors.New("invalid base " + strconv.Itoa(base))
		goto Error
	}

	n = 0
	cutoff = cutoff64(base)
	maxVal = 1<<uint(bitSize) - 1

	for i := 0; i < len(s); i++ {
		var v byte
		d := s[i]
		switch {
		case '0' <= d && d <= '9':
			v = d - '0'
		case 'a' <= d && d <= 'z':
			v = d - 'a' + 10
		case 'A' <= d && d <= 'Z':
			v = d - 'A' + 10
		default:
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}
		// Digit must be valid for the (possibly inferred) base.
		if int(v) >= base {
			n = 0
			err = strconv.ErrSyntax
			goto Error
		}

		if n >= cutoff {
			// n*base overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n *= uint64(base)

		n1 := n + uint64(v)
		if n1 < n || n1 > maxVal {
			// n+v overflows
			n = 1<<64 - 1
			err = strconv.ErrRange
			goto Error
		}
		n = n1
	}

	return n, nil

Error:
	// Wrap in *strconv.NumError so callers see the same error shape
	// as strconv.ParseUint.
	return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
}
|
|
||||||
|
|
||||||
// cutoff64 returns the first number n such that n*base >= 1<<64 —
// i.e. the smallest accumulator value at which one more multiply by
// base would overflow a uint64. Bases below 2 yield 0.
func cutoff64(base int) uint64 {
	if base >= 2 {
		return (1<<64-1)/uint64(base) + 1
	}
	return 0
}
|
|
88
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
88
vendor/golang.org/x/net/http2/headermap.go
generated
vendored
@ -1,88 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	commonBuildOnce   sync.Once
	commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
	commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)

// buildCommonHeaderMapsOnce populates the common-header maps exactly
// once, on first use.
func buildCommonHeaderMapsOnce() {
	commonBuildOnce.Do(buildCommonHeaderMaps)
}

// buildCommonHeaderMaps fills commonLowerHeader and commonCanonHeader
// with both case translations of a fixed list of frequently seen
// header names, so hot paths can skip per-request case conversion.
func buildCommonHeaderMaps() {
	common := []string{
		"accept",
		"accept-charset",
		"accept-encoding",
		"accept-language",
		"accept-ranges",
		"age",
		"access-control-allow-origin",
		"allow",
		"authorization",
		"cache-control",
		"content-disposition",
		"content-encoding",
		"content-language",
		"content-length",
		"content-location",
		"content-range",
		"content-type",
		"cookie",
		"date",
		"etag",
		"expect",
		"expires",
		"from",
		"host",
		"if-match",
		"if-modified-since",
		"if-none-match",
		"if-unmodified-since",
		"last-modified",
		"link",
		"location",
		"max-forwards",
		"proxy-authenticate",
		"proxy-authorization",
		"range",
		"referer",
		"refresh",
		"retry-after",
		"server",
		"set-cookie",
		"strict-transport-security",
		"trailer",
		"transfer-encoding",
		"user-agent",
		"vary",
		"via",
		"www-authenticate",
	}
	commonLowerHeader = make(map[string]string, len(common))
	commonCanonHeader = make(map[string]string, len(common))
	for _, lower := range common {
		canon := http.CanonicalHeaderKey(lower)
		commonLowerHeader[canon] = lower
		commonCanonHeader[lower] = canon
	}
}

// lowerHeader converts a Go-canonical header name to lower case,
// consulting the precomputed table first and falling back to
// strings.ToLower for uncommon names.
func lowerHeader(v string) string {
	buildCommonHeaderMapsOnce()
	if lower, ok := commonLowerHeader[v]; ok {
		return lower
	}
	return strings.ToLower(v)
}
|
|
240
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
240
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
@ -1,240 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package hpack
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	uint32Max              = ^uint32(0)
	initialHeaderTableSize = 4096 // RFC 7541 default dynamic table size
)

// An Encoder performs HPACK encoding of header fields, maintaining
// the dynamic table state shared with the peer's decoder.
type Encoder struct {
	dynTab dynamicTable
	// minSize is the minimum table size set by
	// SetMaxDynamicTableSize after the previous Header Table Size
	// Update.
	minSize uint32
	// maxSizeLimit is the maximum table size this encoder
	// supports. This will protect the encoder from too large
	// size.
	maxSizeLimit uint32
	// tableSizeUpdate indicates whether "Header Table Size
	// Update" is required.
	tableSizeUpdate bool
	w               io.Writer // destination for encoded output
	buf             []byte    // scratch buffer reused across WriteField calls
}

// NewEncoder returns a new Encoder which performs HPACK encoding. An
// encoded data is written to w.
func NewEncoder(w io.Writer) *Encoder {
	e := &Encoder{
		minSize:         uint32Max,
		maxSizeLimit:    initialHeaderTableSize,
		tableSizeUpdate: false,
		w:               w,
	}
	e.dynTab.table.init()
	e.dynTab.setMaxSize(initialHeaderTableSize)
	return e
}

// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
	e.buf = e.buf[:0]

	if e.tableSizeUpdate {
		e.tableSizeUpdate = false
		// Emit the intermediate minimum first if the size dipped
		// below the current maximum since the last update.
		if e.minSize < e.dynTab.maxSize {
			e.buf = appendTableSize(e.buf, e.minSize)
		}
		e.minSize = uint32Max
		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
	}

	idx, nameValueMatch := e.searchTable(f)
	if nameValueMatch {
		e.buf = appendIndexed(e.buf, idx)
	} else {
		indexing := e.shouldIndex(f)
		if indexing {
			e.dynTab.add(f)
		}

		if idx == 0 {
			e.buf = appendNewName(e.buf, f, indexing)
		} else {
			e.buf = appendIndexedName(e.buf, f, idx, indexing)
		}
	}
	n, err := e.w.Write(e.buf)
	if err == nil && n != len(e.buf) {
		err = io.ErrShortWrite
	}
	return err
}

// searchTable searches f in both stable and dynamic header tables.
// The static header table is searched first. Only when there is no
// exact match for both name and value, the dynamic header table is
// then searched. If there is no match, i is 0. If both name and value
// match, i is the matched index and nameValueMatch becomes true. If
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
	i, nameValueMatch = staticTable.search(f)
	if nameValueMatch {
		return i, true
	}

	j, nameValueMatch := e.dynTab.table.search(f)
	if nameValueMatch || (i == 0 && j != 0) {
		// Dynamic-table indices follow the static table's.
		return j + uint64(staticTable.len()), nameValueMatch
	}

	return i, false
}

// SetMaxDynamicTableSize changes the dynamic header table size to v.
// The actual size is bounded by the value passed to
// SetMaxDynamicTableSizeLimit.
func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
	if v > e.maxSizeLimit {
		v = e.maxSizeLimit
	}
	if v < e.minSize {
		e.minSize = v
	}
	e.tableSizeUpdate = true
	e.dynTab.setMaxSize(v)
}

// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same size of the default dynamic header table
// size described in HPACK specification. If the current maximum
// dynamic header table size is strictly greater than v, "Header Table
// Size Update" will be done in the next WriteField call and the
// maximum dynamic header table size is truncated to v.
func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
	e.maxSizeLimit = v
	if e.dynTab.maxSize > v {
		e.tableSizeUpdate = true
		e.dynTab.setMaxSize(v)
	}
}

// shouldIndex reports whether f should be indexed.
func (e *Encoder) shouldIndex(f HeaderField) bool {
	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
|
|
||||||
|
|
||||||
// appendIndexed appends index i, as encoded in "Indexed Header Field"
|
|
||||||
// representation, to dst and returns the extended buffer.
|
|
||||||
func appendIndexed(dst []byte, i uint64) []byte {
|
|
||||||
first := len(dst)
|
|
||||||
dst = appendVarInt(dst, 7, i)
|
|
||||||
dst[first] |= 0x80
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendNewName appends f, as encoded in one of "Literal Header field
|
|
||||||
// - New Name" representation variants, to dst and returns the
|
|
||||||
// extended buffer.
|
|
||||||
//
|
|
||||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
|
||||||
// f.Sensitive is false and indexing is true, "Inremental Indexing"
|
|
||||||
// representation is used.
|
|
||||||
func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
|
|
||||||
dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
|
|
||||||
dst = appendHpackString(dst, f.Name)
|
|
||||||
return appendHpackString(dst, f.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendIndexedName appends f and index i referring indexed name
|
|
||||||
// entry, as encoded in one of "Literal Header field - Indexed Name"
|
|
||||||
// representation variants, to dst and returns the extended buffer.
|
|
||||||
//
|
|
||||||
// If f.Sensitive is true, "Never Indexed" representation is used. If
|
|
||||||
// f.Sensitive is false and indexing is true, "Incremental Indexing"
|
|
||||||
// representation is used.
|
|
||||||
func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
|
|
||||||
first := len(dst)
|
|
||||||
var n byte
|
|
||||||
if indexing {
|
|
||||||
n = 6
|
|
||||||
} else {
|
|
||||||
n = 4
|
|
||||||
}
|
|
||||||
dst = appendVarInt(dst, n, i)
|
|
||||||
dst[first] |= encodeTypeByte(indexing, f.Sensitive)
|
|
||||||
return appendHpackString(dst, f.Value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendTableSize appends v, as encoded in "Header Table Size Update"
|
|
||||||
// representation, to dst and returns the extended buffer.
|
|
||||||
func appendTableSize(dst []byte, v uint32) []byte {
|
|
||||||
first := len(dst)
|
|
||||||
dst = appendVarInt(dst, 5, uint64(v))
|
|
||||||
dst[first] |= 0x20
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendVarInt appends i, encoded as a variable-length integer with
// an n-bit prefix (RFC 7541 section 5.1), to dst and returns the
// extended buffer.
func appendVarInt(dst []byte, n byte, i uint64) []byte {
	limit := uint64(1)<<n - 1
	if i < limit {
		// Fits entirely in the prefix.
		return append(dst, byte(i))
	}
	// Saturate the prefix, then emit the remainder as 7-bit groups
	// with a continuation bit on all but the last.
	dst = append(dst, byte(limit))
	for i -= limit; i >= 128; i >>= 7 {
		dst = append(dst, byte(0x80|(i&0x7f)))
	}
	return append(dst, byte(i))
}
|
|
||||||
|
|
||||||
// appendHpackString appends s, as encoded in "String Literal"
|
|
||||||
// representation, to dst and returns the extended buffer.
|
|
||||||
//
|
|
||||||
// s will be encoded in Huffman codes only when it produces strictly
|
|
||||||
// shorter byte string.
|
|
||||||
func appendHpackString(dst []byte, s string) []byte {
|
|
||||||
huffmanLength := HuffmanEncodeLength(s)
|
|
||||||
if huffmanLength < uint64(len(s)) {
|
|
||||||
first := len(dst)
|
|
||||||
dst = appendVarInt(dst, 7, huffmanLength)
|
|
||||||
dst = AppendHuffmanString(dst, s)
|
|
||||||
dst[first] |= 0x80
|
|
||||||
} else {
|
|
||||||
dst = appendVarInt(dst, 7, uint64(len(s)))
|
|
||||||
dst = append(dst, s...)
|
|
||||||
}
|
|
||||||
return dst
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeTypeByte returns the representation type byte for a literal
// header field: "Never Indexed" (0x10) when sensitive, otherwise
// "Incremental Indexing" (0x40) when indexing, otherwise "Without
// Indexing" (0x00). Sensitivity takes precedence over indexing.
func encodeTypeByte(indexing, sensitive bool) byte {
	switch {
	case sensitive:
		return 0x10
	case indexing:
		return 0x40
	default:
		return 0
	}
}
|
|
504
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
504
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
@ -1,504 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package hpack implements HPACK, a compression format for
|
|
||||||
// efficiently representing HTTP header fields in the context of HTTP/2.
|
|
||||||
//
|
|
||||||
// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
|
|
||||||
package hpack
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A DecodingError is something the spec defines as a decoding error,
// wrapping the underlying cause.
type DecodingError struct {
	Err error
}

// Error implements the error interface by prefixing the wrapped error.
func (de DecodingError) Error() string { return fmt.Sprintf("decoding error: %v", de.Err) }
|
|
||||||
|
|
||||||
// An InvalidIndexError is returned when an encoder references a table
|
|
||||||
// entry before the static table or after the end of the dynamic table.
|
|
||||||
type InvalidIndexError int
|
|
||||||
|
|
||||||
// Error implements the error interface, reporting the bad index value.
func (e InvalidIndexError) Error() string {
	return fmt.Sprintf("invalid indexed representation index %d", int(e))
}
|
|
||||||
|
|
||||||
// A HeaderField is a name-value pair. Both the name and value are
// treated as opaque sequences of octets.
type HeaderField struct {
	Name, Value string

	// Sensitive means that this header field should never be
	// indexed.
	Sensitive bool
}
|
|
||||||
|
|
||||||
// IsPseudo reports whether the header field is an http2 pseudo header.
|
|
||||||
// That is, it reports whether it starts with a colon.
|
|
||||||
// It is not otherwise guaranteed to be a valid pseudo header field,
|
|
||||||
// though.
|
|
||||||
func (hf HeaderField) IsPseudo() bool {
|
|
||||||
return len(hf.Name) != 0 && hf.Name[0] == ':'
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hf HeaderField) String() string {
|
|
||||||
var suffix string
|
|
||||||
if hf.Sensitive {
|
|
||||||
suffix = " (sensitive)"
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
	// "The size of the dynamic table is the sum of the size of
	// its entries. The size of an entry is the sum of its name's
	// length in octets (as defined in Section 5.2), its value's
	// length in octets (see Section 5.2), plus 32. The size of
	// an entry is calculated using the length of the name and
	// value without any Huffman encoding applied."

	// This can overflow if somebody makes a large HeaderField
	// Name and/or Value by hand, but we don't care, because that
	// won't happen on the wire because the encoding doesn't allow
	// it.
	return uint32(len(hf.Name) + len(hf.Value) + 32)
}
|
|
||||||
|
|
||||||
// A Decoder is the decoding context for incremental processing of
// header blocks.
type Decoder struct {
	dynTab dynamicTable       // decoder-side dynamic table (RFC 7541 section 2.3.2)
	emit   func(f HeaderField) // callback invoked for each decoded field

	emitEnabled bool // whether calls to emit are enabled
	maxStrLen   int  // 0 means unlimited

	// buf is the unparsed buffer. It's only written to
	// saveBuf if it was truncated in the middle of a header
	// block. Because it's usually not owned, we can only
	// process it under Write.
	buf []byte // not owned; only valid during Write

	// saveBuf is previous data passed to Write which we weren't able
	// to fully parse before. Unlike buf, we own this data.
	saveBuf bytes.Buffer

	firstField bool // processing the first field of the header block
}
|
|
||||||
|
|
||||||
// NewDecoder returns a new decoder with the provided maximum dynamic
|
|
||||||
// table size. The emitFunc will be called for each valid field
|
|
||||||
// parsed, in the same goroutine as calls to Write, before Write returns.
|
|
||||||
func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
|
|
||||||
d := &Decoder{
|
|
||||||
emit: emitFunc,
|
|
||||||
emitEnabled: true,
|
|
||||||
firstField: true,
|
|
||||||
}
|
|
||||||
d.dynTab.table.init()
|
|
||||||
d.dynTab.allowedMaxSize = maxDynamicTableSize
|
|
||||||
d.dynTab.setMaxSize(maxDynamicTableSize)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrStringLength is returned by Decoder.Write when the max string length
// (as configured by Decoder.SetMaxStringLength) would be violated.
var ErrStringLength = errors.New("hpack: string too long")
|
|
||||||
|
|
||||||
// SetMaxStringLength sets the maximum size of a HeaderField name or
// value string. If a string exceeds this length (even after any
// decompression), Write will return ErrStringLength.
// A value of 0 means unlimited and is the default from NewDecoder.
func (d *Decoder) SetMaxStringLength(n int) {
	d.maxStrLen = n
}
|
|
||||||
|
|
||||||
// SetEmitFunc changes the callback used when new header fields
// are decoded.
// It must be non-nil. It does not affect EmitEnabled.
func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
	d.emit = emitFunc
}
|
|
||||||
|
|
||||||
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
// while still decoding and keeping in-sync with decoder state, but
// without doing unnecessary decompression or generating unnecessary
// garbage for header fields past the limit.
func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
|
|
||||||
|
|
||||||
// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
// are currently enabled. The default is true.
func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
|
|
||||||
|
|
||||||
// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
|
|
||||||
// underlying buffers for garbage reasons.
|
|
||||||
|
|
||||||
// SetMaxDynamicTableSize changes the decoder's current maximum dynamic
// table size, evicting entries if the table now exceeds it.
func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
	d.dynTab.setMaxSize(v)
}
|
|
||||||
|
|
||||||
// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
// stream (via dynamic table size updates) may set the maximum size
// to.
func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
	d.dynTab.allowedMaxSize = v
}
|
|
||||||
|
|
||||||
// dynamicTable is the HPACK dynamic table plus the size accounting
// needed to enforce the (possibly peer-controlled) maximum size.
type dynamicTable struct {
	// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
	table          headerFieldTable
	size           uint32 // in bytes
	maxSize        uint32 // current maxSize
	allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
|
|
||||||
|
|
||||||
// setMaxSize updates the table's maximum size and evicts entries as
// needed to respect the new bound.
func (dt *dynamicTable) setMaxSize(v uint32) {
	dt.maxSize = v
	dt.evict()
}
|
|
||||||
|
|
||||||
// add appends f to the table, updates the running size, and evicts
// old entries if the table grew past maxSize.
func (dt *dynamicTable) add(f HeaderField) {
	dt.table.addEntry(f)
	dt.size += f.Size()
	dt.evict()
}
|
|
||||||
|
|
||||||
// If we're too big, evict old stuff.
|
|
||||||
func (dt *dynamicTable) evict() {
|
|
||||||
var n int
|
|
||||||
for dt.size > dt.maxSize && n < dt.table.len() {
|
|
||||||
dt.size -= dt.table.ents[n].Size()
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
dt.table.evictOldest(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxTableIndex returns the largest valid 1-based HPACK index, which
// spans the static table followed by the dynamic table.
func (d *Decoder) maxTableIndex() int {
	// This should never overflow. RFC 7540 Section 6.5.2 limits the size of
	// the dynamic table to 2^32 bytes, where each entry will occupy more than
	// one byte. Further, the staticTable has a fixed, small length.
	return d.dynTab.table.len() + staticTable.len()
}
|
|
||||||
|
|
||||||
// at returns the header field at 1-based HPACK index i. Indices first
// cover the static table, then the dynamic table (newest first).
// ok is false for index 0 and for out-of-range indices.
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
	// See Section 2.3.3.
	if i == 0 {
		return
	}
	if i <= uint64(staticTable.len()) {
		return staticTable.ents[i-1], true
	}
	if i > uint64(d.maxTableIndex()) {
		return
	}
	// In the dynamic table, newer entries have lower indices.
	// However, dt.ents[0] is the oldest entry. Hence, dt.ents is
	// the reversed dynamic table.
	dt := d.dynTab.table
	return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
|
|
||||||
|
|
||||||
// Decode decodes an entire block.
|
|
||||||
//
|
|
||||||
// TODO: remove this method and make it incremental later? This is
|
|
||||||
// easier for debugging now.
|
|
||||||
func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
|
|
||||||
var hf []HeaderField
|
|
||||||
saveFunc := d.emit
|
|
||||||
defer func() { d.emit = saveFunc }()
|
|
||||||
d.emit = func(f HeaderField) { hf = append(hf, f) }
|
|
||||||
if _, err := d.Write(p); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := d.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return hf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close declares that the decoding is complete and resets the Decoder
|
|
||||||
// to be reused again for a new header block. If there is any remaining
|
|
||||||
// data in the decoder's buffer, Close returns an error.
|
|
||||||
func (d *Decoder) Close() error {
|
|
||||||
if d.saveBuf.Len() > 0 {
|
|
||||||
d.saveBuf.Reset()
|
|
||||||
return DecodingError{errors.New("truncated headers")}
|
|
||||||
}
|
|
||||||
d.firstField = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write feeds p to the decoder, parsing as many complete header field
// representations as possible and emitting each via the emit callback.
// Unconsumed bytes of a truncated representation are buffered in
// saveBuf for the next Write call.
func (d *Decoder) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		// Prevent state machine CPU attacks (making us redo
		// work up to the point of finding out we don't have
		// enough data)
		return
	}
	// Only copy the data if we have to. Optimistically assume
	// that p will contain a complete header block.
	if d.saveBuf.Len() == 0 {
		d.buf = p
	} else {
		d.saveBuf.Write(p)
		d.buf = d.saveBuf.Bytes()
		d.saveBuf.Reset()
	}

	for len(d.buf) > 0 {
		err = d.parseHeaderFieldRepr()
		if err == errNeedMore {
			// Extra paranoia, making sure saveBuf won't
			// get too large. All the varint and string
			// reading code earlier should already catch
			// overlong things and return ErrStringLength,
			// but keep this as a last resort.
			const varIntOverhead = 8 // conservative
			if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
				return 0, ErrStringLength
			}
			// Stash the incomplete tail for the next Write.
			d.saveBuf.Write(d.buf)
			return len(p), nil
		}
		d.firstField = false
		if err != nil {
			break
		}
	}
	return len(p), err
}
|
|
||||||
|
|
||||||
// errNeedMore is an internal sentinel error value that means the
// buffer is truncated and we need to read more data before we can
// continue parsing.
var errNeedMore = errors.New("need more data")
|
|
||||||
|
|
||||||
// indexType describes which literal header field representation was
// used on the wire (RFC 7541 section 6.2).
type indexType int

const (
	indexedTrue  indexType = iota // incremental indexing: add to dynamic table
	indexedFalse                  // without indexing
	indexedNever                  // never indexed (sensitive)
)

// indexed reports whether the field should be added to the dynamic table.
func (v indexType) indexed() bool { return v == indexedTrue }

// sensitive reports whether the field must never be indexed.
func (v indexType) sensitive() bool { return v == indexedNever }
|
|
||||||
|
|
||||||
// returns errNeedMore if there isn't enough data available.
|
|
||||||
// any other error is fatal.
|
|
||||||
// consumes d.buf iff it returns nil.
|
|
||||||
// precondition: must be called with len(d.buf) > 0
|
|
||||||
func (d *Decoder) parseHeaderFieldRepr() error {
|
|
||||||
b := d.buf[0]
|
|
||||||
switch {
|
|
||||||
case b&128 != 0:
|
|
||||||
// Indexed representation.
|
|
||||||
// High bit set?
|
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
|
|
||||||
return d.parseFieldIndexed()
|
|
||||||
case b&192 == 64:
|
|
||||||
// 6.2.1 Literal Header Field with Incremental Indexing
|
|
||||||
// 0b10xxxxxx: top two bits are 10
|
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
|
|
||||||
return d.parseFieldLiteral(6, indexedTrue)
|
|
||||||
case b&240 == 0:
|
|
||||||
// 6.2.2 Literal Header Field without Indexing
|
|
||||||
// 0b0000xxxx: top four bits are 0000
|
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
|
|
||||||
return d.parseFieldLiteral(4, indexedFalse)
|
|
||||||
case b&240 == 16:
|
|
||||||
// 6.2.3 Literal Header Field never Indexed
|
|
||||||
// 0b0001xxxx: top four bits are 0001
|
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
|
|
||||||
return d.parseFieldLiteral(4, indexedNever)
|
|
||||||
case b&224 == 32:
|
|
||||||
// 6.3 Dynamic Table Size Update
|
|
||||||
// Top three bits are '001'.
|
|
||||||
// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
|
|
||||||
return d.parseDynamicTableSizeUpdate()
|
|
||||||
}
|
|
||||||
|
|
||||||
return DecodingError{errors.New("invalid encoding")}
|
|
||||||
}
|
|
||||||
|
|
||||||
// (same invariants and behavior as parseHeaderFieldRepr)
|
|
||||||
func (d *Decoder) parseFieldIndexed() error {
|
|
||||||
buf := d.buf
|
|
||||||
idx, buf, err := readVarInt(7, buf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
hf, ok := d.at(idx)
|
|
||||||
if !ok {
|
|
||||||
return DecodingError{InvalidIndexError(idx)}
|
|
||||||
}
|
|
||||||
d.buf = buf
|
|
||||||
return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseFieldLiteral handles the three literal header field
// representations. n is the prefix size of the name-index varint and
// it selects the indexing behavior via indexType.
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
	buf := d.buf
	nameIdx, buf, err := readVarInt(n, buf)
	if err != nil {
		return err
	}

	var hf HeaderField
	// The string values are only needed if they will be emitted or
	// stored in the dynamic table; otherwise decompression is skipped.
	wantStr := d.emitEnabled || it.indexed()
	if nameIdx > 0 {
		// Name is a reference into the static/dynamic table.
		ihf, ok := d.at(nameIdx)
		if !ok {
			return DecodingError{InvalidIndexError(nameIdx)}
		}
		hf.Name = ihf.Name
	} else {
		// Name is a literal string.
		hf.Name, buf, err = d.readString(buf, wantStr)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	if err != nil {
		return err
	}
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)
	}
	hf.Sensitive = it.sensitive()
	return d.callEmit(hf)
}
|
|
||||||
|
|
||||||
func (d *Decoder) callEmit(hf HeaderField) error {
|
|
||||||
if d.maxStrLen != 0 {
|
|
||||||
if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
|
|
||||||
return ErrStringLength
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if d.emitEnabled {
|
|
||||||
d.emit(hf)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDynamicTableSizeUpdate handles a Dynamic Table Size Update
// (RFC 7541 section 6.3), validating its position and bound.
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
	// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
	// beginning of the first header block following the change to the dynamic table size.
	if !d.firstField && d.dynTab.size > 0 {
		return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
	}

	buf := d.buf
	size, buf, err := readVarInt(5, buf)
	if err != nil {
		return err
	}
	// The peer may not raise the table size beyond our advertised bound.
	if size > uint64(d.dynTab.allowedMaxSize) {
		return DecodingError{errors.New("dynamic table size update too large")}
	}
	d.dynTab.setMaxSize(uint32(size))
	d.buf = buf
	return nil
}
|
|
||||||
|
|
||||||
// errVarintOverflow is returned by readVarInt when the encoded integer
// does not fit the decoder's accumulator.
var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
|
|
||||||
|
|
||||||
// readVarInt reads an unsigned variable length integer off the
// beginning of p. n is the parameter as described in
// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
//
// n must always be between 1 and 8.
//
// The returned remain buffer is either a smaller suffix of p, or err != nil.
// The error is errNeedMore if p doesn't contain a complete integer.
func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
	if n < 1 || n > 8 {
		panic("bad n")
	}
	if len(p) == 0 {
		return 0, p, errNeedMore
	}
	// Low n bits of the first byte hold either the full value or the
	// all-ones prefix that signals continuation bytes follow.
	i = uint64(p[0])
	if n < 8 {
		i &= (1 << uint64(n)) - 1
	}
	if i < (1<<uint64(n))-1 {
		return i, p[1:], nil
	}

	origP := p
	p = p[1:]
	var m uint64
	for len(p) > 0 {
		b := p[0]
		p = p[1:]
		// Each continuation byte contributes 7 value bits; the high
		// bit marks whether more bytes follow.
		i += uint64(b&127) << m
		if b&128 == 0 {
			return i, p, nil
		}
		m += 7
		if m >= 63 { // TODO: proper overflow check. making this up.
			return 0, origP, errVarintOverflow
		}
	}
	return 0, origP, errNeedMore
}
|
|
||||||
|
|
||||||
// readString decodes an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
	}
	// High bit of the length byte marks Huffman encoding.
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		// Decode into a pooled scratch buffer to avoid per-call
		// allocations.
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
}
|
|
222
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
222
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
@ -1,222 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package hpack
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// bufPool recycles scratch buffers used during Huffman decoding.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}
|
|
||||||
|
|
||||||
// HuffmanDecode decodes the string in v and writes the expanded
|
|
||||||
// result to w, returning the number of bytes written to w and the
|
|
||||||
// Write call's return value. At most one Write call is made.
|
|
||||||
func HuffmanDecode(w io.Writer, v []byte) (int, error) {
|
|
||||||
buf := bufPool.Get().(*bytes.Buffer)
|
|
||||||
buf.Reset()
|
|
||||||
defer bufPool.Put(buf)
|
|
||||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return w.Write(buf.Bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
// HuffmanDecodeToString decodes the string in v.
|
|
||||||
func HuffmanDecodeToString(v []byte) (string, error) {
|
|
||||||
buf := bufPool.Get().(*bytes.Buffer)
|
|
||||||
buf.Reset()
|
|
||||||
defer bufPool.Put(buf)
|
|
||||||
if err := huffmanDecode(buf, 0, v); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return buf.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrInvalidHuffman is returned for errors found decoding
// Huffman-encoded strings.
var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
|
|
||||||
|
|
||||||
// huffmanDecode decodes v to buf.
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
	rootHuffmanNode := getRootHuffmanNode()
	n := rootHuffmanNode
	// cur is the bit buffer that has not been fed into n.
	// cbits is the number of low order bits in cur that are valid.
	// sbits is the number of bits of the symbol prefix being decoded.
	cur, cbits, sbits := uint(0), uint8(0), uint8(0)
	for _, b := range v {
		cur = cur<<8 | uint(b)
		cbits += 8
		sbits += 8
		// Walk the 8-bit-fanout decoding tree one byte of lookahead
		// at a time while at least a full byte is buffered.
		for cbits >= 8 {
			idx := byte(cur >> (cbits - 8))
			n = n.children[idx]
			if n == nil {
				return ErrInvalidHuffman
			}
			if n.children == nil {
				// Leaf: emit the symbol and restart at the root.
				if maxLen != 0 && buf.Len() == maxLen {
					return ErrStringLength
				}
				buf.WriteByte(n.sym)
				cbits -= n.codeLen
				n = rootHuffmanNode
				sbits = cbits
			} else {
				cbits -= 8
			}
		}
	}
	// Drain any symbols fully contained in the remaining <8 bits.
	for cbits > 0 {
		n = n.children[byte(cur<<(8-cbits))]
		if n == nil {
			return ErrInvalidHuffman
		}
		if n.children != nil || n.codeLen > cbits {
			break
		}
		if maxLen != 0 && buf.Len() == maxLen {
			return ErrStringLength
		}
		buf.WriteByte(n.sym)
		cbits -= n.codeLen
		n = rootHuffmanNode
		sbits = cbits
	}
	if sbits > 7 {
		// Either there was an incomplete symbol, or overlong padding.
		// Both are decoding errors per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}
	if mask := uint(1<<cbits - 1); cur&mask != mask {
		// Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
		return ErrInvalidHuffman
	}

	return nil
}
|
|
||||||
|
|
||||||
// node is one node of the 8-bit-fanout Huffman decoding tree.
type node struct {
	// children is non-nil for internal nodes
	children *[256]*node

	// The following are only valid if children is nil:
	codeLen uint8 // number of bits that led to the output of sym
	sym     byte  // output symbol
}
|
|
||||||
|
|
||||||
// newInternalNode allocates an internal tree node with an empty
// 256-way child table.
func newInternalNode() *node {
	return &node{children: new([256]*node)}
}
|
|
||||||
|
|
||||||
var (
	buildRootOnce       sync.Once // guards one-time construction of the decoding tree
	lazyRootHuffmanNode *node     // root of the tree; access via getRootHuffmanNode
)
|
|
||||||
|
|
||||||
// getRootHuffmanNode returns the decoding tree root, building it
// exactly once on first use.
func getRootHuffmanNode() *node {
	buildRootOnce.Do(buildRootHuffmanNode)
	return lazyRootHuffmanNode
}
|
|
||||||
|
|
||||||
// buildRootHuffmanNode constructs the full decoding tree from the
// static huffmanCodes/huffmanCodeLen tables.
func buildRootHuffmanNode() {
	if len(huffmanCodes) != 256 {
		panic("unexpected size")
	}
	lazyRootHuffmanNode = newInternalNode()
	for i, code := range huffmanCodes {
		addDecoderNode(byte(i), code, huffmanCodeLen[i])
	}
}
|
|
||||||
|
|
||||||
// addDecoderNode inserts the codeLen-bit Huffman code for sym into the
// decoding tree, creating internal nodes for each full 8-bit chunk and
// fanning the final partial chunk out over all matching leaf slots.
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
	cur := lazyRootHuffmanNode
	for codeLen > 8 {
		codeLen -= 8
		i := uint8(code >> codeLen)
		if cur.children[i] == nil {
			cur.children[i] = newInternalNode()
		}
		cur = cur.children[i]
	}
	// The remaining codeLen bits occupy the top of the index byte;
	// every index sharing that prefix maps to the same leaf.
	shift := 8 - codeLen
	start, end := int(uint8(code<<shift)), int(1<<shift)
	for i := start; i < start+end; i++ {
		cur.children[i] = &node{sym: sym, codeLen: codeLen}
	}
}
|
|
||||||
|
|
||||||
// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
// and returns the extended buffer.
func AppendHuffmanString(dst []byte, s string) []byte {
	rembits := uint8(8) // free bits in the last byte of dst

	for i := 0; i < len(s); i++ {
		if rembits == 8 {
			dst = append(dst, 0)
		}
		dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
	}

	if rembits < 8 {
		// special EOS symbol: pad the final byte with the
		// high-order bits of EOS per RFC 7541 section 5.2.
		code := uint32(0x3fffffff)
		nbits := uint8(30)

		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t
	}

	return dst
}
|
|
||||||
|
|
||||||
// HuffmanEncodeLength returns the number of bytes required to encode
|
|
||||||
// s in Huffman codes. The result is round up to byte boundary.
|
|
||||||
func HuffmanEncodeLength(s string) uint64 {
|
|
||||||
n := uint64(0)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
n += uint64(huffmanCodeLen[s[i]])
|
|
||||||
}
|
|
||||||
return (n + 7) / 8
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendByteToHuffmanCode appends Huffman code for c to dst and
// returns the extended buffer and the remaining bits in the last
// element. The appending is not byte aligned and the remaining bits
// in the last element of dst is given in rembits.
func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
	code := huffmanCodes[c]
	nbits := huffmanCodeLen[c]

	for {
		if rembits > nbits {
			// Whole remaining code fits in the current byte.
			t := uint8(code << (rembits - nbits))
			dst[len(dst)-1] |= t
			rembits -= nbits
			break
		}

		// Fill the current byte with the top rembits bits of the code.
		t := uint8(code >> (nbits - rembits))
		dst[len(dst)-1] |= t

		nbits -= rembits
		rembits = 8

		if nbits == 0 {
			break
		}

		dst = append(dst, 0)
	}

	return dst, rembits
}
|
|
479
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
479
vendor/golang.org/x/net/http2/hpack/tables.go
generated
vendored
@ -1,479 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package hpack
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// headerFieldTable implements a list of HeaderFields.
// This is used to implement the static and dynamic tables.
type headerFieldTable struct {
	// For static tables, entries are never evicted.
	//
	// For dynamic tables, entries are evicted from ents[0] and added to the end.
	// Each entry has a unique id that starts at one and increments for each
	// entry that is added. This unique id is stable across evictions, meaning
	// it can be used as a pointer to a specific entry. As in hpack, unique ids
	// are 1-based. The unique id for ents[k] is k + evictCount + 1.
	//
	// Zero is not a valid unique id.
	//
	// evictCount should not overflow in any remotely practical situation. In
	// practice, we will have one dynamic table per HTTP/2 connection. If we
	// assume a very powerful server that handles 1M QPS per connection and each
	// request adds (then evicts) 100 entries from the table, it would still take
	// 2M years for evictCount to overflow.
	ents       []HeaderField
	evictCount uint64

	// byName maps a HeaderField name to the unique id of the newest entry with
	// the same name. See above for a definition of "unique id".
	byName map[string]uint64

	// byNameValue maps a HeaderField name/value pair to the unique id of the newest
	// entry with the same name and value. See above for a definition of "unique id".
	byNameValue map[pairNameValue]uint64
}
|
|
||||||
|
|
||||||
// pairNameValue is the composite key for byNameValue lookups.
type pairNameValue struct {
	name, value string
}
|
|
||||||
|
|
||||||
// init allocates the lookup maps; it must be called before addEntry.
func (t *headerFieldTable) init() {
	t.byName = make(map[string]uint64)
	t.byNameValue = make(map[pairNameValue]uint64)
}
|
|
||||||
|
|
||||||
// len reports the number of entries in the table.
func (t *headerFieldTable) len() int {
	return len(t.ents)
}
|
|
||||||
|
|
||||||
// addEntry adds a new entry.
func (t *headerFieldTable) addEntry(f HeaderField) {
	// The entry's unique id is its position plus all prior evictions.
	id := uint64(t.len()) + t.evictCount + 1
	t.byName[f.Name] = id
	t.byNameValue[pairNameValue{f.Name, f.Value}] = id
	t.ents = append(t.ents, f)
}
|
|
||||||
|
|
||||||
// evictOldest evicts the n oldest entries in the table.
func (t *headerFieldTable) evictOldest(n int) {
	if n > t.len() {
		panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
	}
	for k := 0; k < n; k++ {
		f := t.ents[k]
		id := t.evictCount + uint64(k) + 1
		// Only remove a map entry if it still points at the evicted
		// entry; a newer duplicate keeps its own mapping.
		if t.byName[f.Name] == id {
			delete(t.byName, f.Name)
		}
		if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
			delete(t.byNameValue, p)
		}
	}
	copy(t.ents, t.ents[n:])
	for k := t.len() - n; k < t.len(); k++ {
		t.ents[k] = HeaderField{} // so strings can be garbage collected
	}
	t.ents = t.ents[:t.len()-n]
	if t.evictCount+uint64(n) < t.evictCount {
		panic("evictCount overflow")
	}
	t.evictCount += uint64(n)
}
|
|
||||||
|
|
||||||
// search finds f in the table. If there is no match, i is 0.
// If both name and value match, i is the matched index and nameValueMatch
// becomes true. If only name matches, i points to that index and
// nameValueMatch becomes false.
//
// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
// table, the return value i actually refers to the entry t.ents[t.len()-i].
//
// All tables are assumed to be a dynamic tables except for the global
// staticTable pointer.
//
// See Section 2.3.3.
func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
	// Sensitive fields never reuse a name+value match; only the name
	// may be referenced.
	if !f.Sensitive {
		if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
			return t.idToIndex(id), true
		}
	}
	if id := t.byName[f.Name]; id != 0 {
		return t.idToIndex(id), false
	}
	return 0, false
}
|
|
||||||
|
|
||||||
// idToIndex converts a unique id to an HPACK index.
// See Section 2.3.3.
func (t *headerFieldTable) idToIndex(id uint64) uint64 {
	if id <= t.evictCount {
		panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
	}
	k := id - t.evictCount - 1 // convert id to an index t.ents[k]
	if t != staticTable {
		return uint64(t.len()) - k // dynamic table
	}
	return k + 1
}
|
|
||||||
|
|
||||||
// staticTable is the shared, read-only HPACK static table, built once
// from staticTableEntries.
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
var staticTable = newStaticTable()

// staticTableEntries lists the predefined header fields of the HPACK
// static table, in index order (index 1 = ":authority").
var staticTableEntries = [...]HeaderField{
	{Name: ":authority"},
	{Name: ":method", Value: "GET"},
	{Name: ":method", Value: "POST"},
	{Name: ":path", Value: "/"},
	{Name: ":path", Value: "/index.html"},
	{Name: ":scheme", Value: "http"},
	{Name: ":scheme", Value: "https"},
	{Name: ":status", Value: "200"},
	{Name: ":status", Value: "204"},
	{Name: ":status", Value: "206"},
	{Name: ":status", Value: "304"},
	{Name: ":status", Value: "400"},
	{Name: ":status", Value: "404"},
	{Name: ":status", Value: "500"},
	{Name: "accept-charset"},
	{Name: "accept-encoding", Value: "gzip, deflate"},
	{Name: "accept-language"},
	{Name: "accept-ranges"},
	{Name: "accept"},
	{Name: "access-control-allow-origin"},
	{Name: "age"},
	{Name: "allow"},
	{Name: "authorization"},
	{Name: "cache-control"},
	{Name: "content-disposition"},
	{Name: "content-encoding"},
	{Name: "content-language"},
	{Name: "content-length"},
	{Name: "content-location"},
	{Name: "content-range"},
	{Name: "content-type"},
	{Name: "cookie"},
	{Name: "date"},
	{Name: "etag"},
	{Name: "expect"},
	{Name: "expires"},
	{Name: "from"},
	{Name: "host"},
	{Name: "if-match"},
	{Name: "if-modified-since"},
	{Name: "if-none-match"},
	{Name: "if-range"},
	{Name: "if-unmodified-since"},
	{Name: "last-modified"},
	{Name: "link"},
	{Name: "location"},
	{Name: "max-forwards"},
	{Name: "proxy-authenticate"},
	{Name: "proxy-authorization"},
	{Name: "range"},
	{Name: "referer"},
	{Name: "refresh"},
	{Name: "retry-after"},
	{Name: "server"},
	{Name: "set-cookie"},
	{Name: "strict-transport-security"},
	{Name: "transfer-encoding"},
	{Name: "user-agent"},
	{Name: "vary"},
	{Name: "via"},
	{Name: "www-authenticate"},
}
|
|
||||||
|
|
||||||
func newStaticTable() *headerFieldTable {
|
|
||||||
t := &headerFieldTable{}
|
|
||||||
t.init()
|
|
||||||
for _, e := range staticTableEntries[:] {
|
|
||||||
t.addEntry(e)
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// huffmanCodes holds the canonical Huffman code for each byte value,
// right-aligned in the low bits; the bit length of entry i is
// huffmanCodeLen[i]. Values are from the HPACK Huffman code table
// (RFC 7541 Appendix B). Entries are listed in byte order, 8 per line.
var huffmanCodes = [256]uint32{
	0x1ff8, 0x7fffd8, 0xfffffe2, 0xfffffe3, 0xfffffe4, 0xfffffe5, 0xfffffe6, 0xfffffe7,
	0xfffffe8, 0xffffea, 0x3ffffffc, 0xfffffe9, 0xfffffea, 0x3ffffffd, 0xfffffeb, 0xfffffec,
	0xfffffed, 0xfffffee, 0xfffffef, 0xffffff0, 0xffffff1, 0xffffff2, 0x3ffffffe, 0xffffff3,
	0xffffff4, 0xffffff5, 0xffffff6, 0xffffff7, 0xffffff8, 0xffffff9, 0xffffffa, 0xffffffb,
	0x14, 0x3f8, 0x3f9, 0xffa, 0x1ff9, 0x15, 0xf8, 0x7fa,
	0x3fa, 0x3fb, 0xf9, 0x7fb, 0xfa, 0x16, 0x17, 0x18,
	0x0, 0x1, 0x2, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
	0x1e, 0x1f, 0x5c, 0xfb, 0x7ffc, 0x20, 0xffb, 0x3fc,
	0x1ffa, 0x21, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62,
	0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a,
	0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72,
	0xfc, 0x73, 0xfd, 0x1ffb, 0x7fff0, 0x1ffc, 0x3ffc, 0x22,
	0x7ffd, 0x3, 0x23, 0x4, 0x24, 0x5, 0x25, 0x26,
	0x27, 0x6, 0x74, 0x75, 0x28, 0x29, 0x2a, 0x7,
	0x2b, 0x76, 0x2c, 0x8, 0x9, 0x2d, 0x77, 0x78,
	0x79, 0x7a, 0x7b, 0x7ffe, 0x7fc, 0x3ffd, 0x1ffd, 0xffffffc,
	0xfffe6, 0x3fffd2, 0xfffe7, 0xfffe8, 0x3fffd3, 0x3fffd4, 0x3fffd5, 0x7fffd9,
	0x3fffd6, 0x7fffda, 0x7fffdb, 0x7fffdc, 0x7fffdd, 0x7fffde, 0xffffeb, 0x7fffdf,
	0xffffec, 0xffffed, 0x3fffd7, 0x7fffe0, 0xffffee, 0x7fffe1, 0x7fffe2, 0x7fffe3,
	0x7fffe4, 0x1fffdc, 0x3fffd8, 0x7fffe5, 0x3fffd9, 0x7fffe6, 0x7fffe7, 0xffffef,
	0x3fffda, 0x1fffdd, 0xfffe9, 0x3fffdb, 0x3fffdc, 0x7fffe8, 0x7fffe9, 0x1fffde,
	0x7fffea, 0x3fffdd, 0x3fffde, 0xfffff0, 0x1fffdf, 0x3fffdf, 0x7fffeb, 0x7fffec,
	0x1fffe0, 0x1fffe1, 0x3fffe0, 0x1fffe2, 0x7fffed, 0x3fffe1, 0x7fffee, 0x7fffef,
	0xfffea, 0x3fffe2, 0x3fffe3, 0x3fffe4, 0x7ffff0, 0x3fffe5, 0x3fffe6, 0x7ffff1,
	0x3ffffe0, 0x3ffffe1, 0xfffeb, 0x7fff1, 0x3fffe7, 0x7ffff2, 0x3fffe8, 0x1ffffec,
	0x3ffffe2, 0x3ffffe3, 0x3ffffe4, 0x7ffffde, 0x7ffffdf, 0x3ffffe5, 0xfffff1, 0x1ffffed,
	0x7fff2, 0x1fffe3, 0x3ffffe6, 0x7ffffe0, 0x7ffffe1, 0x3ffffe7, 0x7ffffe2, 0xfffff2,
	0x1fffe4, 0x1fffe5, 0x3ffffe8, 0x3ffffe9, 0xffffffd, 0x7ffffe3, 0x7ffffe4, 0x7ffffe5,
	0xfffec, 0xfffff3, 0xfffed, 0x1fffe6, 0x3fffe9, 0x1fffe7, 0x1fffe8, 0x7ffff3,
	0x3fffea, 0x3fffeb, 0x1ffffee, 0x1ffffef, 0xfffff4, 0xfffff5, 0x3ffffea, 0x7ffff4,
	0x3ffffeb, 0x7ffffe6, 0x3ffffec, 0x3ffffed, 0x7ffffe7, 0x7ffffe8, 0x7ffffe9, 0x7ffffea,
	0x7ffffeb, 0xffffffe, 0x7ffffec, 0x7ffffed, 0x7ffffee, 0x7ffffef, 0x7fffff0, 0x3ffffee,
}
|
|
||||||
|
|
||||||
// huffmanCodeLen gives the bit length of the Huffman code
// huffmanCodes[i] for byte value i (RFC 7541 Appendix B).
var huffmanCodeLen = [256]uint8{
	13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
	28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
	6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
	5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
	13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
	15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
	6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
	20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
	24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
	22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
	21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
	26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
	19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
	20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
	26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
}
|
|
384
vendor/golang.org/x/net/http2/http2.go
generated
vendored
384
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@ -1,384 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package http2 implements the HTTP/2 protocol.
|
|
||||||
//
|
|
||||||
// This package is low-level and intended to be used directly by very
|
|
||||||
// few people. Most users will use it indirectly through the automatic
|
|
||||||
// use by the net/http package (from Go 1.6 and later).
|
|
||||||
// For use in earlier Go versions see ConfigureServer. (Transport support
|
|
||||||
// requires Go 1.6 or later)
|
|
||||||
//
|
|
||||||
// See https://http2.github.io/ for more information on HTTP/2.
|
|
||||||
//
|
|
||||||
// See https://http2.golang.org/ for a test server running this code.
|
|
||||||
//
|
|
||||||
package http2 // import "golang.org/x/net/http2"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"crypto/tls"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/net/http/httpguts"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
	// VerboseLogs enables verbose HTTP/2 logging; set by init
	// when GODEBUG contains "http2debug=1" or "http2debug=2".
	VerboseLogs bool
	// logFrameWrites enables logging of written frames; set by init
	// when GODEBUG contains "http2debug=2".
	logFrameWrites bool
	// logFrameReads enables logging of read frames; set by init
	// when GODEBUG contains "http2debug=2".
	logFrameReads bool
	// inTests is presumably set by this package's tests to alter
	// behavior under test — the setter is not visible in this view.
	inTests bool
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
e := os.Getenv("GODEBUG")
|
|
||||||
if strings.Contains(e, "http2debug=1") {
|
|
||||||
VerboseLogs = true
|
|
||||||
}
|
|
||||||
if strings.Contains(e, "http2debug=2") {
|
|
||||||
VerboseLogs = true
|
|
||||||
logFrameWrites = true
|
|
||||||
logFrameReads = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
	// ClientPreface is the string that must be sent by new
	// connections from clients.
	ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"

	// initialMaxFrameSize is the SETTINGS_MAX_FRAME_SIZE default.
	// http://http2.github.io/http2-spec/#rfc.section.6.5.2
	initialMaxFrameSize = 16384

	// NextProtoTLS is the NPN/ALPN protocol negotiated during
	// HTTP/2's TLS setup.
	NextProtoTLS = "h2"

	// initialHeaderTableSize is the default HPACK header table size.
	// http://http2.github.io/http2-spec/#SettingValues
	initialHeaderTableSize = 4096

	initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size

	// defaultMaxReadFrameSize is the largest frame this package is
	// willing to read by default.
	defaultMaxReadFrameSize = 1 << 20
)

var (
	// clientPreface is ClientPreface as bytes, ready to write to the wire.
	clientPreface = []byte(ClientPreface)
)
|
|
||||||
|
|
||||||
// streamState is the state of a single HTTP/2 stream.
type streamState int

// HTTP/2 stream states.
//
// See http://tools.ietf.org/html/rfc7540#section-5.1.
//
// For simplicity, the server code merges "reserved (local)" into
// "half-closed (remote)". This is one less state transition to track.
// The only downside is that we send PUSH_PROMISEs slightly less
// liberally than allowable. More discussion here:
// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
//
// "reserved (remote)" is omitted since the client code does not
// support server push.
const (
	stateIdle streamState = iota
	stateOpen
	stateHalfClosedLocal
	stateHalfClosedRemote
	stateClosed
)

// stateName maps each streamState to a human-readable label for logging.
var stateName = [...]string{
	stateIdle:             "Idle",
	stateOpen:             "Open",
	stateHalfClosedLocal:  "HalfClosedLocal",
	stateHalfClosedRemote: "HalfClosedRemote",
	stateClosed:           "Closed",
}

// String returns the state's name; it panics for out-of-range values.
func (st streamState) String() string {
	return stateName[st]
}
|
|
||||||
|
|
||||||
// Setting is a setting parameter: which setting it is, and its value.
type Setting struct {
	// ID is which setting is being set.
	// See http://http2.github.io/http2-spec/#SettingValues
	ID SettingID

	// Val is the value.
	Val uint32
}

// String formats the setting as "[ID = value]" for logs and errors.
func (s Setting) String() string {
	return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
}
|
|
||||||
|
|
||||||
// Valid reports whether the setting is valid.
|
|
||||||
func (s Setting) Valid() error {
|
|
||||||
// Limits and error codes from 6.5.2 Defined SETTINGS Parameters
|
|
||||||
switch s.ID {
|
|
||||||
case SettingEnablePush:
|
|
||||||
if s.Val != 1 && s.Val != 0 {
|
|
||||||
return ConnectionError(ErrCodeProtocol)
|
|
||||||
}
|
|
||||||
case SettingInitialWindowSize:
|
|
||||||
if s.Val > 1<<31-1 {
|
|
||||||
return ConnectionError(ErrCodeFlowControl)
|
|
||||||
}
|
|
||||||
case SettingMaxFrameSize:
|
|
||||||
if s.Val < 16384 || s.Val > 1<<24-1 {
|
|
||||||
return ConnectionError(ErrCodeProtocol)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A SettingID is an HTTP/2 setting as defined in
// http://http2.github.io/http2-spec/#iana-settings
type SettingID uint16

// The settings defined by RFC 7540 section 6.5.2.
const (
	SettingHeaderTableSize      SettingID = 0x1
	SettingEnablePush           SettingID = 0x2
	SettingMaxConcurrentStreams SettingID = 0x3
	SettingInitialWindowSize    SettingID = 0x4
	SettingMaxFrameSize         SettingID = 0x5
	SettingMaxHeaderListSize    SettingID = 0x6
)

// settingName maps the known setting IDs to their RFC names.
var settingName = map[SettingID]string{
	SettingHeaderTableSize:      "HEADER_TABLE_SIZE",
	SettingEnablePush:           "ENABLE_PUSH",
	SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
	SettingInitialWindowSize:    "INITIAL_WINDOW_SIZE",
	SettingMaxFrameSize:         "MAX_FRAME_SIZE",
	SettingMaxHeaderListSize:    "MAX_HEADER_LIST_SIZE",
}

// String returns the setting's RFC name, or "UNKNOWN_SETTING_n" for
// IDs this package does not know about.
func (s SettingID) String() string {
	name, ok := settingName[s]
	if !ok {
		return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
	}
	return name
}
|
|
||||||
|
|
||||||
var (
	// errInvalidHeaderFieldName reports a malformed header field name.
	errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
	// errInvalidHeaderFieldValue reports a header field value
	// containing prohibited bytes.
	errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
)
|
|
||||||
|
|
||||||
// validWireHeaderFieldName reports whether v is a valid header field
|
|
||||||
// name (key). See httpguts.ValidHeaderName for the base rules.
|
|
||||||
//
|
|
||||||
// Further, http2 says:
|
|
||||||
// "Just as in HTTP/1.x, header field names are strings of ASCII
|
|
||||||
// characters that are compared in a case-insensitive
|
|
||||||
// fashion. However, header field names MUST be converted to
|
|
||||||
// lowercase prior to their encoding in HTTP/2. "
|
|
||||||
func validWireHeaderFieldName(v string) bool {
|
|
||||||
if len(v) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, r := range v {
|
|
||||||
if !httpguts.IsTokenRune(r) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if 'A' <= r && r <= 'Z' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// httpCodeString returns the decimal string for an HTTP status code,
// avoiding an allocation for the two most common codes.
func httpCodeString(code int) string {
	if code == 200 {
		return "200"
	}
	if code == 404 {
		return "404"
	}
	return strconv.Itoa(code)
}
|
|
||||||
|
|
||||||
// stringWriter is the WriteString method of io.Writer implementations
// that support it (duplicated here from pkg io).
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// A gate lets two goroutines coordinate their activities.
type gate chan struct{}

// Done signals the other goroutine (blocking until it Waits, if the
// channel is unbuffered).
func (g gate) Done() { g <- struct{}{} }

// Wait blocks until the other goroutine calls Done.
func (g gate) Wait() { <-g }
|
|
||||||
|
|
||||||
// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
type closeWaiter chan struct{}

// Init makes a closeWaiter usable. It exists so that a closeWaiter
// value can be placed inside a larger struct and initialized in place
// without a separate allocation.
func (cw *closeWaiter) Init() {
	*cw = make(chan struct{})
}

// Close marks the closeWaiter as closed and unblocks any waiters.
// It must be called at most once.
func (cw closeWaiter) Close() {
	close(cw)
}

// Wait waits for the closeWaiter to become closed.
func (cw closeWaiter) Wait() {
	<-cw
}
|
|
||||||
|
|
||||||
// bufferedWriter is a buffered writer that writes to w.
// Its bufio.Writer is lazily allocated from a pool as needed, to
// minimize idle memory usage with many connections.
type bufferedWriter struct {
	w  io.Writer     // immutable
	bw *bufio.Writer // non-nil when data is buffered
}

// newBufferedWriter returns a bufferedWriter that writes to w.
func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{w: w}
}

// bufWriterPoolBufferSize is the size of bufio.Writer's
// buffers created using bufWriterPool.
//
// TODO: pick a less arbitrary value? this is a bit under
// (3 x typical 1500 byte MTU) at least. Other than that,
// not much thought went into it.
const bufWriterPoolBufferSize = 4 << 10

// bufWriterPool recycles bufio.Writers across connections.
var bufWriterPool = sync.Pool{
	New: func() interface{} {
		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
	},
}

// Available reports how many bytes can be written without flushing.
func (w *bufferedWriter) Available() int {
	if bw := w.bw; bw != nil {
		return bw.Available()
	}
	return bufWriterPoolBufferSize
}

// Write buffers p, allocating the underlying bufio.Writer from the
// pool on first use.
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
	if w.bw == nil {
		bw := bufWriterPool.Get().(*bufio.Writer)
		bw.Reset(w.w)
		w.bw = bw
	}
	return w.bw.Write(p)
}

// Flush writes any buffered data to the underlying writer and returns
// the bufio.Writer to the pool. Flushing with nothing buffered is a
// no-op returning nil.
func (w *bufferedWriter) Flush() error {
	bw := w.bw
	if bw == nil {
		return nil
	}
	w.bw = nil
	err := bw.Flush()
	bw.Reset(nil) // drop the reference to w.w before pooling
	bufWriterPool.Put(bw)
	return err
}
|
|
||||||
|
|
||||||
// mustUint31 converts v to a uint32, panicking if v is negative.
// An int32 can never exceed 2^31-1, so only the lower bound needs
// checking; the result always fits HTTP/2's 31-bit fields. (The
// original also compared v > 2147483647, which is dead code for an
// int32 operand.)
func mustUint31(v int32) uint32 {
	if v < 0 {
		panic("out of range")
	}
	return uint32(v)
}
|
|
||||||
|
|
||||||
// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3: 1xx, 204, and 304
// responses never carry a body.
func bodyAllowedForStatus(status int) bool {
	if status >= 100 && status <= 199 {
		return false
	}
	return status != 204 && status != 304
}
|
|
||||||
|
|
||||||
// httpError is a net.Error-shaped error with a configurable Timeout
// result; Temporary always reports true.
type httpError struct {
	msg     string // the Error text
	timeout bool   // whether this error counts as a timeout
}

// Error returns the error message.
func (e *httpError) Error() string { return e.msg }

// Timeout reports whether the error was caused by a timeout.
func (e *httpError) Timeout() bool { return e.timeout }

// Temporary always reports true.
func (e *httpError) Temporary() bool { return true }

// errTimeout is the error used when response headers do not arrive in time.
var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
|
|
||||||
|
|
||||||
// connectionStater is implemented by connections (such as *tls.Conn)
// that can report their TLS connection state.
type connectionStater interface {
	ConnectionState() tls.ConnectionState
}
|
|
||||||
|
|
||||||
// sorterPool recycles sorter values (and their backing slices) across calls.
var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}

// sorter sorts a string slice via sort.Interface, reusing its backing
// storage between uses.
type sorter struct {
	v []string // owned by sorter
}

func (s *sorter) Len() int           { return len(s.v) }
func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }

// Keys returns the sorted keys of h.
//
// The returned slice is only valid until s is used again or returned
// to its pool.
func (s *sorter) Keys(h http.Header) []string {
	out := s.v[:0]
	for k := range h {
		out = append(out, k)
	}
	s.v = out
	sort.Sort(s)
	return out
}

// SortStrings sorts ss in place.
func (s *sorter) SortStrings(ss []string) {
	// Our sorter works on s.v, which sorter owns, so
	// stash it away while we sort the user's buffer.
	saved := s.v
	s.v = ss
	sort.Sort(s)
	s.v = saved
}
|
|
||||||
|
|
||||||
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
//   *) a non-empty string starting with '/'
//   *) the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
	if v == "*" {
		return true
	}
	return len(v) > 0 && v[0] == '/'
}
|
|
20
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
20
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
@ -1,20 +0,0 @@
|
|||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.11
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http/httptrace"
|
|
||||||
"net/textproto"
|
|
||||||
)
|
|
||||||
|
|
||||||
// traceHasWroteHeaderField reports false on Go < 1.11 (see the
// !go1.11 build tag), where httptrace has no WroteHeaderField hook.
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }

// traceWroteHeaderField is a no-op on Go < 1.11.
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}

// traceGot1xxResponseFunc returns nil on Go < 1.11, where httptrace
// has no Got1xxResponse hook.
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
	return nil
}
|
|
163
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
163
vendor/golang.org/x/net/http2/pipe.go
generated
vendored
@ -1,163 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
	mu       sync.Mutex
	c        sync.Cond     // c.L lazily initialized to &p.mu
	b        pipeBuffer    // nil when done reading
	err      error         // read error once empty. non-nil means closed.
	breakErr error         // immediate read error (caller doesn't see rest of b)
	donec    chan struct{} // closed on error
	readFn   func()        // optional code to run in Read before error
}

// pipeBuffer is the storage behind a pipe: a readable, writable
// buffer that can report its length.
type pipeBuffer interface {
	Len() int
	io.Writer
	io.Reader
}
|
|
||||||
|
|
||||||
func (p *pipe) Len() int {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
if p.b == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return p.b.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read waits until data is available and copies bytes
// from the buffer into p.
func (p *pipe) Read(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazy init so a zero pipe is usable
	}
	for {
		// A break error preempts everything, including buffered data.
		if p.breakErr != nil {
			return 0, p.breakErr
		}
		// Drain buffered data before reporting any close error.
		if p.b != nil && p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			if p.readFn != nil {
				p.readFn()     // e.g. copy trailers
				p.readFn = nil // not sticky like p.err
			}
			// Done reading; drop the buffer reference.
			p.b = nil
			return 0, p.err
		}
		// No data and not closed: wait for a writer or closer to signal.
		p.c.Wait()
	}
}
|
|
||||||
|
|
||||||
// errClosedPipeWrite is returned by Write after the pipe was closed
// with CloseWithError.
var errClosedPipeWrite = errors.New("write on closed buffer")

// Write copies bytes from p into the buffer and wakes a reader.
// It is an error to write more data than the buffer can hold.
func (p *pipe) Write(d []byte) (n int, err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazy init so a zero pipe is usable
	}
	defer p.c.Signal() // wake one blocked Read
	if p.err != nil {
		return 0, errClosedPipeWrite
	}
	if p.breakErr != nil {
		return len(d), nil // discard when there is no reader
	}
	return p.b.Write(d)
}
|
|
||||||
|
|
||||||
// CloseWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err after all data has been
// read.
//
// The error must be non-nil.
func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }

// BreakWithError causes the next Read (waking up a current blocked
// Read if needed) to return the provided err immediately, without
// waiting for unread data.
func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }

// closeWithErrorAndCode is like CloseWithError but also sets some code to run
// in the caller's goroutine before returning the error.
func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }

// closeWithError records err in *dst (either &p.err or &p.breakErr),
// wakes a blocked reader, and closes the done channel. Only the first
// error recorded in *dst wins; later calls are no-ops.
func (p *pipe) closeWithError(dst *error, err error, fn func()) {
	if err == nil {
		panic("err must be non-nil")
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.c.L == nil {
		p.c.L = &p.mu // lazy init so a zero pipe is usable
	}
	defer p.c.Signal() // wake a blocked Read
	if *dst != nil {
		// Already been done.
		return
	}
	p.readFn = fn
	if dst == &p.breakErr {
		// Breaking discards unread data immediately.
		p.b = nil
	}
	*dst = err
	p.closeDoneLocked()
}
|
|
||||||
|
|
||||||
// closeDoneLocked closes p.donec if it has been created and is not
// already closed. requires p.mu be held.
func (p *pipe) closeDoneLocked() {
	if p.donec == nil {
		return
	}
	// Close if unclosed. This isn't racy since we always
	// hold p.mu while closing.
	select {
	case <-p.donec:
		// already closed
	default:
		close(p.donec)
	}
}
|
|
||||||
|
|
||||||
// Err returns the error (if any) first set by BreakWithError or CloseWithError.
|
|
||||||
func (p *pipe) Err() error {
|
|
||||||
p.mu.Lock()
|
|
||||||
defer p.mu.Unlock()
|
|
||||||
if p.breakErr != nil {
|
|
||||||
return p.breakErr
|
|
||||||
}
|
|
||||||
return p.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Done returns a channel which is closed if and when this pipe is closed
// with CloseWithError.
func (p *pipe) Done() <-chan struct{} {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.donec == nil {
		p.donec = make(chan struct{})
		if p.err != nil || p.breakErr != nil {
			// Already hit an error; close the channel immediately.
			p.closeDoneLocked()
		}
	}
	return p.donec
}
|
|
2931
vendor/golang.org/x/net/http2/server.go
generated
vendored
2931
vendor/golang.org/x/net/http2/server.go
generated
vendored
File diff suppressed because it is too large
Load Diff
2610
vendor/golang.org/x/net/http2/transport.go
generated
vendored
2610
vendor/golang.org/x/net/http2/transport.go
generated
vendored
File diff suppressed because it is too large
Load Diff
365
vendor/golang.org/x/net/http2/write.go
generated
vendored
365
vendor/golang.org/x/net/http2/write.go
generated
vendored
@ -1,365 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"golang.org/x/net/http/httpguts"
|
|
||||||
"golang.org/x/net/http2/hpack"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
	// writeFrame writes this item's frame(s) via ctx's Framer.
	writeFrame(writeContext) error

	// staysWithinBuffer reports whether this writer promises that
	// it will only write less than or equal to size bytes, and it
	// won't Flush the write context.
	staysWithinBuffer(size int) bool
}

// writeContext is the interface needed by the various frame writer
// types below. All the writeFrame methods below are scheduled via the
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
//
// TODO: decide whether to a) use this in the client code (which didn't
// end up using this yet, because it has a simpler design, not
// currently implementing priorities), or b) delete this and
// make the server code a bit more concrete.
type writeContext interface {
	Framer() *Framer
	Flush() error
	CloseConn() error
	// HeaderEncoder returns an HPACK encoder that writes to the
	// returned buffer.
	HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
|
|
||||||
|
|
||||||
// writeEndsStream reports whether w writes a frame that will transition
// the stream to a half-closed local state. This returns false for RST_STREAM,
// which closes the entire stream (not just the local half).
func writeEndsStream(w writeFramer) bool {
	switch v := w.(type) {
	case *writeData:
		return v.endStream
	case *writeResHeaders:
		return v.endStream
	case nil:
		// This can only happen if the caller reuses w after it's
		// been intentionally nil'ed out to prevent use. Keep this
		// here to catch future refactoring breaking it.
		panic("writeEndsStream called on nil writeFramer")
	}
	// All other frame types never end a stream.
	return false
}
|
|
||||||
|
|
||||||
type flushFrameWriter struct{}
|
|
||||||
|
|
||||||
func (flushFrameWriter) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
|
|
||||||
|
|
||||||
type writeSettings []Setting
|
|
||||||
|
|
||||||
func (s writeSettings) staysWithinBuffer(max int) bool {
|
|
||||||
const settingSize = 6 // uint16 + uint32
|
|
||||||
return frameHeaderLen+settingSize*len(s) <= max
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s writeSettings) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteSettings([]Setting(s)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
type writeGoAway struct {
|
|
||||||
maxStreamID uint32
|
|
||||||
code ErrCode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *writeGoAway) writeFrame(ctx writeContext) error {
|
|
||||||
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
|
|
||||||
ctx.Flush() // ignore error: we're hanging up on them anyway
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
|
|
||||||
|
|
||||||
type writeData struct {
|
|
||||||
streamID uint32
|
|
||||||
p []byte
|
|
||||||
endStream bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeData) String() string {
|
|
||||||
return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeData) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeData) staysWithinBuffer(max int) bool {
|
|
||||||
return frameHeaderLen+len(w.p) <= max
|
|
||||||
}
|
|
||||||
|
|
||||||
// handlerPanicRST is the message sent from handler goroutines when
|
|
||||||
// the handler panics.
|
|
||||||
type handlerPanicRST struct {
|
|
||||||
StreamID uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
|
||||||
|
|
||||||
func (se StreamError) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
|
||||||
|
|
||||||
type writePingAck struct{ pf *PingFrame }
|
|
||||||
|
|
||||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WritePing(true, w.pf.Data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
|
|
||||||
|
|
||||||
type writeSettingsAck struct{}
|
|
||||||
|
|
||||||
func (writeSettingsAck) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteSettingsAck()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
|
|
||||||
|
|
||||||
// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
|
|
||||||
// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
|
|
||||||
// for the first/last fragment, respectively.
|
|
||||||
func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
|
|
||||||
// For now we're lazy and just pick the minimum MAX_FRAME_SIZE
|
|
||||||
// that all peers must support (16KB). Later we could care
|
|
||||||
// more and send larger frames if the peer advertised it, but
|
|
||||||
// there's little point. Most headers are small anyway (so we
|
|
||||||
// generally won't have CONTINUATION frames), and extra frames
|
|
||||||
// only waste 9 bytes anyway.
|
|
||||||
const maxFrameSize = 16384
|
|
||||||
|
|
||||||
first := true
|
|
||||||
for len(headerBlock) > 0 {
|
|
||||||
frag := headerBlock
|
|
||||||
if len(frag) > maxFrameSize {
|
|
||||||
frag = frag[:maxFrameSize]
|
|
||||||
}
|
|
||||||
headerBlock = headerBlock[len(frag):]
|
|
||||||
if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
first = false
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
|
|
||||||
// for HTTP response headers or trailers from a server handler.
|
|
||||||
type writeResHeaders struct {
|
|
||||||
streamID uint32
|
|
||||||
httpResCode int // 0 means no ":status" line
|
|
||||||
h http.Header // may be nil
|
|
||||||
trailers []string // if non-nil, which keys of h to write. nil means all.
|
|
||||||
endStream bool
|
|
||||||
|
|
||||||
date string
|
|
||||||
contentType string
|
|
||||||
contentLength string
|
|
||||||
}
|
|
||||||
|
|
||||||
func encKV(enc *hpack.Encoder, k, v string) {
|
|
||||||
if VerboseLogs {
|
|
||||||
log.Printf("http2: server encoding header %q = %q", k, v)
|
|
||||||
}
|
|
||||||
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeResHeaders) staysWithinBuffer(max int) bool {
|
|
||||||
// TODO: this is a common one. It'd be nice to return true
|
|
||||||
// here and get into the fast path if we could be clever and
|
|
||||||
// calculate the size fast enough, or at least a conservative
|
|
||||||
// upper bound that usually fires. (Maybe if w.h and
|
|
||||||
// w.trailers are nil, so we don't need to enumerate it.)
|
|
||||||
// Otherwise I'm afraid that just calculating the length to
|
|
||||||
// answer this question would be slower than the ~2µs benefit.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
|
|
||||||
enc, buf := ctx.HeaderEncoder()
|
|
||||||
buf.Reset()
|
|
||||||
|
|
||||||
if w.httpResCode != 0 {
|
|
||||||
encKV(enc, ":status", httpCodeString(w.httpResCode))
|
|
||||||
}
|
|
||||||
|
|
||||||
encodeHeaders(enc, w.h, w.trailers)
|
|
||||||
|
|
||||||
if w.contentType != "" {
|
|
||||||
encKV(enc, "content-type", w.contentType)
|
|
||||||
}
|
|
||||||
if w.contentLength != "" {
|
|
||||||
encKV(enc, "content-length", w.contentLength)
|
|
||||||
}
|
|
||||||
if w.date != "" {
|
|
||||||
encKV(enc, "date", w.date)
|
|
||||||
}
|
|
||||||
|
|
||||||
headerBlock := buf.Bytes()
|
|
||||||
if len(headerBlock) == 0 && w.trailers == nil {
|
|
||||||
panic("unexpected empty hpack")
|
|
||||||
}
|
|
||||||
|
|
||||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
|
||||||
if firstFrag {
|
|
||||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
|
||||||
StreamID: w.streamID,
|
|
||||||
BlockFragment: frag,
|
|
||||||
EndStream: w.endStream,
|
|
||||||
EndHeaders: lastFrag,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
|
|
||||||
type writePushPromise struct {
|
|
||||||
streamID uint32 // pusher stream
|
|
||||||
method string // for :method
|
|
||||||
url *url.URL // for :scheme, :authority, :path
|
|
||||||
h http.Header
|
|
||||||
|
|
||||||
// Creates an ID for a pushed stream. This runs on serveG just before
|
|
||||||
// the frame is written. The returned ID is copied to promisedID.
|
|
||||||
allocatePromisedID func() (uint32, error)
|
|
||||||
promisedID uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writePushPromise) staysWithinBuffer(max int) bool {
|
|
||||||
// TODO: see writeResHeaders.staysWithinBuffer
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writePushPromise) writeFrame(ctx writeContext) error {
|
|
||||||
enc, buf := ctx.HeaderEncoder()
|
|
||||||
buf.Reset()
|
|
||||||
|
|
||||||
encKV(enc, ":method", w.method)
|
|
||||||
encKV(enc, ":scheme", w.url.Scheme)
|
|
||||||
encKV(enc, ":authority", w.url.Host)
|
|
||||||
encKV(enc, ":path", w.url.RequestURI())
|
|
||||||
encodeHeaders(enc, w.h, nil)
|
|
||||||
|
|
||||||
headerBlock := buf.Bytes()
|
|
||||||
if len(headerBlock) == 0 {
|
|
||||||
panic("unexpected empty hpack")
|
|
||||||
}
|
|
||||||
|
|
||||||
return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
|
|
||||||
if firstFrag {
|
|
||||||
return ctx.Framer().WritePushPromise(PushPromiseParam{
|
|
||||||
StreamID: w.streamID,
|
|
||||||
PromiseID: w.promisedID,
|
|
||||||
BlockFragment: frag,
|
|
||||||
EndHeaders: lastFrag,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type write100ContinueHeadersFrame struct {
|
|
||||||
streamID uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
|
|
||||||
enc, buf := ctx.HeaderEncoder()
|
|
||||||
buf.Reset()
|
|
||||||
encKV(enc, ":status", "100")
|
|
||||||
return ctx.Framer().WriteHeaders(HeadersFrameParam{
|
|
||||||
StreamID: w.streamID,
|
|
||||||
BlockFragment: buf.Bytes(),
|
|
||||||
EndStream: false,
|
|
||||||
EndHeaders: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
|
|
||||||
// Sloppy but conservative:
|
|
||||||
return 9+2*(len(":status")+len("100")) <= max
|
|
||||||
}
|
|
||||||
|
|
||||||
type writeWindowUpdate struct {
|
|
||||||
streamID uint32 // or 0 for conn-level
|
|
||||||
n uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
|
||||||
|
|
||||||
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
|
|
||||||
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
|
|
||||||
// is encoded only if k is in keys.
|
|
||||||
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
|
|
||||||
if keys == nil {
|
|
||||||
sorter := sorterPool.Get().(*sorter)
|
|
||||||
// Using defer here, since the returned keys from the
|
|
||||||
// sorter.Keys method is only valid until the sorter
|
|
||||||
// is returned:
|
|
||||||
defer sorterPool.Put(sorter)
|
|
||||||
keys = sorter.Keys(h)
|
|
||||||
}
|
|
||||||
for _, k := range keys {
|
|
||||||
vv := h[k]
|
|
||||||
k = lowerHeader(k)
|
|
||||||
if !validWireHeaderFieldName(k) {
|
|
||||||
// Skip it as backup paranoia. Per
|
|
||||||
// golang.org/issue/14048, these should
|
|
||||||
// already be rejected at a higher level.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
isTE := k == "transfer-encoding"
|
|
||||||
for _, v := range vv {
|
|
||||||
if !httpguts.ValidHeaderFieldValue(v) {
|
|
||||||
// TODO: return an error? golang.org/issue/14048
|
|
||||||
// For now just omit it.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO: more of "8.1.2.2 Connection-Specific Header Fields"
|
|
||||||
if isTE && v != "trailers" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
encKV(enc, k, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
242
vendor/golang.org/x/net/http2/writesched.go
generated
vendored
242
vendor/golang.org/x/net/http2/writesched.go
generated
vendored
@ -1,242 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
|
|
||||||
// Methods are never called concurrently.
|
|
||||||
type WriteScheduler interface {
|
|
||||||
// OpenStream opens a new stream in the write scheduler.
|
|
||||||
// It is illegal to call this with streamID=0 or with a streamID that is
|
|
||||||
// already open -- the call may panic.
|
|
||||||
OpenStream(streamID uint32, options OpenStreamOptions)
|
|
||||||
|
|
||||||
// CloseStream closes a stream in the write scheduler. Any frames queued on
|
|
||||||
// this stream should be discarded. It is illegal to call this on a stream
|
|
||||||
// that is not open -- the call may panic.
|
|
||||||
CloseStream(streamID uint32)
|
|
||||||
|
|
||||||
// AdjustStream adjusts the priority of the given stream. This may be called
|
|
||||||
// on a stream that has not yet been opened or has been closed. Note that
|
|
||||||
// RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
|
|
||||||
// https://tools.ietf.org/html/rfc7540#section-5.1
|
|
||||||
AdjustStream(streamID uint32, priority PriorityParam)
|
|
||||||
|
|
||||||
// Push queues a frame in the scheduler. In most cases, this will not be
|
|
||||||
// called with wr.StreamID()!=0 unless that stream is currently open. The one
|
|
||||||
// exception is RST_STREAM frames, which may be sent on idle or closed streams.
|
|
||||||
Push(wr FrameWriteRequest)
|
|
||||||
|
|
||||||
// Pop dequeues the next frame to write. Returns false if no frames can
|
|
||||||
// be written. Frames with a given wr.StreamID() are Pop'd in the same
|
|
||||||
// order they are Push'd.
|
|
||||||
Pop() (wr FrameWriteRequest, ok bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
|
|
||||||
type OpenStreamOptions struct {
|
|
||||||
// PusherID is zero if the stream was initiated by the client. Otherwise,
|
|
||||||
// PusherID names the stream that pushed the newly opened stream.
|
|
||||||
PusherID uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// FrameWriteRequest is a request to write a frame.
|
|
||||||
type FrameWriteRequest struct {
|
|
||||||
// write is the interface value that does the writing, once the
|
|
||||||
// WriteScheduler has selected this frame to write. The write
|
|
||||||
// functions are all defined in write.go.
|
|
||||||
write writeFramer
|
|
||||||
|
|
||||||
// stream is the stream on which this frame will be written.
|
|
||||||
// nil for non-stream frames like PING and SETTINGS.
|
|
||||||
stream *stream
|
|
||||||
|
|
||||||
// done, if non-nil, must be a buffered channel with space for
|
|
||||||
// 1 message and is sent the return value from write (or an
|
|
||||||
// earlier error) when the frame has been written.
|
|
||||||
done chan error
|
|
||||||
}
|
|
||||||
|
|
||||||
// StreamID returns the id of the stream this frame will be written to.
|
|
||||||
// 0 is used for non-stream frames such as PING and SETTINGS.
|
|
||||||
func (wr FrameWriteRequest) StreamID() uint32 {
|
|
||||||
if wr.stream == nil {
|
|
||||||
if se, ok := wr.write.(StreamError); ok {
|
|
||||||
// (*serverConn).resetStream doesn't set
|
|
||||||
// stream because it doesn't necessarily have
|
|
||||||
// one. So special case this type of write
|
|
||||||
// message.
|
|
||||||
return se.StreamID
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return wr.stream.id
|
|
||||||
}
|
|
||||||
|
|
||||||
// DataSize returns the number of flow control bytes that must be consumed
|
|
||||||
// to write this entire frame. This is 0 for non-DATA frames.
|
|
||||||
func (wr FrameWriteRequest) DataSize() int {
|
|
||||||
if wd, ok := wr.write.(*writeData); ok {
|
|
||||||
return len(wd.p)
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume consumes min(n, available) bytes from this frame, where available
|
|
||||||
// is the number of flow control bytes available on the stream. Consume returns
|
|
||||||
// 0, 1, or 2 frames, where the integer return value gives the number of frames
|
|
||||||
// returned.
|
|
||||||
//
|
|
||||||
// If flow control prevents consuming any bytes, this returns (_, _, 0). If
|
|
||||||
// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
|
|
||||||
// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
|
|
||||||
// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
|
|
||||||
// underlying stream's flow control budget.
|
|
||||||
func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
|
|
||||||
var empty FrameWriteRequest
|
|
||||||
|
|
||||||
// Non-DATA frames are always consumed whole.
|
|
||||||
wd, ok := wr.write.(*writeData)
|
|
||||||
if !ok || len(wd.p) == 0 {
|
|
||||||
return wr, empty, 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// Might need to split after applying limits.
|
|
||||||
allowed := wr.stream.flow.available()
|
|
||||||
if n < allowed {
|
|
||||||
allowed = n
|
|
||||||
}
|
|
||||||
if wr.stream.sc.maxFrameSize < allowed {
|
|
||||||
allowed = wr.stream.sc.maxFrameSize
|
|
||||||
}
|
|
||||||
if allowed <= 0 {
|
|
||||||
return empty, empty, 0
|
|
||||||
}
|
|
||||||
if len(wd.p) > int(allowed) {
|
|
||||||
wr.stream.flow.take(allowed)
|
|
||||||
consumed := FrameWriteRequest{
|
|
||||||
stream: wr.stream,
|
|
||||||
write: &writeData{
|
|
||||||
streamID: wd.streamID,
|
|
||||||
p: wd.p[:allowed],
|
|
||||||
// Even if the original had endStream set, there
|
|
||||||
// are bytes remaining because len(wd.p) > allowed,
|
|
||||||
// so we know endStream is false.
|
|
||||||
endStream: false,
|
|
||||||
},
|
|
||||||
// Our caller is blocking on the final DATA frame, not
|
|
||||||
// this intermediate frame, so no need to wait.
|
|
||||||
done: nil,
|
|
||||||
}
|
|
||||||
rest := FrameWriteRequest{
|
|
||||||
stream: wr.stream,
|
|
||||||
write: &writeData{
|
|
||||||
streamID: wd.streamID,
|
|
||||||
p: wd.p[allowed:],
|
|
||||||
endStream: wd.endStream,
|
|
||||||
},
|
|
||||||
done: wr.done,
|
|
||||||
}
|
|
||||||
return consumed, rest, 2
|
|
||||||
}
|
|
||||||
|
|
||||||
// The frame is consumed whole.
|
|
||||||
// NB: This cast cannot overflow because allowed is <= math.MaxInt32.
|
|
||||||
wr.stream.flow.take(int32(len(wd.p)))
|
|
||||||
return wr, empty, 1
|
|
||||||
}
|
|
||||||
|
|
||||||
// String is for debugging only.
|
|
||||||
func (wr FrameWriteRequest) String() string {
|
|
||||||
var des string
|
|
||||||
if s, ok := wr.write.(fmt.Stringer); ok {
|
|
||||||
des = s.String()
|
|
||||||
} else {
|
|
||||||
des = fmt.Sprintf("%T", wr.write)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
|
|
||||||
}
|
|
||||||
|
|
||||||
// replyToWriter sends err to wr.done and panics if the send must block
|
|
||||||
// This does nothing if wr.done is nil.
|
|
||||||
func (wr *FrameWriteRequest) replyToWriter(err error) {
|
|
||||||
if wr.done == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case wr.done <- err:
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
|
|
||||||
}
|
|
||||||
wr.write = nil // prevent use (assume it's tainted after wr.done send)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeQueue is used by implementations of WriteScheduler.
|
|
||||||
type writeQueue struct {
|
|
||||||
s []FrameWriteRequest
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
|
|
||||||
|
|
||||||
func (q *writeQueue) push(wr FrameWriteRequest) {
|
|
||||||
q.s = append(q.s, wr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (q *writeQueue) shift() FrameWriteRequest {
|
|
||||||
if len(q.s) == 0 {
|
|
||||||
panic("invalid use of queue")
|
|
||||||
}
|
|
||||||
wr := q.s[0]
|
|
||||||
// TODO: less copy-happy queue.
|
|
||||||
copy(q.s, q.s[1:])
|
|
||||||
q.s[len(q.s)-1] = FrameWriteRequest{}
|
|
||||||
q.s = q.s[:len(q.s)-1]
|
|
||||||
return wr
|
|
||||||
}
|
|
||||||
|
|
||||||
// consume consumes up to n bytes from q.s[0]. If the frame is
|
|
||||||
// entirely consumed, it is removed from the queue. If the frame
|
|
||||||
// is partially consumed, the frame is kept with the consumed
|
|
||||||
// bytes removed. Returns true iff any bytes were consumed.
|
|
||||||
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
|
|
||||||
if len(q.s) == 0 {
|
|
||||||
return FrameWriteRequest{}, false
|
|
||||||
}
|
|
||||||
consumed, rest, numresult := q.s[0].Consume(n)
|
|
||||||
switch numresult {
|
|
||||||
case 0:
|
|
||||||
return FrameWriteRequest{}, false
|
|
||||||
case 1:
|
|
||||||
q.shift()
|
|
||||||
case 2:
|
|
||||||
q.s[0] = rest
|
|
||||||
}
|
|
||||||
return consumed, true
|
|
||||||
}
|
|
||||||
|
|
||||||
type writeQueuePool []*writeQueue
|
|
||||||
|
|
||||||
// put inserts an unused writeQueue into the pool.
|
|
||||||
func (p *writeQueuePool) put(q *writeQueue) {
|
|
||||||
for i := range q.s {
|
|
||||||
q.s[i] = FrameWriteRequest{}
|
|
||||||
}
|
|
||||||
q.s = q.s[:0]
|
|
||||||
*p = append(*p, q)
|
|
||||||
}
|
|
||||||
|
|
||||||
// get returns an empty writeQueue.
|
|
||||||
func (p *writeQueuePool) get() *writeQueue {
|
|
||||||
ln := len(*p)
|
|
||||||
if ln == 0 {
|
|
||||||
return new(writeQueue)
|
|
||||||
}
|
|
||||||
x := ln - 1
|
|
||||||
q := (*p)[x]
|
|
||||||
(*p)[x] = nil
|
|
||||||
*p = (*p)[:x]
|
|
||||||
return q
|
|
||||||
}
|
|
452
vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
452
vendor/golang.org/x/net/http2/writesched_priority.go
generated
vendored
@ -1,452 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RFC 7540, Section 5.3.5: the default weight is 16.
|
|
||||||
const priorityDefaultWeight = 15 // 16 = 15 + 1
|
|
||||||
|
|
||||||
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
|
|
||||||
type PriorityWriteSchedulerConfig struct {
|
|
||||||
// MaxClosedNodesInTree controls the maximum number of closed streams to
|
|
||||||
// retain in the priority tree. Setting this to zero saves a small amount
|
|
||||||
// of memory at the cost of performance.
|
|
||||||
//
|
|
||||||
// See RFC 7540, Section 5.3.4:
|
|
||||||
// "It is possible for a stream to become closed while prioritization
|
|
||||||
// information ... is in transit. ... This potentially creates suboptimal
|
|
||||||
// prioritization, since the stream could be given a priority that is
|
|
||||||
// different from what is intended. To avoid these problems, an endpoint
|
|
||||||
// SHOULD retain stream prioritization state for a period after streams
|
|
||||||
// become closed. The longer state is retained, the lower the chance that
|
|
||||||
// streams are assigned incorrect or default priority values."
|
|
||||||
MaxClosedNodesInTree int
|
|
||||||
|
|
||||||
// MaxIdleNodesInTree controls the maximum number of idle streams to
|
|
||||||
// retain in the priority tree. Setting this to zero saves a small amount
|
|
||||||
// of memory at the cost of performance.
|
|
||||||
//
|
|
||||||
// See RFC 7540, Section 5.3.4:
|
|
||||||
// Similarly, streams that are in the "idle" state can be assigned
|
|
||||||
// priority or become a parent of other streams. This allows for the
|
|
||||||
// creation of a grouping node in the dependency tree, which enables
|
|
||||||
// more flexible expressions of priority. Idle streams begin with a
|
|
||||||
// default priority (Section 5.3.5).
|
|
||||||
MaxIdleNodesInTree int
|
|
||||||
|
|
||||||
// ThrottleOutOfOrderWrites enables write throttling to help ensure that
|
|
||||||
// data is delivered in priority order. This works around a race where
|
|
||||||
// stream B depends on stream A and both streams are about to call Write
|
|
||||||
// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
|
|
||||||
// write as much data from B as possible, but this is suboptimal because A
|
|
||||||
// is a higher-priority stream. With throttling enabled, we write a small
|
|
||||||
// amount of data from B to minimize the amount of bandwidth that B can
|
|
||||||
// steal from A.
|
|
||||||
ThrottleOutOfOrderWrites bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
|
|
||||||
// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
|
|
||||||
// If cfg is nil, default options are used.
|
|
||||||
func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
|
|
||||||
if cfg == nil {
|
|
||||||
// For justification of these defaults, see:
|
|
||||||
// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
|
|
||||||
cfg = &PriorityWriteSchedulerConfig{
|
|
||||||
MaxClosedNodesInTree: 10,
|
|
||||||
MaxIdleNodesInTree: 10,
|
|
||||||
ThrottleOutOfOrderWrites: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ws := &priorityWriteScheduler{
|
|
||||||
nodes: make(map[uint32]*priorityNode),
|
|
||||||
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
|
|
||||||
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
|
|
||||||
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
|
|
||||||
}
|
|
||||||
ws.nodes[0] = &ws.root
|
|
||||||
if cfg.ThrottleOutOfOrderWrites {
|
|
||||||
ws.writeThrottleLimit = 1024
|
|
||||||
} else {
|
|
||||||
ws.writeThrottleLimit = math.MaxInt32
|
|
||||||
}
|
|
||||||
return ws
|
|
||||||
}
|
|
||||||
|
|
||||||
type priorityNodeState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
priorityNodeOpen priorityNodeState = iota
|
|
||||||
priorityNodeClosed
|
|
||||||
priorityNodeIdle
|
|
||||||
)
|
|
||||||
|
|
||||||
// priorityNode is a node in an HTTP/2 priority tree.
|
|
||||||
// Each node is associated with a single stream ID.
|
|
||||||
// See RFC 7540, Section 5.3.
|
|
||||||
type priorityNode struct {
|
|
||||||
q writeQueue // queue of pending frames to write
|
|
||||||
id uint32 // id of the stream, or 0 for the root of the tree
|
|
||||||
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
|
|
||||||
state priorityNodeState // open | closed | idle
|
|
||||||
bytes int64 // number of bytes written by this node, or 0 if closed
|
|
||||||
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
|
|
||||||
|
|
||||||
// These links form the priority tree.
|
|
||||||
parent *priorityNode
|
|
||||||
kids *priorityNode // start of the kids list
|
|
||||||
prev, next *priorityNode // doubly-linked list of siblings
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *priorityNode) setParent(parent *priorityNode) {
|
|
||||||
if n == parent {
|
|
||||||
panic("setParent to self")
|
|
||||||
}
|
|
||||||
if n.parent == parent {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Unlink from current parent.
|
|
||||||
if parent := n.parent; parent != nil {
|
|
||||||
if n.prev == nil {
|
|
||||||
parent.kids = n.next
|
|
||||||
} else {
|
|
||||||
n.prev.next = n.next
|
|
||||||
}
|
|
||||||
if n.next != nil {
|
|
||||||
n.next.prev = n.prev
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Link to new parent.
|
|
||||||
// If parent=nil, remove n from the tree.
|
|
||||||
// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
|
|
||||||
n.parent = parent
|
|
||||||
if parent == nil {
|
|
||||||
n.next = nil
|
|
||||||
n.prev = nil
|
|
||||||
} else {
|
|
||||||
n.next = parent.kids
|
|
||||||
n.prev = nil
|
|
||||||
if n.next != nil {
|
|
||||||
n.next.prev = n
|
|
||||||
}
|
|
||||||
parent.kids = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *priorityNode) addBytes(b int64) {
|
|
||||||
n.bytes += b
|
|
||||||
for ; n != nil; n = n.parent {
|
|
||||||
n.subtreeBytes += b
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// walkReadyInOrder iterates over the tree in priority order, calling f for each node
|
|
||||||
// with a non-empty write queue. When f returns true, this funcion returns true and the
|
|
||||||
// walk halts. tmp is used as scratch space for sorting.
|
|
||||||
//
|
|
||||||
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
|
|
||||||
// if any ancestor p of n is still open (ignoring the root node).
|
|
||||||
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
|
|
||||||
if !n.q.empty() && f(n, openParent) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if n.kids == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't consider the root "open" when updating openParent since
|
|
||||||
// we can't send data frames on the root stream (only control frames).
|
|
||||||
if n.id != 0 {
|
|
||||||
openParent = openParent || (n.state == priorityNodeOpen)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Common case: only one kid or all kids have the same weight.
|
|
||||||
// Some clients don't use weights; other clients (like web browsers)
|
|
||||||
// use mostly-linear priority trees.
|
|
||||||
w := n.kids.weight
|
|
||||||
needSort := false
|
|
||||||
for k := n.kids.next; k != nil; k = k.next {
|
|
||||||
if k.weight != w {
|
|
||||||
needSort = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !needSort {
|
|
||||||
for k := n.kids; k != nil; k = k.next {
|
|
||||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Uncommon case: sort the child nodes. We remove the kids from the parent,
|
|
||||||
// then re-insert after sorting so we can reuse tmp for future sort calls.
|
|
||||||
*tmp = (*tmp)[:0]
|
|
||||||
for n.kids != nil {
|
|
||||||
*tmp = append(*tmp, n.kids)
|
|
||||||
n.kids.setParent(nil)
|
|
||||||
}
|
|
||||||
sort.Sort(sortPriorityNodeSiblings(*tmp))
|
|
||||||
for i := len(*tmp) - 1; i >= 0; i-- {
|
|
||||||
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
|
|
||||||
}
|
|
||||||
for k := n.kids; k != nil; k = k.next {
|
|
||||||
if k.walkReadyInOrder(openParent, tmp, f) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type sortPriorityNodeSiblings []*priorityNode
|
|
||||||
|
|
||||||
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
|
|
||||||
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
|
|
||||||
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
|
|
||||||
// Prefer the subtree that has sent fewer bytes relative to its weight.
|
|
||||||
// See sections 5.3.2 and 5.3.4.
|
|
||||||
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
|
|
||||||
wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
|
|
||||||
if bi == 0 && bk == 0 {
|
|
||||||
return wi >= wk
|
|
||||||
}
|
|
||||||
if bk == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return bi/bk <= wi/wk
|
|
||||||
}
|
|
||||||
|
|
||||||
type priorityWriteScheduler struct {
|
|
||||||
// root is the root of the priority tree, where root.id = 0.
|
|
||||||
// The root queues control frames that are not associated with any stream.
|
|
||||||
root priorityNode
|
|
||||||
|
|
||||||
// nodes maps stream ids to priority tree nodes.
|
|
||||||
nodes map[uint32]*priorityNode
|
|
||||||
|
|
||||||
// maxID is the maximum stream id in nodes.
|
|
||||||
maxID uint32
|
|
||||||
|
|
||||||
// lists of nodes that have been closed or are idle, but are kept in
|
|
||||||
// the tree for improved prioritization. When the lengths exceed either
|
|
||||||
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
|
|
||||||
closedNodes, idleNodes []*priorityNode
|
|
||||||
|
|
||||||
// From the config.
|
|
||||||
maxClosedNodesInTree int
|
|
||||||
maxIdleNodesInTree int
|
|
||||||
writeThrottleLimit int32
|
|
||||||
enableWriteThrottle bool
|
|
||||||
|
|
||||||
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
|
|
||||||
tmp []*priorityNode
|
|
||||||
|
|
||||||
// pool of empty queues for reuse.
|
|
||||||
queuePool writeQueuePool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
|
||||||
// The stream may be currently idle but cannot be opened or closed.
|
|
||||||
if curr := ws.nodes[streamID]; curr != nil {
|
|
||||||
if curr.state != priorityNodeIdle {
|
|
||||||
panic(fmt.Sprintf("stream %d already opened", streamID))
|
|
||||||
}
|
|
||||||
curr.state = priorityNodeOpen
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// RFC 7540, Section 5.3.5:
|
|
||||||
// "All streams are initially assigned a non-exclusive dependency on stream 0x0.
|
|
||||||
// Pushed streams initially depend on their associated stream. In both cases,
|
|
||||||
// streams are assigned a default weight of 16."
|
|
||||||
parent := ws.nodes[options.PusherID]
|
|
||||||
if parent == nil {
|
|
||||||
parent = &ws.root
|
|
||||||
}
|
|
||||||
n := &priorityNode{
|
|
||||||
q: *ws.queuePool.get(),
|
|
||||||
id: streamID,
|
|
||||||
weight: priorityDefaultWeight,
|
|
||||||
state: priorityNodeOpen,
|
|
||||||
}
|
|
||||||
n.setParent(parent)
|
|
||||||
ws.nodes[streamID] = n
|
|
||||||
if streamID > ws.maxID {
|
|
||||||
ws.maxID = streamID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
|
|
||||||
if streamID == 0 {
|
|
||||||
panic("violation of WriteScheduler interface: cannot close stream 0")
|
|
||||||
}
|
|
||||||
if ws.nodes[streamID] == nil {
|
|
||||||
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
|
|
||||||
}
|
|
||||||
if ws.nodes[streamID].state != priorityNodeOpen {
|
|
||||||
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
|
|
||||||
}
|
|
||||||
|
|
||||||
n := ws.nodes[streamID]
|
|
||||||
n.state = priorityNodeClosed
|
|
||||||
n.addBytes(-n.bytes)
|
|
||||||
|
|
||||||
q := n.q
|
|
||||||
ws.queuePool.put(&q)
|
|
||||||
n.q.s = nil
|
|
||||||
if ws.maxClosedNodesInTree > 0 {
|
|
||||||
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
|
|
||||||
} else {
|
|
||||||
ws.removeNode(n)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
|
||||||
if streamID == 0 {
|
|
||||||
panic("adjustPriority on root")
|
|
||||||
}
|
|
||||||
|
|
||||||
// If streamID does not exist, there are two cases:
|
|
||||||
// - A closed stream that has been removed (this will have ID <= maxID)
|
|
||||||
// - An idle stream that is being used for "grouping" (this will have ID > maxID)
|
|
||||||
n := ws.nodes[streamID]
|
|
||||||
if n == nil {
|
|
||||||
if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
ws.maxID = streamID
|
|
||||||
n = &priorityNode{
|
|
||||||
q: *ws.queuePool.get(),
|
|
||||||
id: streamID,
|
|
||||||
weight: priorityDefaultWeight,
|
|
||||||
state: priorityNodeIdle,
|
|
||||||
}
|
|
||||||
n.setParent(&ws.root)
|
|
||||||
ws.nodes[streamID] = n
|
|
||||||
ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section 5.3.1: A dependency on a stream that is not currently in the tree
|
|
||||||
// results in that stream being given a default priority (Section 5.3.5).
|
|
||||||
parent := ws.nodes[priority.StreamDep]
|
|
||||||
if parent == nil {
|
|
||||||
n.setParent(&ws.root)
|
|
||||||
n.weight = priorityDefaultWeight
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore if the client tries to make a node its own parent.
|
|
||||||
if n == parent {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section 5.3.3:
|
|
||||||
// "If a stream is made dependent on one of its own dependencies, the
|
|
||||||
// formerly dependent stream is first moved to be dependent on the
|
|
||||||
// reprioritized stream's previous parent. The moved dependency retains
|
|
||||||
// its weight."
|
|
||||||
//
|
|
||||||
// That is: if parent depends on n, move parent to depend on n.parent.
|
|
||||||
for x := parent.parent; x != nil; x = x.parent {
|
|
||||||
if x == n {
|
|
||||||
parent.setParent(n.parent)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section 5.3.3: The exclusive flag causes the stream to become the sole
|
|
||||||
// dependency of its parent stream, causing other dependencies to become
|
|
||||||
// dependent on the exclusive stream.
|
|
||||||
if priority.Exclusive {
|
|
||||||
k := parent.kids
|
|
||||||
for k != nil {
|
|
||||||
next := k.next
|
|
||||||
if k != n {
|
|
||||||
k.setParent(n)
|
|
||||||
}
|
|
||||||
k = next
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.setParent(parent)
|
|
||||||
n.weight = priority.Weight
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
|
|
||||||
var n *priorityNode
|
|
||||||
if id := wr.StreamID(); id == 0 {
|
|
||||||
n = &ws.root
|
|
||||||
} else {
|
|
||||||
n = ws.nodes[id]
|
|
||||||
if n == nil {
|
|
||||||
// id is an idle or closed stream. wr should not be a HEADERS or
|
|
||||||
// DATA frame. However, wr can be a RST_STREAM. In this case, we
|
|
||||||
// push wr onto the root, rather than creating a new priorityNode,
|
|
||||||
// since RST_STREAM is tiny and the stream's priority is unknown
|
|
||||||
// anyway. See issue #17919.
|
|
||||||
if wr.DataSize() > 0 {
|
|
||||||
panic("add DATA on non-open stream")
|
|
||||||
}
|
|
||||||
n = &ws.root
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n.q.push(wr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
|
|
||||||
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
|
|
||||||
limit := int32(math.MaxInt32)
|
|
||||||
if openParent {
|
|
||||||
limit = ws.writeThrottleLimit
|
|
||||||
}
|
|
||||||
wr, ok = n.q.consume(limit)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
n.addBytes(int64(wr.DataSize()))
|
|
||||||
// If B depends on A and B continuously has data available but A
|
|
||||||
// does not, gradually increase the throttling limit to allow B to
|
|
||||||
// steal more and more bandwidth from A.
|
|
||||||
if openParent {
|
|
||||||
ws.writeThrottleLimit += 1024
|
|
||||||
if ws.writeThrottleLimit < 0 {
|
|
||||||
ws.writeThrottleLimit = math.MaxInt32
|
|
||||||
}
|
|
||||||
} else if ws.enableWriteThrottle {
|
|
||||||
ws.writeThrottleLimit = 1024
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
return wr, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
|
|
||||||
if maxSize == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if len(*list) == maxSize {
|
|
||||||
// Remove the oldest node, then shift left.
|
|
||||||
ws.removeNode((*list)[0])
|
|
||||||
x := (*list)[1:]
|
|
||||||
copy(*list, x)
|
|
||||||
*list = (*list)[:len(x)]
|
|
||||||
}
|
|
||||||
*list = append(*list, n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
|
|
||||||
for k := n.kids; k != nil; k = k.next {
|
|
||||||
k.setParent(n.parent)
|
|
||||||
}
|
|
||||||
n.setParent(nil)
|
|
||||||
delete(ws.nodes, n.id)
|
|
||||||
}
|
|
72
vendor/golang.org/x/net/http2/writesched_random.go
generated
vendored
72
vendor/golang.org/x/net/http2/writesched_random.go
generated
vendored
@ -1,72 +0,0 @@
|
|||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package http2
|
|
||||||
|
|
||||||
import "math"
|
|
||||||
|
|
||||||
// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
|
|
||||||
// priorities. Control frames like SETTINGS and PING are written before DATA
|
|
||||||
// frames, but if no control frames are queued and multiple streams have queued
|
|
||||||
// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
|
|
||||||
func NewRandomWriteScheduler() WriteScheduler {
|
|
||||||
return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
|
|
||||||
}
|
|
||||||
|
|
||||||
type randomWriteScheduler struct {
|
|
||||||
// zero are frames not associated with a specific stream.
|
|
||||||
zero writeQueue
|
|
||||||
|
|
||||||
// sq contains the stream-specific queues, keyed by stream ID.
|
|
||||||
// When a stream is idle or closed, it's deleted from the map.
|
|
||||||
sq map[uint32]*writeQueue
|
|
||||||
|
|
||||||
// pool of empty queues for reuse.
|
|
||||||
queuePool writeQueuePool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
|
|
||||||
// no-op: idle streams are not tracked
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
|
|
||||||
q, ok := ws.sq[streamID]
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
delete(ws.sq, streamID)
|
|
||||||
ws.queuePool.put(q)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
|
|
||||||
// no-op: priorities are ignored
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
|
|
||||||
id := wr.StreamID()
|
|
||||||
if id == 0 {
|
|
||||||
ws.zero.push(wr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
q, ok := ws.sq[id]
|
|
||||||
if !ok {
|
|
||||||
q = ws.queuePool.get()
|
|
||||||
ws.sq[id] = q
|
|
||||||
}
|
|
||||||
q.push(wr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
|
|
||||||
// Control frames first.
|
|
||||||
if !ws.zero.empty() {
|
|
||||||
return ws.zero.shift(), true
|
|
||||||
}
|
|
||||||
// Iterate over all non-idle streams until finding one that can be consumed.
|
|
||||||
for _, q := range ws.sq {
|
|
||||||
if wr, ok := q.consume(math.MaxInt32); ok {
|
|
||||||
return wr, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return FrameWriteRequest{}, false
|
|
||||||
}
|
|
734
vendor/golang.org/x/net/idna/idna10.0.0.go
generated
vendored
734
vendor/golang.org/x/net/idna/idna10.0.0.go
generated
vendored
@ -1,734 +0,0 @@
|
|||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build go1.10
|
|
||||||
|
|
||||||
// Package idna implements IDNA2008 using the compatibility processing
|
|
||||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
|
||||||
// deal with the transition from IDNA2003.
|
|
||||||
//
|
|
||||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
|
||||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
|
||||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
|
||||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
|
||||||
// differences between these two standards.
|
|
||||||
package idna // import "golang.org/x/net/idna"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/text/secure/bidirule"
|
|
||||||
"golang.org/x/text/unicode/bidi"
|
|
||||||
"golang.org/x/text/unicode/norm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
|
||||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
|
||||||
// evaluated string as lookup.
|
|
||||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
|
||||||
// Other strategies are also viable, though:
|
|
||||||
// Option 1) Return an empty string in case of error, but allow the user to
|
|
||||||
// specify explicitly which errors to ignore.
|
|
||||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
|
||||||
// string, otherwise return the empty string in case of error.
|
|
||||||
// Option 3) Option 1 and 2.
|
|
||||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
|
||||||
// needed, and document that the return string may not be empty in case of
|
|
||||||
// error in the future.
|
|
||||||
// I think Option 1 is best, but it is quite opinionated.
|
|
||||||
|
|
||||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
|
||||||
func ToASCII(s string) (string, error) {
|
|
||||||
return Punycode.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
|
||||||
func ToUnicode(s string) (string, error) {
|
|
||||||
return Punycode.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Option configures a Profile at creation time.
|
|
||||||
type Option func(*options)
|
|
||||||
|
|
||||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
|
||||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
|
||||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
|
||||||
// compatibility. It is used by most browsers when resolving domain names. This
|
|
||||||
// option is only meaningful if combined with MapForLookup.
|
|
||||||
func Transitional(transitional bool) Option {
|
|
||||||
return func(o *options) { o.transitional = true }
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
|
||||||
// are longer than allowed by the RFC.
|
|
||||||
func VerifyDNSLength(verify bool) Option {
|
|
||||||
return func(o *options) { o.verifyDNSLength = verify }
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
|
||||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
|
||||||
//
|
|
||||||
// This is the behavior suggested by the UTS #46 and is adopted by some
|
|
||||||
// browsers.
|
|
||||||
func RemoveLeadingDots(remove bool) Option {
|
|
||||||
return func(o *options) { o.removeLeadingDots = remove }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
|
||||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
|
||||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
|
||||||
func ValidateLabels(enable bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
// Don't override existing mappings, but set one that at least checks
|
|
||||||
// normalization if it is not set.
|
|
||||||
if o.mapping == nil && enable {
|
|
||||||
o.mapping = normalize
|
|
||||||
}
|
|
||||||
o.trie = trie
|
|
||||||
o.validateLabels = enable
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StrictDomainName limits the set of permissible ASCII characters to those
|
|
||||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
|
||||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
|
||||||
//
|
|
||||||
// This option is useful, for instance, for browsers that allow characters
|
|
||||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
|
||||||
// http://www.rfc-editor.org/std/std3.txt for more details This option
|
|
||||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
|
||||||
func StrictDomainName(use bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.trie = trie
|
|
||||||
o.useSTD3Rules = use
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: the following options pull in tables. The tables should not be linked
|
|
||||||
// in as long as the options are not used.
|
|
||||||
|
|
||||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
|
||||||
// that relies on proper validation of labels should include this rule.
|
|
||||||
func BidiRule() Option {
|
|
||||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
|
||||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
|
||||||
func ValidateForRegistration() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateRegistration
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
VerifyDNSLength(true)(o)
|
|
||||||
BidiRule()(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
|
||||||
// transformed for domain name lookup according to the requirements set out in
|
|
||||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
|
||||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
|
||||||
// to add this check.
|
|
||||||
//
|
|
||||||
// The mappings include normalization and mapping case, width and other
|
|
||||||
// compatibility mappings.
|
|
||||||
func MapForLookup() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateAndMap
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type options struct {
|
|
||||||
transitional bool
|
|
||||||
useSTD3Rules bool
|
|
||||||
validateLabels bool
|
|
||||||
verifyDNSLength bool
|
|
||||||
removeLeadingDots bool
|
|
||||||
|
|
||||||
trie *idnaTrie
|
|
||||||
|
|
||||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
|
||||||
fromPuny func(p *Profile, s string) error
|
|
||||||
|
|
||||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
|
||||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
|
||||||
mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
|
|
||||||
|
|
||||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
|
||||||
// defined in RFC 5893.
|
|
||||||
bidirule func(s string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Profile defines the configuration of an IDNA mapper.
|
|
||||||
type Profile struct {
|
|
||||||
options
|
|
||||||
}
|
|
||||||
|
|
||||||
func apply(o *options, opts []Option) {
|
|
||||||
for _, f := range opts {
|
|
||||||
f(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Profile.
|
|
||||||
//
|
|
||||||
// With no options, the returned Profile is the most permissive and equals the
|
|
||||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
|
||||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
|
||||||
// for lookup and registration purposes respectively, which can be tailored by
|
|
||||||
// adding more fine-grained options, where later options override earlier
|
|
||||||
// options.
|
|
||||||
func New(o ...Option) *Profile {
|
|
||||||
p := &Profile{}
|
|
||||||
apply(&p.options, o)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
|
||||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
|
||||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToASCII(s string) (string, error) {
|
|
||||||
return p.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
|
||||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
|
||||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
|
||||||
pp := *p
|
|
||||||
pp.transitional = false
|
|
||||||
return pp.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String reports a string with a description of the profile for debugging
|
|
||||||
// purposes. The string format may change with different versions.
|
|
||||||
func (p *Profile) String() string {
|
|
||||||
s := ""
|
|
||||||
if p.transitional {
|
|
||||||
s = "Transitional"
|
|
||||||
} else {
|
|
||||||
s = "NonTransitional"
|
|
||||||
}
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
s += ":UseSTD3Rules"
|
|
||||||
}
|
|
||||||
if p.validateLabels {
|
|
||||||
s += ":ValidateLabels"
|
|
||||||
}
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
s += ":VerifyDNSLength"
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
|
||||||
// of validation.
|
|
||||||
Punycode *Profile = punycode
|
|
||||||
|
|
||||||
// Lookup is the recommended profile for looking up domain names, according
|
|
||||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
|
||||||
// change over time.
|
|
||||||
Lookup *Profile = lookup
|
|
||||||
|
|
||||||
// Display is the recommended profile for displaying domain names.
|
|
||||||
// The configuration of this profile may change over time.
|
|
||||||
Display *Profile = display
|
|
||||||
|
|
||||||
// Registration is the recommended profile for checking whether a given
|
|
||||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
|
||||||
Registration *Profile = registration
|
|
||||||
|
|
||||||
punycode = &Profile{}
|
|
||||||
lookup = &Profile{options{
|
|
||||||
transitional: true,
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
display = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
registration = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
verifyDNSLength: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateRegistration,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// TODO: profiles
|
|
||||||
// Register: recommended for approving domain names: don't do any mappings
|
|
||||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
|
||||||
)
|
|
||||||
|
|
||||||
type labelError struct{ label, code_ string }
|
|
||||||
|
|
||||||
func (e labelError) code() string { return e.code_ }
|
|
||||||
func (e labelError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
|
||||||
}
|
|
||||||
|
|
||||||
type runeError rune
|
|
||||||
|
|
||||||
func (e runeError) code() string { return "P1" }
|
|
||||||
func (e runeError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// process implements the algorithm described in section 4 of UTS #46,
|
|
||||||
// see https://www.unicode.org/reports/tr46.
|
|
||||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
|
||||||
var err error
|
|
||||||
var isBidi bool
|
|
||||||
if p.mapping != nil {
|
|
||||||
s, isBidi, err = p.mapping(p, s)
|
|
||||||
}
|
|
||||||
// Remove leading empty labels.
|
|
||||||
if p.removeLeadingDots {
|
|
||||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO: allow for a quick check of the tables data.
|
|
||||||
// It seems like we should only create this error on ToASCII, but the
|
|
||||||
// UTS 46 conformance tests suggests we should always check this.
|
|
||||||
if err == nil && p.verifyDNSLength && s == "" {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
labels := labelIter{orig: s}
|
|
||||||
for ; !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if label == "" {
|
|
||||||
// Empty labels are not okay. The label iterator skips the last
|
|
||||||
// label if it is empty.
|
|
||||||
if err == nil && p.verifyDNSLength {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(label, acePrefix) {
|
|
||||||
u, err2 := decode(label[len(acePrefix):])
|
|
||||||
if err2 != nil {
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
// Spec says keep the old label.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
|
|
||||||
labels.set(u)
|
|
||||||
if err == nil && p.validateLabels {
|
|
||||||
err = p.fromPuny(p, u)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
// This should be called on NonTransitional, according to the
|
|
||||||
// spec, but that currently does not have any effect. Use the
|
|
||||||
// original profile to preserve options.
|
|
||||||
err = p.validateLabel(u)
|
|
||||||
}
|
|
||||||
} else if err == nil {
|
|
||||||
err = p.validateLabel(label)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isBidi && p.bidirule != nil && err == nil {
|
|
||||||
for labels.reset(); !labels.done(); labels.next() {
|
|
||||||
if !p.bidirule(labels.label()) {
|
|
||||||
err = &labelError{s, "B"}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if toASCII {
|
|
||||||
for labels.reset(); !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if !ascii(label) {
|
|
||||||
a, err2 := encode(acePrefix, label)
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
label = a
|
|
||||||
labels.set(a)
|
|
||||||
}
|
|
||||||
n := len(label)
|
|
||||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
|
||||||
err = &labelError{label, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s = labels.result()
|
|
||||||
if toASCII && p.verifyDNSLength && err == nil {
|
|
||||||
// Compute the length of the domain name minus the root label and its dot.
|
|
||||||
n := len(s)
|
|
||||||
if n > 0 && s[n-1] == '.' {
|
|
||||||
n--
|
|
||||||
}
|
|
||||||
if len(s) < 1 || n > 253 {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
|
|
||||||
// TODO: consider first doing a quick check to see if any of these checks
|
|
||||||
// need to be done. This will make it slower in the general case, but
|
|
||||||
// faster in the common case.
|
|
||||||
mapped = norm.NFC.String(s)
|
|
||||||
isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
|
|
||||||
return mapped, isBidi, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
|
|
||||||
// TODO: filter need for normalization in loop below.
|
|
||||||
if !norm.NFC.IsNormalString(s) {
|
|
||||||
return s, false, &labelError{s, "V1"}
|
|
||||||
}
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
if sz == 0 {
|
|
||||||
return s, bidi, runeError(utf8.RuneError)
|
|
||||||
}
|
|
||||||
bidi = bidi || info(v).isBidi(s[i:])
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
|
||||||
// for strict conformance to IDNA2008.
|
|
||||||
case valid, deviation:
|
|
||||||
case disallowed, mapped, unknown, ignored:
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
|
||||||
return s, bidi, runeError(r)
|
|
||||||
}
|
|
||||||
i += sz
|
|
||||||
}
|
|
||||||
return s, bidi, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) isBidi(s string) bool {
|
|
||||||
if !c.isMapped() {
|
|
||||||
return c&attributesMask == rtl
|
|
||||||
}
|
|
||||||
// TODO: also store bidi info for mapped data. This is possible, but a bit
|
|
||||||
// cumbersome and not for the common case.
|
|
||||||
p, _ := bidi.LookupString(s)
|
|
||||||
switch p.Class() {
|
|
||||||
case bidi.R, bidi.AL, bidi.AN:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
|
|
||||||
var (
|
|
||||||
b []byte
|
|
||||||
k int
|
|
||||||
)
|
|
||||||
// combinedInfoBits contains the or-ed bits of all runes. We use this
|
|
||||||
// to derive the mayNeedNorm bit later. This may trigger normalization
|
|
||||||
// overeagerly, but it will not do so in the common case. The end result
|
|
||||||
// is another 10% saving on BenchmarkProfile for the common case.
|
|
||||||
var combinedInfoBits info
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
if sz == 0 {
|
|
||||||
b = append(b, s[k:i]...)
|
|
||||||
b = append(b, "\ufffd"...)
|
|
||||||
k = len(s)
|
|
||||||
if err == nil {
|
|
||||||
err = runeError(utf8.RuneError)
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
combinedInfoBits |= info(v)
|
|
||||||
bidi = bidi || info(v).isBidi(s[i:])
|
|
||||||
start := i
|
|
||||||
i += sz
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
case valid:
|
|
||||||
continue
|
|
||||||
case disallowed:
|
|
||||||
if err == nil {
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
|
||||||
err = runeError(r)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
case mapped, deviation:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = info(v).appendMapping(b, s[start:i])
|
|
||||||
case ignored:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
// drop the rune
|
|
||||||
case unknown:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = append(b, "\ufffd"...)
|
|
||||||
}
|
|
||||||
k = i
|
|
||||||
}
|
|
||||||
if k == 0 {
|
|
||||||
// No changes so far.
|
|
||||||
if combinedInfoBits&mayNeedNorm != 0 {
|
|
||||||
s = norm.NFC.String(s)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
b = append(b, s[k:]...)
|
|
||||||
if norm.NFC.QuickSpan(b) != len(b) {
|
|
||||||
b = norm.NFC.Bytes(b)
|
|
||||||
}
|
|
||||||
// TODO: the punycode converters require strings as input.
|
|
||||||
s = string(b)
|
|
||||||
}
|
|
||||||
return s, bidi, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// A labelIter allows iterating over domain name labels.
|
|
||||||
type labelIter struct {
|
|
||||||
orig string
|
|
||||||
slice []string
|
|
||||||
curStart int
|
|
||||||
curEnd int
|
|
||||||
i int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) reset() {
|
|
||||||
l.curStart = 0
|
|
||||||
l.curEnd = 0
|
|
||||||
l.i = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) done() bool {
|
|
||||||
return l.curStart >= len(l.orig)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) result() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return strings.Join(l.slice, ".")
|
|
||||||
}
|
|
||||||
return l.orig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) label() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return l.slice[l.i]
|
|
||||||
}
|
|
||||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
|
||||||
l.curEnd = l.curStart + p
|
|
||||||
if p == -1 {
|
|
||||||
l.curEnd = len(l.orig)
|
|
||||||
}
|
|
||||||
return l.orig[l.curStart:l.curEnd]
|
|
||||||
}
|
|
||||||
|
|
||||||
// next sets the value to the next label. It skips the last label if it is empty.
|
|
||||||
func (l *labelIter) next() {
|
|
||||||
l.i++
|
|
||||||
if l.slice != nil {
|
|
||||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
l.curStart = l.curEnd + 1
|
|
||||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) set(s string) {
|
|
||||||
if l.slice == nil {
|
|
||||||
l.slice = strings.Split(l.orig, ".")
|
|
||||||
}
|
|
||||||
l.slice[l.i] = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
|
||||||
const acePrefix = "xn--"
|
|
||||||
|
|
||||||
func (p *Profile) simplify(cat category) category {
|
|
||||||
switch cat {
|
|
||||||
case disallowedSTD3Mapped:
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
cat = disallowed
|
|
||||||
} else {
|
|
||||||
cat = mapped
|
|
||||||
}
|
|
||||||
case disallowedSTD3Valid:
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
cat = disallowed
|
|
||||||
} else {
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
case deviation:
|
|
||||||
if !p.transitional {
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
case validNV8, validXV8:
|
|
||||||
// TODO: handle V2008
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
return cat
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateFromPunycode(p *Profile, s string) error {
|
|
||||||
if !norm.NFC.IsNormalString(s) {
|
|
||||||
return &labelError{s, "V1"}
|
|
||||||
}
|
|
||||||
// TODO: detect whether string may have to be normalized in the following
|
|
||||||
// loop.
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
if sz == 0 {
|
|
||||||
return runeError(utf8.RuneError)
|
|
||||||
}
|
|
||||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
|
||||||
return &labelError{s, "V6"}
|
|
||||||
}
|
|
||||||
i += sz
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
zwnj = "\u200c"
|
|
||||||
zwj = "\u200d"
|
|
||||||
)
|
|
||||||
|
|
||||||
type joinState int8
|
|
||||||
|
|
||||||
const (
|
|
||||||
stateStart joinState = iota
|
|
||||||
stateVirama
|
|
||||||
stateBefore
|
|
||||||
stateBeforeVirama
|
|
||||||
stateAfter
|
|
||||||
stateFAIL
|
|
||||||
)
|
|
||||||
|
|
||||||
var joinStates = [][numJoinTypes]joinState{
|
|
||||||
stateStart: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateVirama,
|
|
||||||
},
|
|
||||||
stateVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
},
|
|
||||||
stateBefore: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
joinZWNJ: stateAfter,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateBeforeVirama,
|
|
||||||
},
|
|
||||||
stateBeforeVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
},
|
|
||||||
stateAfter: {
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateAfter,
|
|
||||||
joiningR: stateStart,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
|
||||||
},
|
|
||||||
stateFAIL: {
|
|
||||||
0: stateFAIL,
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateFAIL,
|
|
||||||
joiningT: stateFAIL,
|
|
||||||
joiningR: stateFAIL,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateFAIL,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
|
|
||||||
// already implicitly satisfied by the overall implementation.
|
|
||||||
func (p *Profile) validateLabel(s string) (err error) {
|
|
||||||
if s == "" {
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
return &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if !p.validateLabels {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
trie := p.trie // p.validateLabels is only set if trie is set.
|
|
||||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
|
||||||
return &labelError{s, "V2"}
|
|
||||||
}
|
|
||||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
|
||||||
return &labelError{s, "V3"}
|
|
||||||
}
|
|
||||||
// TODO: merge the use of this in the trie.
|
|
||||||
v, sz := trie.lookupString(s)
|
|
||||||
x := info(v)
|
|
||||||
if x.isModifier() {
|
|
||||||
return &labelError{s, "V5"}
|
|
||||||
}
|
|
||||||
// Quickly return in the absence of zero-width (non) joiners.
|
|
||||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
st := stateStart
|
|
||||||
for i := 0; ; {
|
|
||||||
jt := x.joinType()
|
|
||||||
if s[i:i+sz] == zwj {
|
|
||||||
jt = joinZWJ
|
|
||||||
} else if s[i:i+sz] == zwnj {
|
|
||||||
jt = joinZWNJ
|
|
||||||
}
|
|
||||||
st = joinStates[st][jt]
|
|
||||||
if x.isViramaModifier() {
|
|
||||||
st = joinStates[st][joinVirama]
|
|
||||||
}
|
|
||||||
if i += sz; i == len(s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v, sz = trie.lookupString(s[i:])
|
|
||||||
x = info(v)
|
|
||||||
}
|
|
||||||
if st == stateFAIL || st == stateAfter {
|
|
||||||
return &labelError{s, "C"}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ascii(s string) bool {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] >= utf8.RuneSelf {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
682
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
682
vendor/golang.org/x/net/idna/idna9.0.0.go
generated
vendored
@ -1,682 +0,0 @@
|
|||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !go1.10
|
|
||||||
|
|
||||||
// Package idna implements IDNA2008 using the compatibility processing
|
|
||||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
|
||||||
// deal with the transition from IDNA2003.
|
|
||||||
//
|
|
||||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
|
||||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
|
||||||
// UTS #46 is defined in https://www.unicode.org/reports/tr46.
|
|
||||||
// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
|
||||||
// differences between these two standards.
|
|
||||||
package idna // import "golang.org/x/net/idna"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"golang.org/x/text/secure/bidirule"
|
|
||||||
"golang.org/x/text/unicode/norm"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
|
||||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
|
||||||
// evaluated string as lookup.
|
|
||||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
|
||||||
// Other strategies are also viable, though:
|
|
||||||
// Option 1) Return an empty string in case of error, but allow the user to
|
|
||||||
// specify explicitly which errors to ignore.
|
|
||||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
|
||||||
// string, otherwise return the empty string in case of error.
|
|
||||||
// Option 3) Option 1 and 2.
|
|
||||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
|
||||||
// needed, and document that the return string may not be empty in case of
|
|
||||||
// error in the future.
|
|
||||||
// I think Option 1 is best, but it is quite opinionated.
|
|
||||||
|
|
||||||
// ToASCII is a wrapper for Punycode.ToASCII.
|
|
||||||
func ToASCII(s string) (string, error) {
|
|
||||||
return Punycode.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode is a wrapper for Punycode.ToUnicode.
|
|
||||||
func ToUnicode(s string) (string, error) {
|
|
||||||
return Punycode.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Option configures a Profile at creation time.
|
|
||||||
type Option func(*options)
|
|
||||||
|
|
||||||
// Transitional sets a Profile to use the Transitional mapping as defined in UTS
|
|
||||||
// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
|
|
||||||
// transitional mapping provides a compromise between IDNA2003 and IDNA2008
|
|
||||||
// compatibility. It is used by most browsers when resolving domain names. This
|
|
||||||
// option is only meaningful if combined with MapForLookup.
|
|
||||||
func Transitional(transitional bool) Option {
|
|
||||||
return func(o *options) { o.transitional = true }
|
|
||||||
}
|
|
||||||
|
|
||||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
|
||||||
// are longer than allowed by the RFC.
|
|
||||||
func VerifyDNSLength(verify bool) Option {
|
|
||||||
return func(o *options) { o.verifyDNSLength = verify }
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveLeadingDots removes leading label separators. Leading runes that map to
|
|
||||||
// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
|
|
||||||
//
|
|
||||||
// This is the behavior suggested by the UTS #46 and is adopted by some
|
|
||||||
// browsers.
|
|
||||||
func RemoveLeadingDots(remove bool) Option {
|
|
||||||
return func(o *options) { o.removeLeadingDots = remove }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLabels sets whether to check the mandatory label validation criteria
|
|
||||||
// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
|
|
||||||
// of hyphens ('-'), normalization, validity of runes, and the context rules.
|
|
||||||
func ValidateLabels(enable bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
// Don't override existing mappings, but set one that at least checks
|
|
||||||
// normalization if it is not set.
|
|
||||||
if o.mapping == nil && enable {
|
|
||||||
o.mapping = normalize
|
|
||||||
}
|
|
||||||
o.trie = trie
|
|
||||||
o.validateLabels = enable
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StrictDomainName limits the set of permissable ASCII characters to those
|
|
||||||
// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
|
|
||||||
// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
|
|
||||||
//
|
|
||||||
// This option is useful, for instance, for browsers that allow characters
|
|
||||||
// outside this range, for example a '_' (U+005F LOW LINE). See
|
|
||||||
// http://www.rfc-editor.org/std/std3.txt for more details This option
|
|
||||||
// corresponds to the UseSTD3ASCIIRules option in UTS #46.
|
|
||||||
func StrictDomainName(use bool) Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.trie = trie
|
|
||||||
o.useSTD3Rules = use
|
|
||||||
o.fromPuny = validateFromPunycode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: the following options pull in tables. The tables should not be linked
|
|
||||||
// in as long as the options are not used.
|
|
||||||
|
|
||||||
// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
|
|
||||||
// that relies on proper validation of labels should include this rule.
|
|
||||||
func BidiRule() Option {
|
|
||||||
return func(o *options) { o.bidirule = bidirule.ValidString }
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateForRegistration sets validation options to verify that a given IDN is
|
|
||||||
// properly formatted for registration as defined by Section 4 of RFC 5891.
|
|
||||||
func ValidateForRegistration() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateRegistration
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
VerifyDNSLength(true)(o)
|
|
||||||
BidiRule()(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapForLookup sets validation and mapping options such that a given IDN is
|
|
||||||
// transformed for domain name lookup according to the requirements set out in
|
|
||||||
// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
|
|
||||||
// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
|
|
||||||
// to add this check.
|
|
||||||
//
|
|
||||||
// The mappings include normalization and mapping case, width and other
|
|
||||||
// compatibility mappings.
|
|
||||||
func MapForLookup() Option {
|
|
||||||
return func(o *options) {
|
|
||||||
o.mapping = validateAndMap
|
|
||||||
StrictDomainName(true)(o)
|
|
||||||
ValidateLabels(true)(o)
|
|
||||||
RemoveLeadingDots(true)(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type options struct {
|
|
||||||
transitional bool
|
|
||||||
useSTD3Rules bool
|
|
||||||
validateLabels bool
|
|
||||||
verifyDNSLength bool
|
|
||||||
removeLeadingDots bool
|
|
||||||
|
|
||||||
trie *idnaTrie
|
|
||||||
|
|
||||||
// fromPuny calls validation rules when converting A-labels to U-labels.
|
|
||||||
fromPuny func(p *Profile, s string) error
|
|
||||||
|
|
||||||
// mapping implements a validation and mapping step as defined in RFC 5895
|
|
||||||
// or UTS 46, tailored to, for example, domain registration or lookup.
|
|
||||||
mapping func(p *Profile, s string) (string, error)
|
|
||||||
|
|
||||||
// bidirule, if specified, checks whether s conforms to the Bidi Rule
|
|
||||||
// defined in RFC 5893.
|
|
||||||
bidirule func(s string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Profile defines the configuration of a IDNA mapper.
|
|
||||||
type Profile struct {
|
|
||||||
options
|
|
||||||
}
|
|
||||||
|
|
||||||
func apply(o *options, opts []Option) {
|
|
||||||
for _, f := range opts {
|
|
||||||
f(o)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new Profile.
|
|
||||||
//
|
|
||||||
// With no options, the returned Profile is the most permissive and equals the
|
|
||||||
// Punycode Profile. Options can be passed to further restrict the Profile. The
|
|
||||||
// MapForLookup and ValidateForRegistration options set a collection of options,
|
|
||||||
// for lookup and registration purposes respectively, which can be tailored by
|
|
||||||
// adding more fine-grained options, where later options override earlier
|
|
||||||
// options.
|
|
||||||
func New(o ...Option) *Profile {
|
|
||||||
p := &Profile{}
|
|
||||||
apply(&p.options, o)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
|
||||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
|
||||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToASCII(s string) (string, error) {
|
|
||||||
return p.process(s, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
|
||||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
|
||||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
|
||||||
// an error and a (partially) processed result.
|
|
||||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
|
||||||
pp := *p
|
|
||||||
pp.transitional = false
|
|
||||||
return pp.process(s, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String reports a string with a description of the profile for debugging
|
|
||||||
// purposes. The string format may change with different versions.
|
|
||||||
func (p *Profile) String() string {
|
|
||||||
s := ""
|
|
||||||
if p.transitional {
|
|
||||||
s = "Transitional"
|
|
||||||
} else {
|
|
||||||
s = "NonTransitional"
|
|
||||||
}
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
s += ":UseSTD3Rules"
|
|
||||||
}
|
|
||||||
if p.validateLabels {
|
|
||||||
s += ":ValidateLabels"
|
|
||||||
}
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
s += ":VerifyDNSLength"
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
// Punycode is a Profile that does raw punycode processing with a minimum
|
|
||||||
// of validation.
|
|
||||||
Punycode *Profile = punycode
|
|
||||||
|
|
||||||
// Lookup is the recommended profile for looking up domain names, according
|
|
||||||
// to Section 5 of RFC 5891. The exact configuration of this profile may
|
|
||||||
// change over time.
|
|
||||||
Lookup *Profile = lookup
|
|
||||||
|
|
||||||
// Display is the recommended profile for displaying domain names.
|
|
||||||
// The configuration of this profile may change over time.
|
|
||||||
Display *Profile = display
|
|
||||||
|
|
||||||
// Registration is the recommended profile for checking whether a given
|
|
||||||
// IDN is valid for registration, according to Section 4 of RFC 5891.
|
|
||||||
Registration *Profile = registration
|
|
||||||
|
|
||||||
punycode = &Profile{}
|
|
||||||
lookup = &Profile{options{
|
|
||||||
transitional: true,
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
removeLeadingDots: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
display = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
removeLeadingDots: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateAndMap,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
registration = &Profile{options{
|
|
||||||
useSTD3Rules: true,
|
|
||||||
validateLabels: true,
|
|
||||||
verifyDNSLength: true,
|
|
||||||
trie: trie,
|
|
||||||
fromPuny: validateFromPunycode,
|
|
||||||
mapping: validateRegistration,
|
|
||||||
bidirule: bidirule.ValidString,
|
|
||||||
}}
|
|
||||||
|
|
||||||
// TODO: profiles
|
|
||||||
// Register: recommended for approving domain names: don't do any mappings
|
|
||||||
// but rather reject on invalid input. Bundle or block deviation characters.
|
|
||||||
)
|
|
||||||
|
|
||||||
type labelError struct{ label, code_ string }
|
|
||||||
|
|
||||||
func (e labelError) code() string { return e.code_ }
|
|
||||||
func (e labelError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
|
||||||
}
|
|
||||||
|
|
||||||
type runeError rune
|
|
||||||
|
|
||||||
func (e runeError) code() string { return "P1" }
|
|
||||||
func (e runeError) Error() string {
|
|
||||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
|
||||||
}
|
|
||||||
|
|
||||||
// process implements the algorithm described in section 4 of UTS #46,
|
|
||||||
// see https://www.unicode.org/reports/tr46.
|
|
||||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
|
||||||
var err error
|
|
||||||
if p.mapping != nil {
|
|
||||||
s, err = p.mapping(p, s)
|
|
||||||
}
|
|
||||||
// Remove leading empty labels.
|
|
||||||
if p.removeLeadingDots {
|
|
||||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// It seems like we should only create this error on ToASCII, but the
|
|
||||||
// UTS 46 conformance tests suggests we should always check this.
|
|
||||||
if err == nil && p.verifyDNSLength && s == "" {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
labels := labelIter{orig: s}
|
|
||||||
for ; !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if label == "" {
|
|
||||||
// Empty labels are not okay. The label iterator skips the last
|
|
||||||
// label if it is empty.
|
|
||||||
if err == nil && p.verifyDNSLength {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(label, acePrefix) {
|
|
||||||
u, err2 := decode(label[len(acePrefix):])
|
|
||||||
if err2 != nil {
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
// Spec says keep the old label.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
labels.set(u)
|
|
||||||
if err == nil && p.validateLabels {
|
|
||||||
err = p.fromPuny(p, u)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
// This should be called on NonTransitional, according to the
|
|
||||||
// spec, but that currently does not have any effect. Use the
|
|
||||||
// original profile to preserve options.
|
|
||||||
err = p.validateLabel(u)
|
|
||||||
}
|
|
||||||
} else if err == nil {
|
|
||||||
err = p.validateLabel(label)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if toASCII {
|
|
||||||
for labels.reset(); !labels.done(); labels.next() {
|
|
||||||
label := labels.label()
|
|
||||||
if !ascii(label) {
|
|
||||||
a, err2 := encode(acePrefix, label)
|
|
||||||
if err == nil {
|
|
||||||
err = err2
|
|
||||||
}
|
|
||||||
label = a
|
|
||||||
labels.set(a)
|
|
||||||
}
|
|
||||||
n := len(label)
|
|
||||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
|
||||||
err = &labelError{label, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
s = labels.result()
|
|
||||||
if toASCII && p.verifyDNSLength && err == nil {
|
|
||||||
// Compute the length of the domain name minus the root label and its dot.
|
|
||||||
n := len(s)
|
|
||||||
if n > 0 && s[n-1] == '.' {
|
|
||||||
n--
|
|
||||||
}
|
|
||||||
if len(s) < 1 || n > 253 {
|
|
||||||
err = &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func normalize(p *Profile, s string) (string, error) {
|
|
||||||
return norm.NFC.String(s), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateRegistration(p *Profile, s string) (string, error) {
|
|
||||||
if !norm.NFC.IsNormalString(s) {
|
|
||||||
return s, &labelError{s, "V1"}
|
|
||||||
}
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
// TODO: handle the NV8 defined in the Unicode idna data set to allow
|
|
||||||
// for strict conformance to IDNA2008.
|
|
||||||
case valid, deviation:
|
|
||||||
case disallowed, mapped, unknown, ignored:
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
|
||||||
return s, runeError(r)
|
|
||||||
}
|
|
||||||
i += sz
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateAndMap(p *Profile, s string) (string, error) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
b []byte
|
|
||||||
k int
|
|
||||||
)
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
start := i
|
|
||||||
i += sz
|
|
||||||
// Copy bytes not copied so far.
|
|
||||||
switch p.simplify(info(v).category()) {
|
|
||||||
case valid:
|
|
||||||
continue
|
|
||||||
case disallowed:
|
|
||||||
if err == nil {
|
|
||||||
r, _ := utf8.DecodeRuneInString(s[start:])
|
|
||||||
err = runeError(r)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
case mapped, deviation:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = info(v).appendMapping(b, s[start:i])
|
|
||||||
case ignored:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
// drop the rune
|
|
||||||
case unknown:
|
|
||||||
b = append(b, s[k:start]...)
|
|
||||||
b = append(b, "\ufffd"...)
|
|
||||||
}
|
|
||||||
k = i
|
|
||||||
}
|
|
||||||
if k == 0 {
|
|
||||||
// No changes so far.
|
|
||||||
s = norm.NFC.String(s)
|
|
||||||
} else {
|
|
||||||
b = append(b, s[k:]...)
|
|
||||||
if norm.NFC.QuickSpan(b) != len(b) {
|
|
||||||
b = norm.NFC.Bytes(b)
|
|
||||||
}
|
|
||||||
// TODO: the punycode converters require strings as input.
|
|
||||||
s = string(b)
|
|
||||||
}
|
|
||||||
return s, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// A labelIter allows iterating over domain name labels.
|
|
||||||
type labelIter struct {
|
|
||||||
orig string
|
|
||||||
slice []string
|
|
||||||
curStart int
|
|
||||||
curEnd int
|
|
||||||
i int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) reset() {
|
|
||||||
l.curStart = 0
|
|
||||||
l.curEnd = 0
|
|
||||||
l.i = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) done() bool {
|
|
||||||
return l.curStart >= len(l.orig)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) result() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return strings.Join(l.slice, ".")
|
|
||||||
}
|
|
||||||
return l.orig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) label() string {
|
|
||||||
if l.slice != nil {
|
|
||||||
return l.slice[l.i]
|
|
||||||
}
|
|
||||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
|
||||||
l.curEnd = l.curStart + p
|
|
||||||
if p == -1 {
|
|
||||||
l.curEnd = len(l.orig)
|
|
||||||
}
|
|
||||||
return l.orig[l.curStart:l.curEnd]
|
|
||||||
}
|
|
||||||
|
|
||||||
// next sets the value to the next label. It skips the last label if it is empty.
|
|
||||||
func (l *labelIter) next() {
|
|
||||||
l.i++
|
|
||||||
if l.slice != nil {
|
|
||||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
l.curStart = l.curEnd + 1
|
|
||||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
|
||||||
l.curStart = len(l.orig)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *labelIter) set(s string) {
|
|
||||||
if l.slice == nil {
|
|
||||||
l.slice = strings.Split(l.orig, ".")
|
|
||||||
}
|
|
||||||
l.slice[l.i] = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
|
||||||
const acePrefix = "xn--"
|
|
||||||
|
|
||||||
func (p *Profile) simplify(cat category) category {
|
|
||||||
switch cat {
|
|
||||||
case disallowedSTD3Mapped:
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
cat = disallowed
|
|
||||||
} else {
|
|
||||||
cat = mapped
|
|
||||||
}
|
|
||||||
case disallowedSTD3Valid:
|
|
||||||
if p.useSTD3Rules {
|
|
||||||
cat = disallowed
|
|
||||||
} else {
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
case deviation:
|
|
||||||
if !p.transitional {
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
case validNV8, validXV8:
|
|
||||||
// TODO: handle V2008
|
|
||||||
cat = valid
|
|
||||||
}
|
|
||||||
return cat
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateFromPunycode(p *Profile, s string) error {
|
|
||||||
if !norm.NFC.IsNormalString(s) {
|
|
||||||
return &labelError{s, "V1"}
|
|
||||||
}
|
|
||||||
for i := 0; i < len(s); {
|
|
||||||
v, sz := trie.lookupString(s[i:])
|
|
||||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
|
||||||
return &labelError{s, "V6"}
|
|
||||||
}
|
|
||||||
i += sz
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
zwnj = "\u200c"
|
|
||||||
zwj = "\u200d"
|
|
||||||
)
|
|
||||||
|
|
||||||
type joinState int8
|
|
||||||
|
|
||||||
const (
|
|
||||||
stateStart joinState = iota
|
|
||||||
stateVirama
|
|
||||||
stateBefore
|
|
||||||
stateBeforeVirama
|
|
||||||
stateAfter
|
|
||||||
stateFAIL
|
|
||||||
)
|
|
||||||
|
|
||||||
var joinStates = [][numJoinTypes]joinState{
|
|
||||||
stateStart: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateVirama,
|
|
||||||
},
|
|
||||||
stateVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
},
|
|
||||||
stateBefore: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
joinZWNJ: stateAfter,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateBeforeVirama,
|
|
||||||
},
|
|
||||||
stateBeforeVirama: {
|
|
||||||
joiningL: stateBefore,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateBefore,
|
|
||||||
},
|
|
||||||
stateAfter: {
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateBefore,
|
|
||||||
joiningT: stateAfter,
|
|
||||||
joiningR: stateStart,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
|
||||||
},
|
|
||||||
stateFAIL: {
|
|
||||||
0: stateFAIL,
|
|
||||||
joiningL: stateFAIL,
|
|
||||||
joiningD: stateFAIL,
|
|
||||||
joiningT: stateFAIL,
|
|
||||||
joiningR: stateFAIL,
|
|
||||||
joinZWNJ: stateFAIL,
|
|
||||||
joinZWJ: stateFAIL,
|
|
||||||
joinVirama: stateFAIL,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are
|
|
||||||
// already implicitly satisfied by the overall implementation.
|
|
||||||
func (p *Profile) validateLabel(s string) error {
|
|
||||||
if s == "" {
|
|
||||||
if p.verifyDNSLength {
|
|
||||||
return &labelError{s, "A4"}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if p.bidirule != nil && !p.bidirule(s) {
|
|
||||||
return &labelError{s, "B"}
|
|
||||||
}
|
|
||||||
if !p.validateLabels {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
trie := p.trie // p.validateLabels is only set if trie is set.
|
|
||||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
|
||||||
return &labelError{s, "V2"}
|
|
||||||
}
|
|
||||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
|
||||||
return &labelError{s, "V3"}
|
|
||||||
}
|
|
||||||
// TODO: merge the use of this in the trie.
|
|
||||||
v, sz := trie.lookupString(s)
|
|
||||||
x := info(v)
|
|
||||||
if x.isModifier() {
|
|
||||||
return &labelError{s, "V5"}
|
|
||||||
}
|
|
||||||
// Quickly return in the absence of zero-width (non) joiners.
|
|
||||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
st := stateStart
|
|
||||||
for i := 0; ; {
|
|
||||||
jt := x.joinType()
|
|
||||||
if s[i:i+sz] == zwj {
|
|
||||||
jt = joinZWJ
|
|
||||||
} else if s[i:i+sz] == zwnj {
|
|
||||||
jt = joinZWNJ
|
|
||||||
}
|
|
||||||
st = joinStates[st][jt]
|
|
||||||
if x.isViramaModifier() {
|
|
||||||
st = joinStates[st][joinVirama]
|
|
||||||
}
|
|
||||||
if i += sz; i == len(s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v, sz = trie.lookupString(s[i:])
|
|
||||||
x = info(v)
|
|
||||||
}
|
|
||||||
if st == stateFAIL || st == stateAfter {
|
|
||||||
return &labelError{s, "C"}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func ascii(s string) bool {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] >= utf8.RuneSelf {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
203
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
203
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
@ -1,203 +0,0 @@
|
|||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package idna
|
|
||||||
|
|
||||||
// This file implements the Punycode algorithm from RFC 3492.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// These parameter values are specified in section 5.
|
|
||||||
//
|
|
||||||
// All computation is done with int32s, so that overflow behavior is identical
|
|
||||||
// regardless of whether int is 32-bit or 64-bit.
|
|
||||||
const (
|
|
||||||
base int32 = 36
|
|
||||||
damp int32 = 700
|
|
||||||
initialBias int32 = 72
|
|
||||||
initialN int32 = 128
|
|
||||||
skew int32 = 38
|
|
||||||
tmax int32 = 26
|
|
||||||
tmin int32 = 1
|
|
||||||
)
|
|
||||||
|
|
||||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
|
||||||
|
|
||||||
// decode decodes a string as specified in section 6.2.
|
|
||||||
func decode(encoded string) (string, error) {
|
|
||||||
if encoded == "" {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
pos := 1 + strings.LastIndex(encoded, "-")
|
|
||||||
if pos == 1 {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
if pos == len(encoded) {
|
|
||||||
return encoded[:len(encoded)-1], nil
|
|
||||||
}
|
|
||||||
output := make([]rune, 0, len(encoded))
|
|
||||||
if pos != 0 {
|
|
||||||
for _, r := range encoded[:pos-1] {
|
|
||||||
output = append(output, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
i, n, bias := int32(0), initialN, initialBias
|
|
||||||
for pos < len(encoded) {
|
|
||||||
oldI, w := i, int32(1)
|
|
||||||
for k := base; ; k += base {
|
|
||||||
if pos == len(encoded) {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
digit, ok := decodeDigit(encoded[pos])
|
|
||||||
if !ok {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
pos++
|
|
||||||
i += digit * w
|
|
||||||
if i < 0 {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
t := k - bias
|
|
||||||
if t < tmin {
|
|
||||||
t = tmin
|
|
||||||
} else if t > tmax {
|
|
||||||
t = tmax
|
|
||||||
}
|
|
||||||
if digit < t {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
w *= base - t
|
|
||||||
if w >= math.MaxInt32/base {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
x := int32(len(output) + 1)
|
|
||||||
bias = adapt(i-oldI, x, oldI == 0)
|
|
||||||
n += i / x
|
|
||||||
i %= x
|
|
||||||
if n > utf8.MaxRune || len(output) >= 1024 {
|
|
||||||
return "", punyError(encoded)
|
|
||||||
}
|
|
||||||
output = append(output, 0)
|
|
||||||
copy(output[i+1:], output[i:])
|
|
||||||
output[i] = n
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
return string(output), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// encode encodes a string as specified in section 6.3 and prepends prefix to
|
|
||||||
// the result.
|
|
||||||
//
|
|
||||||
// The "while h < length(input)" line in the specification becomes "for
|
|
||||||
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
|
|
||||||
func encode(prefix, s string) (string, error) {
|
|
||||||
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
|
|
||||||
copy(output, prefix)
|
|
||||||
delta, n, bias := int32(0), initialN, initialBias
|
|
||||||
b, remaining := int32(0), int32(0)
|
|
||||||
for _, r := range s {
|
|
||||||
if r < 0x80 {
|
|
||||||
b++
|
|
||||||
output = append(output, byte(r))
|
|
||||||
} else {
|
|
||||||
remaining++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
h := b
|
|
||||||
if b > 0 {
|
|
||||||
output = append(output, '-')
|
|
||||||
}
|
|
||||||
for remaining != 0 {
|
|
||||||
m := int32(0x7fffffff)
|
|
||||||
for _, r := range s {
|
|
||||||
if m > r && r >= n {
|
|
||||||
m = r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delta += (m - n) * (h + 1)
|
|
||||||
if delta < 0 {
|
|
||||||
return "", punyError(s)
|
|
||||||
}
|
|
||||||
n = m
|
|
||||||
for _, r := range s {
|
|
||||||
if r < n {
|
|
||||||
delta++
|
|
||||||
if delta < 0 {
|
|
||||||
return "", punyError(s)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if r > n {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
q := delta
|
|
||||||
for k := base; ; k += base {
|
|
||||||
t := k - bias
|
|
||||||
if t < tmin {
|
|
||||||
t = tmin
|
|
||||||
} else if t > tmax {
|
|
||||||
t = tmax
|
|
||||||
}
|
|
||||||
if q < t {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
output = append(output, encodeDigit(t+(q-t)%(base-t)))
|
|
||||||
q = (q - t) / (base - t)
|
|
||||||
}
|
|
||||||
output = append(output, encodeDigit(q))
|
|
||||||
bias = adapt(delta, h+1, h == b)
|
|
||||||
delta = 0
|
|
||||||
h++
|
|
||||||
remaining--
|
|
||||||
}
|
|
||||||
delta++
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
return string(output), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeDigit(x byte) (digit int32, ok bool) {
|
|
||||||
switch {
|
|
||||||
case '0' <= x && x <= '9':
|
|
||||||
return int32(x - ('0' - 26)), true
|
|
||||||
case 'A' <= x && x <= 'Z':
|
|
||||||
return int32(x - 'A'), true
|
|
||||||
case 'a' <= x && x <= 'z':
|
|
||||||
return int32(x - 'a'), true
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func encodeDigit(digit int32) byte {
|
|
||||||
switch {
|
|
||||||
case 0 <= digit && digit < 26:
|
|
||||||
return byte(digit + 'a')
|
|
||||||
case 26 <= digit && digit < 36:
|
|
||||||
return byte(digit + ('0' - 26))
|
|
||||||
}
|
|
||||||
panic("idna: internal error in punycode encoding")
|
|
||||||
}
|
|
||||||
|
|
||||||
// adapt is the bias adaptation function specified in section 6.1.
|
|
||||||
func adapt(delta, numPoints int32, firstTime bool) int32 {
|
|
||||||
if firstTime {
|
|
||||||
delta /= damp
|
|
||||||
} else {
|
|
||||||
delta /= 2
|
|
||||||
}
|
|
||||||
delta += delta / numPoints
|
|
||||||
k := int32(0)
|
|
||||||
for delta > ((base-tmin)*tmax)/2 {
|
|
||||||
delta /= base - tmin
|
|
||||||
k += base
|
|
||||||
}
|
|
||||||
return k + (base-tmin+1)*delta/(delta+skew)
|
|
||||||
}
|
|
4559
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
4559
vendor/golang.org/x/net/idna/tables10.0.0.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4653
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
4653
vendor/golang.org/x/net/idna/tables11.0.0.go
generated
vendored
File diff suppressed because it is too large
Load Diff
4486
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
4486
vendor/golang.org/x/net/idna/tables9.0.0.go
generated
vendored
File diff suppressed because it is too large
Load Diff
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
72
vendor/golang.org/x/net/idna/trie.go
generated
vendored
@ -1,72 +0,0 @@
|
|||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package idna
|
|
||||||
|
|
||||||
// appendMapping appends the mapping for the respective rune. isMapped must be
|
|
||||||
// true. A mapping is a categorization of a rune as defined in UTS #46.
|
|
||||||
func (c info) appendMapping(b []byte, s string) []byte {
|
|
||||||
index := int(c >> indexShift)
|
|
||||||
if c&xorBit == 0 {
|
|
||||||
s := mappings[index:]
|
|
||||||
return append(b, s[1:s[0]+1]...)
|
|
||||||
}
|
|
||||||
b = append(b, s...)
|
|
||||||
if c&inlineXOR == inlineXOR {
|
|
||||||
// TODO: support and handle two-byte inline masks
|
|
||||||
b[len(b)-1] ^= byte(index)
|
|
||||||
} else {
|
|
||||||
for p := len(b) - int(xorData[index]); p < len(b); p++ {
|
|
||||||
index++
|
|
||||||
b[p] ^= xorData[index]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sparse block handling code.
|
|
||||||
|
|
||||||
type valueRange struct {
|
|
||||||
value uint16 // header: value:stride
|
|
||||||
lo, hi byte // header: lo:n
|
|
||||||
}
|
|
||||||
|
|
||||||
type sparseBlocks struct {
|
|
||||||
values []valueRange
|
|
||||||
offset []uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
var idnaSparse = sparseBlocks{
|
|
||||||
values: idnaSparseValues[:],
|
|
||||||
offset: idnaSparseOffset[:],
|
|
||||||
}
|
|
||||||
|
|
||||||
// Don't use newIdnaTrie to avoid unconditional linking in of the table.
|
|
||||||
var trie = &idnaTrie{}
|
|
||||||
|
|
||||||
// lookup determines the type of block n and looks up the value for b.
|
|
||||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
|
||||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
|
||||||
// the value for b is by r.value + (b - r.lo) * stride.
|
|
||||||
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
|
|
||||||
offset := t.offset[n]
|
|
||||||
header := t.values[offset]
|
|
||||||
lo := offset + 1
|
|
||||||
hi := lo + uint16(header.lo)
|
|
||||||
for lo < hi {
|
|
||||||
m := lo + (hi-lo)/2
|
|
||||||
r := t.values[m]
|
|
||||||
if r.lo <= b && b <= r.hi {
|
|
||||||
return r.value + uint16(b-r.lo)*header.value
|
|
||||||
}
|
|
||||||
if b < r.lo {
|
|
||||||
hi = m
|
|
||||||
} else {
|
|
||||||
lo = m + 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
119
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
119
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
@ -1,119 +0,0 @@
|
|||||||
// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
|
||||||
|
|
||||||
package idna
|
|
||||||
|
|
||||||
// This file contains definitions for interpreting the trie value of the idna
|
|
||||||
// trie generated by "go run gen*.go". It is shared by both the generator
|
|
||||||
// program and the resultant package. Sharing is achieved by the generator
|
|
||||||
// copying gen_trieval.go to trieval.go and changing what's above this comment.
|
|
||||||
|
|
||||||
// info holds information from the IDNA mapping table for a single rune. It is
|
|
||||||
// the value returned by a trie lookup. In most cases, all information fits in
|
|
||||||
// a 16-bit value. For mappings, this value may contain an index into a slice
|
|
||||||
// with the mapped string. Such mappings can consist of the actual mapped value
|
|
||||||
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
|
|
||||||
// input rune. This technique is used by the cases packages and reduces the
|
|
||||||
// table size significantly.
|
|
||||||
//
|
|
||||||
// The per-rune values have the following format:
|
|
||||||
//
|
|
||||||
// if mapped {
|
|
||||||
// if inlinedXOR {
|
|
||||||
// 15..13 inline XOR marker
|
|
||||||
// 12..11 unused
|
|
||||||
// 10..3 inline XOR mask
|
|
||||||
// } else {
|
|
||||||
// 15..3 index into xor or mapping table
|
|
||||||
// }
|
|
||||||
// } else {
|
|
||||||
// 15..14 unused
|
|
||||||
// 13 mayNeedNorm
|
|
||||||
// 12..11 attributes
|
|
||||||
// 10..8 joining type
|
|
||||||
// 7..3 category type
|
|
||||||
// }
|
|
||||||
// 2 use xor pattern
|
|
||||||
// 1..0 mapped category
|
|
||||||
//
|
|
||||||
// See the definitions below for a more detailed description of the various
|
|
||||||
// bits.
|
|
||||||
type info uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
catSmallMask = 0x3
|
|
||||||
catBigMask = 0xF8
|
|
||||||
indexShift = 3
|
|
||||||
xorBit = 0x4 // interpret the index as an xor pattern
|
|
||||||
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
|
|
||||||
|
|
||||||
joinShift = 8
|
|
||||||
joinMask = 0x07
|
|
||||||
|
|
||||||
// Attributes
|
|
||||||
attributesMask = 0x1800
|
|
||||||
viramaModifier = 0x1800
|
|
||||||
modifier = 0x1000
|
|
||||||
rtl = 0x0800
|
|
||||||
|
|
||||||
mayNeedNorm = 0x2000
|
|
||||||
)
|
|
||||||
|
|
||||||
// A category corresponds to a category defined in the IDNA mapping table.
|
|
||||||
type category uint16
|
|
||||||
|
|
||||||
const (
|
|
||||||
unknown category = 0 // not currently defined in unicode.
|
|
||||||
mapped category = 1
|
|
||||||
disallowedSTD3Mapped category = 2
|
|
||||||
deviation category = 3
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
valid category = 0x08
|
|
||||||
validNV8 category = 0x18
|
|
||||||
validXV8 category = 0x28
|
|
||||||
disallowed category = 0x40
|
|
||||||
disallowedSTD3Valid category = 0x80
|
|
||||||
ignored category = 0xC0
|
|
||||||
)
|
|
||||||
|
|
||||||
// join types and additional rune information
|
|
||||||
const (
|
|
||||||
joiningL = (iota + 1)
|
|
||||||
joiningD
|
|
||||||
joiningT
|
|
||||||
joiningR
|
|
||||||
|
|
||||||
//the following types are derived during processing
|
|
||||||
joinZWJ
|
|
||||||
joinZWNJ
|
|
||||||
joinVirama
|
|
||||||
numJoinTypes
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c info) isMapped() bool {
|
|
||||||
return c&0x3 != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) category() category {
|
|
||||||
small := c & catSmallMask
|
|
||||||
if small != 0 {
|
|
||||||
return category(small)
|
|
||||||
}
|
|
||||||
return category(c & catBigMask)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) joinType() info {
|
|
||||||
if c.isMapped() {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return (c >> joinShift) & joinMask
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) isModifier() bool {
|
|
||||||
return c&(modifier|catSmallMask) == modifier
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c info) isViramaModifier() bool {
|
|
||||||
return c&(attributesMask|catSmallMask) == viramaModifier
|
|
||||||
}
|
|
525
vendor/golang.org/x/net/internal/timeseries/timeseries.go
generated
vendored
525
vendor/golang.org/x/net/internal/timeseries/timeseries.go
generated
vendored
@ -1,525 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package timeseries implements a time series structure for stats collection.
|
|
||||||
package timeseries // import "golang.org/x/net/internal/timeseries"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
timeSeriesNumBuckets = 64
|
|
||||||
minuteHourSeriesNumBuckets = 60
|
|
||||||
)
|
|
||||||
|
|
||||||
var timeSeriesResolutions = []time.Duration{
|
|
||||||
1 * time.Second,
|
|
||||||
10 * time.Second,
|
|
||||||
1 * time.Minute,
|
|
||||||
10 * time.Minute,
|
|
||||||
1 * time.Hour,
|
|
||||||
6 * time.Hour,
|
|
||||||
24 * time.Hour, // 1 day
|
|
||||||
7 * 24 * time.Hour, // 1 week
|
|
||||||
4 * 7 * 24 * time.Hour, // 4 weeks
|
|
||||||
16 * 7 * 24 * time.Hour, // 16 weeks
|
|
||||||
}
|
|
||||||
|
|
||||||
var minuteHourSeriesResolutions = []time.Duration{
|
|
||||||
1 * time.Second,
|
|
||||||
1 * time.Minute,
|
|
||||||
}
|
|
||||||
|
|
||||||
// An Observable is a kind of data that can be aggregated in a time series.
|
|
||||||
type Observable interface {
|
|
||||||
Multiply(ratio float64) // Multiplies the data in self by a given ratio
|
|
||||||
Add(other Observable) // Adds the data from a different observation to self
|
|
||||||
Clear() // Clears the observation so it can be reused.
|
|
||||||
CopyFrom(other Observable) // Copies the contents of a given observation to self
|
|
||||||
}
|
|
||||||
|
|
||||||
// Float attaches the methods of Observable to a float64.
|
|
||||||
type Float float64
|
|
||||||
|
|
||||||
// NewFloat returns a Float.
|
|
||||||
func NewFloat() Observable {
|
|
||||||
f := Float(0)
|
|
||||||
return &f
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the float as a string.
|
|
||||||
func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
|
|
||||||
|
|
||||||
// Value returns the float's value.
|
|
||||||
func (f *Float) Value() float64 { return float64(*f) }
|
|
||||||
|
|
||||||
func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
|
|
||||||
|
|
||||||
func (f *Float) Add(other Observable) {
|
|
||||||
o := other.(*Float)
|
|
||||||
*f += *o
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *Float) Clear() { *f = 0 }
|
|
||||||
|
|
||||||
func (f *Float) CopyFrom(other Observable) {
|
|
||||||
o := other.(*Float)
|
|
||||||
*f = *o
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Clock tells the current time.
|
|
||||||
type Clock interface {
|
|
||||||
Time() time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type defaultClock int
|
|
||||||
|
|
||||||
var defaultClockInstance defaultClock
|
|
||||||
|
|
||||||
func (defaultClock) Time() time.Time { return time.Now() }
|
|
||||||
|
|
||||||
// Information kept per level. Each level consists of a circular list of
|
|
||||||
// observations. The start of the level may be derived from end and the
|
|
||||||
// len(buckets) * sizeInMillis.
|
|
||||||
type tsLevel struct {
|
|
||||||
oldest int // index to oldest bucketed Observable
|
|
||||||
newest int // index to newest bucketed Observable
|
|
||||||
end time.Time // end timestamp for this level
|
|
||||||
size time.Duration // duration of the bucketed Observable
|
|
||||||
buckets []Observable // collections of observations
|
|
||||||
provider func() Observable // used for creating new Observable
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *tsLevel) Clear() {
|
|
||||||
l.oldest = 0
|
|
||||||
l.newest = len(l.buckets) - 1
|
|
||||||
l.end = time.Time{}
|
|
||||||
for i := range l.buckets {
|
|
||||||
if l.buckets[i] != nil {
|
|
||||||
l.buckets[i].Clear()
|
|
||||||
l.buckets[i] = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
|
|
||||||
l.size = size
|
|
||||||
l.provider = f
|
|
||||||
l.buckets = make([]Observable, numBuckets)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keeps a sequence of levels. Each level is responsible for storing data at
|
|
||||||
// a given resolution. For example, the first level stores data at a one
|
|
||||||
// minute resolution while the second level stores data at a one hour
|
|
||||||
// resolution.
|
|
||||||
|
|
||||||
// Each level is represented by a sequence of buckets. Each bucket spans an
|
|
||||||
// interval equal to the resolution of the level. New observations are added
|
|
||||||
// to the last bucket.
|
|
||||||
type timeSeries struct {
|
|
||||||
provider func() Observable // make more Observable
|
|
||||||
numBuckets int // number of buckets in each level
|
|
||||||
levels []*tsLevel // levels of bucketed Observable
|
|
||||||
lastAdd time.Time // time of last Observable tracked
|
|
||||||
total Observable // convenient aggregation of all Observable
|
|
||||||
clock Clock // Clock for getting current time
|
|
||||||
pending Observable // observations not yet bucketed
|
|
||||||
pendingTime time.Time // what time are we keeping in pending
|
|
||||||
dirty bool // if there are pending observations
|
|
||||||
}
|
|
||||||
|
|
||||||
// init initializes a level according to the supplied criteria.
|
|
||||||
func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
|
|
||||||
ts.provider = f
|
|
||||||
ts.numBuckets = numBuckets
|
|
||||||
ts.clock = clock
|
|
||||||
ts.levels = make([]*tsLevel, len(resolutions))
|
|
||||||
|
|
||||||
for i := range resolutions {
|
|
||||||
if i > 0 && resolutions[i-1] >= resolutions[i] {
|
|
||||||
log.Print("timeseries: resolutions must be monotonically increasing")
|
|
||||||
break
|
|
||||||
}
|
|
||||||
newLevel := new(tsLevel)
|
|
||||||
newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
|
|
||||||
ts.levels[i] = newLevel
|
|
||||||
}
|
|
||||||
|
|
||||||
ts.Clear()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear removes all observations from the time series.
|
|
||||||
func (ts *timeSeries) Clear() {
|
|
||||||
ts.lastAdd = time.Time{}
|
|
||||||
ts.total = ts.resetObservation(ts.total)
|
|
||||||
ts.pending = ts.resetObservation(ts.pending)
|
|
||||||
ts.pendingTime = time.Time{}
|
|
||||||
ts.dirty = false
|
|
||||||
|
|
||||||
for i := range ts.levels {
|
|
||||||
ts.levels[i].Clear()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add records an observation at the current time.
|
|
||||||
func (ts *timeSeries) Add(observation Observable) {
|
|
||||||
ts.AddWithTime(observation, ts.clock.Time())
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddWithTime records an observation at the specified time.
|
|
||||||
func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
|
|
||||||
|
|
||||||
smallBucketDuration := ts.levels[0].size
|
|
||||||
|
|
||||||
if t.After(ts.lastAdd) {
|
|
||||||
ts.lastAdd = t
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.After(ts.pendingTime) {
|
|
||||||
ts.advance(t)
|
|
||||||
ts.mergePendingUpdates()
|
|
||||||
ts.pendingTime = ts.levels[0].end
|
|
||||||
ts.pending.CopyFrom(observation)
|
|
||||||
ts.dirty = true
|
|
||||||
} else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
|
|
||||||
// The observation is close enough to go into the pending bucket.
|
|
||||||
// This compensates for clock skewing and small scheduling delays
|
|
||||||
// by letting the update stay in the fast path.
|
|
||||||
ts.pending.Add(observation)
|
|
||||||
ts.dirty = true
|
|
||||||
} else {
|
|
||||||
ts.mergeValue(observation, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergeValue inserts the observation at the specified time in the past into all levels.
|
|
||||||
func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
|
|
||||||
for _, level := range ts.levels {
|
|
||||||
index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
|
|
||||||
if 0 <= index && index < ts.numBuckets {
|
|
||||||
bucketNumber := (level.oldest + index) % ts.numBuckets
|
|
||||||
if level.buckets[bucketNumber] == nil {
|
|
||||||
level.buckets[bucketNumber] = level.provider()
|
|
||||||
}
|
|
||||||
level.buckets[bucketNumber].Add(observation)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ts.total.Add(observation)
|
|
||||||
}
|
|
||||||
|
|
||||||
// mergePendingUpdates applies the pending updates into all levels.
|
|
||||||
func (ts *timeSeries) mergePendingUpdates() {
|
|
||||||
if ts.dirty {
|
|
||||||
ts.mergeValue(ts.pending, ts.pendingTime)
|
|
||||||
ts.pending = ts.resetObservation(ts.pending)
|
|
||||||
ts.dirty = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// advance cycles the buckets at each level until the latest bucket in
|
|
||||||
// each level can hold the time specified.
|
|
||||||
func (ts *timeSeries) advance(t time.Time) {
|
|
||||||
if !t.After(ts.levels[0].end) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for i := 0; i < len(ts.levels); i++ {
|
|
||||||
level := ts.levels[i]
|
|
||||||
if !level.end.Before(t) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the time is sufficiently far, just clear the level and advance
|
|
||||||
// directly.
|
|
||||||
if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
|
|
||||||
for _, b := range level.buckets {
|
|
||||||
ts.resetObservation(b)
|
|
||||||
}
|
|
||||||
level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
for t.After(level.end) {
|
|
||||||
level.end = level.end.Add(level.size)
|
|
||||||
level.newest = level.oldest
|
|
||||||
level.oldest = (level.oldest + 1) % ts.numBuckets
|
|
||||||
ts.resetObservation(level.buckets[level.newest])
|
|
||||||
}
|
|
||||||
|
|
||||||
t = level.end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Latest returns the sum of the num latest buckets from the level.
|
|
||||||
func (ts *timeSeries) Latest(level, num int) Observable {
|
|
||||||
now := ts.clock.Time()
|
|
||||||
if ts.levels[0].end.Before(now) {
|
|
||||||
ts.advance(now)
|
|
||||||
}
|
|
||||||
|
|
||||||
ts.mergePendingUpdates()
|
|
||||||
|
|
||||||
result := ts.provider()
|
|
||||||
l := ts.levels[level]
|
|
||||||
index := l.newest
|
|
||||||
|
|
||||||
for i := 0; i < num; i++ {
|
|
||||||
if l.buckets[index] != nil {
|
|
||||||
result.Add(l.buckets[index])
|
|
||||||
}
|
|
||||||
if index == 0 {
|
|
||||||
index = ts.numBuckets
|
|
||||||
}
|
|
||||||
index--
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// LatestBuckets returns a copy of the num latest buckets from level.
|
|
||||||
func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
|
|
||||||
if level < 0 || level > len(ts.levels) {
|
|
||||||
log.Print("timeseries: bad level argument: ", level)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if num < 0 || num >= ts.numBuckets {
|
|
||||||
log.Print("timeseries: bad num argument: ", num)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
results := make([]Observable, num)
|
|
||||||
now := ts.clock.Time()
|
|
||||||
if ts.levels[0].end.Before(now) {
|
|
||||||
ts.advance(now)
|
|
||||||
}
|
|
||||||
|
|
||||||
ts.mergePendingUpdates()
|
|
||||||
|
|
||||||
l := ts.levels[level]
|
|
||||||
index := l.newest
|
|
||||||
|
|
||||||
for i := 0; i < num; i++ {
|
|
||||||
result := ts.provider()
|
|
||||||
results[i] = result
|
|
||||||
if l.buckets[index] != nil {
|
|
||||||
result.CopyFrom(l.buckets[index])
|
|
||||||
}
|
|
||||||
|
|
||||||
if index == 0 {
|
|
||||||
index = ts.numBuckets
|
|
||||||
}
|
|
||||||
index -= 1
|
|
||||||
}
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// ScaleBy updates observations by scaling by factor.
|
|
||||||
func (ts *timeSeries) ScaleBy(factor float64) {
|
|
||||||
for _, l := range ts.levels {
|
|
||||||
for i := 0; i < ts.numBuckets; i++ {
|
|
||||||
l.buckets[i].Multiply(factor)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ts.total.Multiply(factor)
|
|
||||||
ts.pending.Multiply(factor)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Range returns the sum of observations added over the specified time range.
|
|
||||||
// If start or finish times don't fall on bucket boundaries of the same
|
|
||||||
// level, then return values are approximate answers.
|
|
||||||
func (ts *timeSeries) Range(start, finish time.Time) Observable {
|
|
||||||
return ts.ComputeRange(start, finish, 1)[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Recent returns the sum of observations from the last delta.
|
|
||||||
func (ts *timeSeries) Recent(delta time.Duration) Observable {
|
|
||||||
now := ts.clock.Time()
|
|
||||||
return ts.Range(now.Add(-delta), now)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Total returns the total of all observations.
|
|
||||||
func (ts *timeSeries) Total() Observable {
|
|
||||||
ts.mergePendingUpdates()
|
|
||||||
return ts.total
|
|
||||||
}
|
|
||||||
|
|
||||||
// ComputeRange computes a specified number of values into a slice using
|
|
||||||
// the observations recorded over the specified time period. The return
|
|
||||||
// values are approximate if the start or finish times don't fall on the
|
|
||||||
// bucket boundaries at the same level or if the number of buckets spanning
|
|
||||||
// the range is not an integral multiple of num.
|
|
||||||
func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
|
|
||||||
if start.After(finish) {
|
|
||||||
log.Printf("timeseries: start > finish, %v>%v", start, finish)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if num < 0 {
|
|
||||||
log.Printf("timeseries: num < 0, %v", num)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
results := make([]Observable, num)
|
|
||||||
|
|
||||||
for _, l := range ts.levels {
|
|
||||||
if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
|
|
||||||
ts.extract(l, start, finish, num, results)
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Failed to find a level that covers the desired range. So just
|
|
||||||
// extract from the last level, even if it doesn't cover the entire
|
|
||||||
// desired range.
|
|
||||||
ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
|
|
||||||
|
|
||||||
return results
|
|
||||||
}
|
|
||||||
|
|
||||||
// RecentList returns the specified number of values in slice over the most
|
|
||||||
// recent time period of the specified range.
|
|
||||||
func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
|
|
||||||
if delta < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
now := ts.clock.Time()
|
|
||||||
return ts.ComputeRange(now.Add(-delta), now, num)
|
|
||||||
}
|
|
||||||
|
|
||||||
// extract returns a slice of specified number of observations from a given
|
|
||||||
// level over a given range.
|
|
||||||
func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
|
|
||||||
ts.mergePendingUpdates()
|
|
||||||
|
|
||||||
srcInterval := l.size
|
|
||||||
dstInterval := finish.Sub(start) / time.Duration(num)
|
|
||||||
dstStart := start
|
|
||||||
srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
|
|
||||||
|
|
||||||
srcIndex := 0
|
|
||||||
|
|
||||||
// Where should scanning start?
|
|
||||||
if dstStart.After(srcStart) {
|
|
||||||
advance := dstStart.Sub(srcStart) / srcInterval
|
|
||||||
srcIndex += int(advance)
|
|
||||||
srcStart = srcStart.Add(advance * srcInterval)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The i'th value is computed as show below.
|
|
||||||
// interval = (finish/start)/num
|
|
||||||
// i'th value = sum of observation in range
|
|
||||||
// [ start + i * interval,
|
|
||||||
// start + (i + 1) * interval )
|
|
||||||
for i := 0; i < num; i++ {
|
|
||||||
results[i] = ts.resetObservation(results[i])
|
|
||||||
dstEnd := dstStart.Add(dstInterval)
|
|
||||||
for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
|
|
||||||
srcEnd := srcStart.Add(srcInterval)
|
|
||||||
if srcEnd.After(ts.lastAdd) {
|
|
||||||
srcEnd = ts.lastAdd
|
|
||||||
}
|
|
||||||
|
|
||||||
if !srcEnd.Before(dstStart) {
|
|
||||||
srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
|
|
||||||
if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
|
|
||||||
// dst completely contains src.
|
|
||||||
if srcValue != nil {
|
|
||||||
results[i].Add(srcValue)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// dst partially overlaps src.
|
|
||||||
overlapStart := maxTime(srcStart, dstStart)
|
|
||||||
overlapEnd := minTime(srcEnd, dstEnd)
|
|
||||||
base := srcEnd.Sub(srcStart)
|
|
||||||
fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
|
|
||||||
|
|
||||||
used := ts.provider()
|
|
||||||
if srcValue != nil {
|
|
||||||
used.CopyFrom(srcValue)
|
|
||||||
}
|
|
||||||
used.Multiply(fraction)
|
|
||||||
results[i].Add(used)
|
|
||||||
}
|
|
||||||
|
|
||||||
if srcEnd.After(dstEnd) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
srcIndex++
|
|
||||||
srcStart = srcStart.Add(srcInterval)
|
|
||||||
}
|
|
||||||
dstStart = dstStart.Add(dstInterval)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// resetObservation clears the content so the struct may be reused.
|
|
||||||
func (ts *timeSeries) resetObservation(observation Observable) Observable {
|
|
||||||
if observation == nil {
|
|
||||||
observation = ts.provider()
|
|
||||||
} else {
|
|
||||||
observation.Clear()
|
|
||||||
}
|
|
||||||
return observation
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimeSeries tracks data at granularities from 1 second to 16 weeks.
|
|
||||||
type TimeSeries struct {
|
|
||||||
timeSeries
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
|
|
||||||
func NewTimeSeries(f func() Observable) *TimeSeries {
|
|
||||||
return NewTimeSeriesWithClock(f, defaultClockInstance)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
|
|
||||||
// assigning timestamps.
|
|
||||||
func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
|
|
||||||
ts := new(TimeSeries)
|
|
||||||
ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
|
|
||||||
return ts
|
|
||||||
}
|
|
||||||
|
|
||||||
// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
|
|
||||||
type MinuteHourSeries struct {
|
|
||||||
timeSeries
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
|
|
||||||
func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
|
|
||||||
return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
|
|
||||||
// assigning timestamps.
|
|
||||||
func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
|
|
||||||
ts := new(MinuteHourSeries)
|
|
||||||
ts.timeSeries.init(minuteHourSeriesResolutions, f,
|
|
||||||
minuteHourSeriesNumBuckets, clock)
|
|
||||||
return ts
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *MinuteHourSeries) Minute() Observable {
|
|
||||||
return ts.timeSeries.Latest(0, 60)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ts *MinuteHourSeries) Hour() Observable {
|
|
||||||
return ts.timeSeries.Latest(1, 60)
|
|
||||||
}
|
|
||||||
|
|
||||||
func minTime(a, b time.Time) time.Time {
|
|
||||||
if a.Before(b) {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func maxTime(a, b time.Time) time.Time {
|
|
||||||
if a.After(b) {
|
|
||||||
return a
|
|
||||||
}
|
|
||||||
return b
|
|
||||||
}
|
|
532
vendor/golang.org/x/net/trace/events.go
generated
vendored
532
vendor/golang.org/x/net/trace/events.go
generated
vendored
@ -1,532 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package trace
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"text/tabwriter"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const maxEventsPerLog = 100
|
|
||||||
|
|
||||||
type bucket struct {
|
|
||||||
MaxErrAge time.Duration
|
|
||||||
String string
|
|
||||||
}
|
|
||||||
|
|
||||||
var buckets = []bucket{
|
|
||||||
{0, "total"},
|
|
||||||
{10 * time.Second, "errs<10s"},
|
|
||||||
{1 * time.Minute, "errs<1m"},
|
|
||||||
{10 * time.Minute, "errs<10m"},
|
|
||||||
{1 * time.Hour, "errs<1h"},
|
|
||||||
{10 * time.Hour, "errs<10h"},
|
|
||||||
{24000 * time.Hour, "errors"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// RenderEvents renders the HTML page typically served at /debug/events.
|
|
||||||
// It does not do any auth checking. The request may be nil.
|
|
||||||
//
|
|
||||||
// Most users will use the Events handler.
|
|
||||||
func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
|
|
||||||
now := time.Now()
|
|
||||||
data := &struct {
|
|
||||||
Families []string // family names
|
|
||||||
Buckets []bucket
|
|
||||||
Counts [][]int // eventLog count per family/bucket
|
|
||||||
|
|
||||||
// Set when a bucket has been selected.
|
|
||||||
Family string
|
|
||||||
Bucket int
|
|
||||||
EventLogs eventLogs
|
|
||||||
Expanded bool
|
|
||||||
}{
|
|
||||||
Buckets: buckets,
|
|
||||||
}
|
|
||||||
|
|
||||||
data.Families = make([]string, 0, len(families))
|
|
||||||
famMu.RLock()
|
|
||||||
for name := range families {
|
|
||||||
data.Families = append(data.Families, name)
|
|
||||||
}
|
|
||||||
famMu.RUnlock()
|
|
||||||
sort.Strings(data.Families)
|
|
||||||
|
|
||||||
// Count the number of eventLogs in each family for each error age.
|
|
||||||
data.Counts = make([][]int, len(data.Families))
|
|
||||||
for i, name := range data.Families {
|
|
||||||
// TODO(sameer): move this loop under the family lock.
|
|
||||||
f := getEventFamily(name)
|
|
||||||
data.Counts[i] = make([]int, len(data.Buckets))
|
|
||||||
for j, b := range data.Buckets {
|
|
||||||
data.Counts[i][j] = f.Count(now, b.MaxErrAge)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req != nil {
|
|
||||||
var ok bool
|
|
||||||
data.Family, data.Bucket, ok = parseEventsArgs(req)
|
|
||||||
if !ok {
|
|
||||||
// No-op
|
|
||||||
} else {
|
|
||||||
data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
|
|
||||||
}
|
|
||||||
if data.EventLogs != nil {
|
|
||||||
defer data.EventLogs.Free()
|
|
||||||
sort.Sort(data.EventLogs)
|
|
||||||
}
|
|
||||||
if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
|
|
||||||
data.Expanded = exp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
famMu.RLock()
|
|
||||||
defer famMu.RUnlock()
|
|
||||||
if err := eventsTmpl().Execute(w, data); err != nil {
|
|
||||||
log.Printf("net/trace: Failed executing template: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
|
|
||||||
fam, bStr := req.FormValue("fam"), req.FormValue("b")
|
|
||||||
if fam == "" || bStr == "" {
|
|
||||||
return "", 0, false
|
|
||||||
}
|
|
||||||
b, err := strconv.Atoi(bStr)
|
|
||||||
if err != nil || b < 0 || b >= len(buckets) {
|
|
||||||
return "", 0, false
|
|
||||||
}
|
|
||||||
return fam, b, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// An EventLog provides a log of events associated with a specific object.
|
|
||||||
type EventLog interface {
|
|
||||||
// Printf formats its arguments with fmt.Sprintf and adds the
|
|
||||||
// result to the event log.
|
|
||||||
Printf(format string, a ...interface{})
|
|
||||||
|
|
||||||
// Errorf is like Printf, but it marks this event as an error.
|
|
||||||
Errorf(format string, a ...interface{})
|
|
||||||
|
|
||||||
// Finish declares that this event log is complete.
|
|
||||||
// The event log should not be used after calling this method.
|
|
||||||
Finish()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEventLog returns a new EventLog with the specified family name
|
|
||||||
// and title.
|
|
||||||
func NewEventLog(family, title string) EventLog {
|
|
||||||
el := newEventLog()
|
|
||||||
el.ref()
|
|
||||||
el.Family, el.Title = family, title
|
|
||||||
el.Start = time.Now()
|
|
||||||
el.events = make([]logEntry, 0, maxEventsPerLog)
|
|
||||||
el.stack = make([]uintptr, 32)
|
|
||||||
n := runtime.Callers(2, el.stack)
|
|
||||||
el.stack = el.stack[:n]
|
|
||||||
|
|
||||||
getEventFamily(family).add(el)
|
|
||||||
return el
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) Finish() {
|
|
||||||
getEventFamily(el.Family).remove(el)
|
|
||||||
el.unref() // matches ref in New
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
famMu sync.RWMutex
|
|
||||||
families = make(map[string]*eventFamily) // family name => family
|
|
||||||
)
|
|
||||||
|
|
||||||
func getEventFamily(fam string) *eventFamily {
|
|
||||||
famMu.Lock()
|
|
||||||
defer famMu.Unlock()
|
|
||||||
f := families[fam]
|
|
||||||
if f == nil {
|
|
||||||
f = &eventFamily{}
|
|
||||||
families[fam] = f
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
type eventFamily struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
eventLogs eventLogs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *eventFamily) add(el *eventLog) {
|
|
||||||
f.mu.Lock()
|
|
||||||
f.eventLogs = append(f.eventLogs, el)
|
|
||||||
f.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *eventFamily) remove(el *eventLog) {
|
|
||||||
f.mu.Lock()
|
|
||||||
defer f.mu.Unlock()
|
|
||||||
for i, el0 := range f.eventLogs {
|
|
||||||
if el == el0 {
|
|
||||||
copy(f.eventLogs[i:], f.eventLogs[i+1:])
|
|
||||||
f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
|
|
||||||
f.mu.RLock()
|
|
||||||
defer f.mu.RUnlock()
|
|
||||||
for _, el := range f.eventLogs {
|
|
||||||
if el.hasRecentError(now, maxErrAge) {
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
|
|
||||||
f.mu.RLock()
|
|
||||||
defer f.mu.RUnlock()
|
|
||||||
els = make(eventLogs, 0, len(f.eventLogs))
|
|
||||||
for _, el := range f.eventLogs {
|
|
||||||
if el.hasRecentError(now, maxErrAge) {
|
|
||||||
el.ref()
|
|
||||||
els = append(els, el)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type eventLogs []*eventLog
|
|
||||||
|
|
||||||
// Free calls unref on each element of the list.
|
|
||||||
func (els eventLogs) Free() {
|
|
||||||
for _, el := range els {
|
|
||||||
el.unref()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// eventLogs may be sorted in reverse chronological order.
|
|
||||||
func (els eventLogs) Len() int { return len(els) }
|
|
||||||
func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
|
|
||||||
func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
|
|
||||||
|
|
||||||
// A logEntry is a timestamped log entry in an event log.
|
|
||||||
type logEntry struct {
|
|
||||||
When time.Time
|
|
||||||
Elapsed time.Duration // since previous event in log
|
|
||||||
NewDay bool // whether this event is on a different day to the previous event
|
|
||||||
What string
|
|
||||||
IsErr bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// WhenString returns a string representation of the elapsed time of the event.
|
|
||||||
// It will include the date if midnight was crossed.
|
|
||||||
func (e logEntry) WhenString() string {
|
|
||||||
if e.NewDay {
|
|
||||||
return e.When.Format("2006/01/02 15:04:05.000000")
|
|
||||||
}
|
|
||||||
return e.When.Format("15:04:05.000000")
|
|
||||||
}
|
|
||||||
|
|
||||||
// An eventLog represents an active event log.
|
|
||||||
type eventLog struct {
|
|
||||||
// Family is the top-level grouping of event logs to which this belongs.
|
|
||||||
Family string
|
|
||||||
|
|
||||||
// Title is the title of this event log.
|
|
||||||
Title string
|
|
||||||
|
|
||||||
// Timing information.
|
|
||||||
Start time.Time
|
|
||||||
|
|
||||||
// Call stack where this event log was created.
|
|
||||||
stack []uintptr
|
|
||||||
|
|
||||||
// Append-only sequence of events.
|
|
||||||
//
|
|
||||||
// TODO(sameer): change this to a ring buffer to avoid the array copy
|
|
||||||
// when we hit maxEventsPerLog.
|
|
||||||
mu sync.RWMutex
|
|
||||||
events []logEntry
|
|
||||||
LastErrorTime time.Time
|
|
||||||
discarded int
|
|
||||||
|
|
||||||
refs int32 // how many buckets this is in
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) reset() {
|
|
||||||
// Clear all but the mutex. Mutexes may not be copied, even when unlocked.
|
|
||||||
el.Family = ""
|
|
||||||
el.Title = ""
|
|
||||||
el.Start = time.Time{}
|
|
||||||
el.stack = nil
|
|
||||||
el.events = nil
|
|
||||||
el.LastErrorTime = time.Time{}
|
|
||||||
el.discarded = 0
|
|
||||||
el.refs = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
|
|
||||||
if maxErrAge == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
el.mu.RLock()
|
|
||||||
defer el.mu.RUnlock()
|
|
||||||
return now.Sub(el.LastErrorTime) < maxErrAge
|
|
||||||
}
|
|
||||||
|
|
||||||
// delta returns the elapsed time since the last event or the log start,
|
|
||||||
// and whether it spans midnight.
|
|
||||||
// L >= el.mu
|
|
||||||
func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
|
|
||||||
if len(el.events) == 0 {
|
|
||||||
return t.Sub(el.Start), false
|
|
||||||
}
|
|
||||||
prev := el.events[len(el.events)-1].When
|
|
||||||
return t.Sub(prev), prev.Day() != t.Day()
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) Printf(format string, a ...interface{}) {
|
|
||||||
el.printf(false, format, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) Errorf(format string, a ...interface{}) {
|
|
||||||
el.printf(true, format, a...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
|
|
||||||
e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
|
|
||||||
el.mu.Lock()
|
|
||||||
e.Elapsed, e.NewDay = el.delta(e.When)
|
|
||||||
if len(el.events) < maxEventsPerLog {
|
|
||||||
el.events = append(el.events, e)
|
|
||||||
} else {
|
|
||||||
// Discard the oldest event.
|
|
||||||
if el.discarded == 0 {
|
|
||||||
// el.discarded starts at two to count for the event it
|
|
||||||
// is replacing, plus the next one that we are about to
|
|
||||||
// drop.
|
|
||||||
el.discarded = 2
|
|
||||||
} else {
|
|
||||||
el.discarded++
|
|
||||||
}
|
|
||||||
// TODO(sameer): if this causes allocations on a critical path,
|
|
||||||
// change eventLog.What to be a fmt.Stringer, as in trace.go.
|
|
||||||
el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
|
|
||||||
// The timestamp of the discarded meta-event should be
|
|
||||||
// the time of the last event it is representing.
|
|
||||||
el.events[0].When = el.events[1].When
|
|
||||||
copy(el.events[1:], el.events[2:])
|
|
||||||
el.events[maxEventsPerLog-1] = e
|
|
||||||
}
|
|
||||||
if e.IsErr {
|
|
||||||
el.LastErrorTime = e.When
|
|
||||||
}
|
|
||||||
el.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) ref() {
|
|
||||||
atomic.AddInt32(&el.refs, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) unref() {
|
|
||||||
if atomic.AddInt32(&el.refs, -1) == 0 {
|
|
||||||
freeEventLog(el)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) When() string {
|
|
||||||
return el.Start.Format("2006/01/02 15:04:05.000000")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) ElapsedTime() string {
|
|
||||||
elapsed := time.Since(el.Start)
|
|
||||||
return fmt.Sprintf("%.6f", elapsed.Seconds())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) Stack() string {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
|
|
||||||
printStackRecord(tw, el.stack)
|
|
||||||
tw.Flush()
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// printStackRecord prints the function + source line information
|
|
||||||
// for a single stack trace.
|
|
||||||
// Adapted from runtime/pprof/pprof.go.
|
|
||||||
func printStackRecord(w io.Writer, stk []uintptr) {
|
|
||||||
for _, pc := range stk {
|
|
||||||
f := runtime.FuncForPC(pc)
|
|
||||||
if f == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
file, line := f.FileLine(pc)
|
|
||||||
name := f.Name()
|
|
||||||
// Hide runtime.goexit and any runtime functions at the beginning.
|
|
||||||
if strings.HasPrefix(name, "runtime.") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (el *eventLog) Events() []logEntry {
|
|
||||||
el.mu.RLock()
|
|
||||||
defer el.mu.RUnlock()
|
|
||||||
return el.events
|
|
||||||
}
|
|
||||||
|
|
||||||
// freeEventLogs is a freelist of *eventLog
|
|
||||||
var freeEventLogs = make(chan *eventLog, 1000)
|
|
||||||
|
|
||||||
// newEventLog returns a event log ready to use.
|
|
||||||
func newEventLog() *eventLog {
|
|
||||||
select {
|
|
||||||
case el := <-freeEventLogs:
|
|
||||||
return el
|
|
||||||
default:
|
|
||||||
return new(eventLog)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// freeEventLog adds el to freeEventLogs if there's room.
|
|
||||||
// This is non-blocking.
|
|
||||||
func freeEventLog(el *eventLog) {
|
|
||||||
el.reset()
|
|
||||||
select {
|
|
||||||
case freeEventLogs <- el:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var eventsTmplCache *template.Template
|
|
||||||
var eventsTmplOnce sync.Once
|
|
||||||
|
|
||||||
func eventsTmpl() *template.Template {
|
|
||||||
eventsTmplOnce.Do(func() {
|
|
||||||
eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
|
|
||||||
"elapsed": elapsed,
|
|
||||||
"trimSpace": strings.TrimSpace,
|
|
||||||
}).Parse(eventsHTML))
|
|
||||||
})
|
|
||||||
return eventsTmplCache
|
|
||||||
}
|
|
||||||
|
|
||||||
const eventsHTML = `
|
|
||||||
<html>
|
|
||||||
<head>
|
|
||||||
<title>events</title>
|
|
||||||
</head>
|
|
||||||
<style type="text/css">
|
|
||||||
body {
|
|
||||||
font-family: sans-serif;
|
|
||||||
}
|
|
||||||
table#req-status td.family {
|
|
||||||
padding-right: 2em;
|
|
||||||
}
|
|
||||||
table#req-status td.active {
|
|
||||||
padding-right: 1em;
|
|
||||||
}
|
|
||||||
table#req-status td.empty {
|
|
||||||
color: #aaa;
|
|
||||||
}
|
|
||||||
table#reqs {
|
|
||||||
margin-top: 1em;
|
|
||||||
}
|
|
||||||
table#reqs tr.first {
|
|
||||||
{{if $.Expanded}}font-weight: bold;{{end}}
|
|
||||||
}
|
|
||||||
table#reqs td {
|
|
||||||
font-family: monospace;
|
|
||||||
}
|
|
||||||
table#reqs td.when {
|
|
||||||
text-align: right;
|
|
||||||
white-space: nowrap;
|
|
||||||
}
|
|
||||||
table#reqs td.elapsed {
|
|
||||||
padding: 0 0.5em;
|
|
||||||
text-align: right;
|
|
||||||
white-space: pre;
|
|
||||||
width: 10em;
|
|
||||||
}
|
|
||||||
address {
|
|
||||||
font-size: smaller;
|
|
||||||
margin-top: 5em;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
<body>
|
|
||||||
|
|
||||||
<h1>/debug/events</h1>
|
|
||||||
|
|
||||||
<table id="req-status">
|
|
||||||
{{range $i, $fam := .Families}}
|
|
||||||
<tr>
|
|
||||||
<td class="family">{{$fam}}</td>
|
|
||||||
|
|
||||||
{{range $j, $bucket := $.Buckets}}
|
|
||||||
{{$n := index $.Counts $i $j}}
|
|
||||||
<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
|
|
||||||
{{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
|
|
||||||
[{{$n}} {{$bucket.String}}]
|
|
||||||
{{if $n}}</a>{{end}}
|
|
||||||
</td>
|
|
||||||
{{end}}
|
|
||||||
|
|
||||||
</tr>{{end}}
|
|
||||||
</table>
|
|
||||||
|
|
||||||
{{if $.EventLogs}}
|
|
||||||
<hr />
|
|
||||||
<h3>Family: {{$.Family}}</h3>
|
|
||||||
|
|
||||||
{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
|
|
||||||
[Summary]{{if $.Expanded}}</a>{{end}}
|
|
||||||
|
|
||||||
{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
|
|
||||||
[Expanded]{{if not $.Expanded}}</a>{{end}}
|
|
||||||
|
|
||||||
<table id="reqs">
|
|
||||||
<tr><th>When</th><th>Elapsed</th></tr>
|
|
||||||
{{range $el := $.EventLogs}}
|
|
||||||
<tr class="first">
|
|
||||||
<td class="when">{{$el.When}}</td>
|
|
||||||
<td class="elapsed">{{$el.ElapsedTime}}</td>
|
|
||||||
<td>{{$el.Title}}
|
|
||||||
</tr>
|
|
||||||
{{if $.Expanded}}
|
|
||||||
<tr>
|
|
||||||
<td class="when"></td>
|
|
||||||
<td class="elapsed"></td>
|
|
||||||
<td><pre>{{$el.Stack|trimSpace}}</pre></td>
|
|
||||||
</tr>
|
|
||||||
{{range $el.Events}}
|
|
||||||
<tr>
|
|
||||||
<td class="when">{{.WhenString}}</td>
|
|
||||||
<td class="elapsed">{{elapsed .Elapsed}}</td>
|
|
||||||
<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</table>
|
|
||||||
{{end}}
|
|
||||||
</body>
|
|
||||||
</html>
|
|
||||||
`
|
|
365
vendor/golang.org/x/net/trace/histogram.go
generated
vendored
365
vendor/golang.org/x/net/trace/histogram.go
generated
vendored
@ -1,365 +0,0 @@
|
|||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package trace
|
|
||||||
|
|
||||||
// This file implements histogramming for RPC statistics collection.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"html/template"
|
|
||||||
"log"
|
|
||||||
"math"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/net/internal/timeseries"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
bucketCount = 38
|
|
||||||
)
|
|
||||||
|
|
||||||
// histogram keeps counts of values in buckets that are spaced
|
|
||||||
// out in powers of 2: 0-1, 2-3, 4-7...
|
|
||||||
// histogram implements timeseries.Observable
|
|
||||||
type histogram struct {
|
|
||||||
sum int64 // running total of measurements
|
|
||||||
sumOfSquares float64 // square of running total
|
|
||||||
buckets []int64 // bucketed values for histogram
|
|
||||||
value int // holds a single value as an optimization
|
|
||||||
valueCount int64 // number of values recorded for single value
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddMeasurement records a value measurement observation to the histogram.
|
|
||||||
func (h *histogram) addMeasurement(value int64) {
|
|
||||||
// TODO: assert invariant
|
|
||||||
h.sum += value
|
|
||||||
h.sumOfSquares += float64(value) * float64(value)
|
|
||||||
|
|
||||||
bucketIndex := getBucket(value)
|
|
||||||
|
|
||||||
if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
|
|
||||||
h.value = bucketIndex
|
|
||||||
h.valueCount++
|
|
||||||
} else {
|
|
||||||
h.allocateBuckets()
|
|
||||||
h.buckets[bucketIndex]++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) allocateBuckets() {
|
|
||||||
if h.buckets == nil {
|
|
||||||
h.buckets = make([]int64, bucketCount)
|
|
||||||
h.buckets[h.value] = h.valueCount
|
|
||||||
h.value = 0
|
|
||||||
h.valueCount = -1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func log2(i int64) int {
|
|
||||||
n := 0
|
|
||||||
for ; i >= 0x100; i >>= 8 {
|
|
||||||
n += 8
|
|
||||||
}
|
|
||||||
for ; i > 0; i >>= 1 {
|
|
||||||
n += 1
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func getBucket(i int64) (index int) {
|
|
||||||
index = log2(i) - 1
|
|
||||||
if index < 0 {
|
|
||||||
index = 0
|
|
||||||
}
|
|
||||||
if index >= bucketCount {
|
|
||||||
index = bucketCount - 1
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Total returns the number of recorded observations.
|
|
||||||
func (h *histogram) total() (total int64) {
|
|
||||||
if h.valueCount >= 0 {
|
|
||||||
total = h.valueCount
|
|
||||||
}
|
|
||||||
for _, val := range h.buckets {
|
|
||||||
total += int64(val)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Average returns the average value of recorded observations.
|
|
||||||
func (h *histogram) average() float64 {
|
|
||||||
t := h.total()
|
|
||||||
if t == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return float64(h.sum) / float64(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Variance returns the variance of recorded observations.
|
|
||||||
func (h *histogram) variance() float64 {
|
|
||||||
t := float64(h.total())
|
|
||||||
if t == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
s := float64(h.sum) / t
|
|
||||||
return h.sumOfSquares/t - s*s
|
|
||||||
}
|
|
||||||
|
|
||||||
// StandardDeviation returns the standard deviation of recorded observations.
|
|
||||||
func (h *histogram) standardDeviation() float64 {
|
|
||||||
return math.Sqrt(h.variance())
|
|
||||||
}
|
|
||||||
|
|
||||||
// PercentileBoundary estimates the value that the given fraction of recorded
|
|
||||||
// observations are less than.
|
|
||||||
func (h *histogram) percentileBoundary(percentile float64) int64 {
|
|
||||||
total := h.total()
|
|
||||||
|
|
||||||
// Corner cases (make sure result is strictly less than Total())
|
|
||||||
if total == 0 {
|
|
||||||
return 0
|
|
||||||
} else if total == 1 {
|
|
||||||
return int64(h.average())
|
|
||||||
}
|
|
||||||
|
|
||||||
percentOfTotal := round(float64(total) * percentile)
|
|
||||||
var runningTotal int64
|
|
||||||
|
|
||||||
for i := range h.buckets {
|
|
||||||
value := h.buckets[i]
|
|
||||||
runningTotal += value
|
|
||||||
if runningTotal == percentOfTotal {
|
|
||||||
// We hit an exact bucket boundary. If the next bucket has data, it is a
|
|
||||||
// good estimate of the value. If the bucket is empty, we interpolate the
|
|
||||||
// midpoint between the next bucket's boundary and the next non-zero
|
|
||||||
// bucket. If the remaining buckets are all empty, then we use the
|
|
||||||
// boundary for the next bucket as the estimate.
|
|
||||||
j := uint8(i + 1)
|
|
||||||
min := bucketBoundary(j)
|
|
||||||
if runningTotal < total {
|
|
||||||
for h.buckets[j] == 0 {
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
max := bucketBoundary(j)
|
|
||||||
return min + round(float64(max-min)/2)
|
|
||||||
} else if runningTotal > percentOfTotal {
|
|
||||||
// The value is in this bucket. Interpolate the value.
|
|
||||||
delta := runningTotal - percentOfTotal
|
|
||||||
percentBucket := float64(value-delta) / float64(value)
|
|
||||||
bucketMin := bucketBoundary(uint8(i))
|
|
||||||
nextBucketMin := bucketBoundary(uint8(i + 1))
|
|
||||||
bucketSize := nextBucketMin - bucketMin
|
|
||||||
return bucketMin + round(percentBucket*float64(bucketSize))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bucketBoundary(bucketCount - 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Median returns the estimated median of the observed values.
|
|
||||||
func (h *histogram) median() int64 {
|
|
||||||
return h.percentileBoundary(0.5)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds other to h.
|
|
||||||
func (h *histogram) Add(other timeseries.Observable) {
|
|
||||||
o := other.(*histogram)
|
|
||||||
if o.valueCount == 0 {
|
|
||||||
// Other histogram is empty
|
|
||||||
} else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
|
|
||||||
// Both have a single bucketed value, aggregate them
|
|
||||||
h.valueCount += o.valueCount
|
|
||||||
} else {
|
|
||||||
// Two different values necessitate buckets in this histogram
|
|
||||||
h.allocateBuckets()
|
|
||||||
if o.valueCount >= 0 {
|
|
||||||
h.buckets[o.value] += o.valueCount
|
|
||||||
} else {
|
|
||||||
for i := range h.buckets {
|
|
||||||
h.buckets[i] += o.buckets[i]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
h.sumOfSquares += o.sumOfSquares
|
|
||||||
h.sum += o.sum
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear resets the histogram to an empty state, removing all observed values.
|
|
||||||
func (h *histogram) Clear() {
|
|
||||||
h.buckets = nil
|
|
||||||
h.value = 0
|
|
||||||
h.valueCount = 0
|
|
||||||
h.sum = 0
|
|
||||||
h.sumOfSquares = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFrom copies from other, which must be a *histogram, into h.
|
|
||||||
func (h *histogram) CopyFrom(other timeseries.Observable) {
|
|
||||||
o := other.(*histogram)
|
|
||||||
if o.valueCount == -1 {
|
|
||||||
h.allocateBuckets()
|
|
||||||
copy(h.buckets, o.buckets)
|
|
||||||
}
|
|
||||||
h.sum = o.sum
|
|
||||||
h.sumOfSquares = o.sumOfSquares
|
|
||||||
h.value = o.value
|
|
||||||
h.valueCount = o.valueCount
|
|
||||||
}
|
|
||||||
|
|
||||||
// Multiply scales the histogram by the specified ratio.
|
|
||||||
func (h *histogram) Multiply(ratio float64) {
|
|
||||||
if h.valueCount == -1 {
|
|
||||||
for i := range h.buckets {
|
|
||||||
h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
h.valueCount = int64(float64(h.valueCount) * ratio)
|
|
||||||
}
|
|
||||||
h.sum = int64(float64(h.sum) * ratio)
|
|
||||||
h.sumOfSquares = h.sumOfSquares * ratio
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new histogram.
|
|
||||||
func (h *histogram) New() timeseries.Observable {
|
|
||||||
r := new(histogram)
|
|
||||||
r.Clear()
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) String() string {
|
|
||||||
return fmt.Sprintf("%d, %f, %d, %d, %v",
|
|
||||||
h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
|
|
||||||
}
|
|
||||||
|
|
||||||
// round returns the closest int64 to the argument
|
|
||||||
func round(in float64) int64 {
|
|
||||||
return int64(math.Floor(in + 0.5))
|
|
||||||
}
|
|
||||||
|
|
||||||
// bucketBoundary returns the first value in the bucket.
|
|
||||||
func bucketBoundary(bucket uint8) int64 {
|
|
||||||
if bucket == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return 1 << bucket
|
|
||||||
}
|
|
||||||
|
|
||||||
// bucketData holds data about a specific bucket for use in distTmpl.
|
|
||||||
type bucketData struct {
|
|
||||||
Lower, Upper int64
|
|
||||||
N int64
|
|
||||||
Pct, CumulativePct float64
|
|
||||||
GraphWidth int
|
|
||||||
}
|
|
||||||
|
|
||||||
// data holds data about a Distribution for use in distTmpl.
|
|
||||||
type data struct {
|
|
||||||
Buckets []*bucketData
|
|
||||||
Count, Median int64
|
|
||||||
Mean, StandardDeviation float64
|
|
||||||
}
|
|
||||||
|
|
||||||
// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
|
|
||||||
const maxHTMLBarWidth = 350.0
|
|
||||||
|
|
||||||
// newData returns data representing h for use in distTmpl.
|
|
||||||
func (h *histogram) newData() *data {
|
|
||||||
// Force the allocation of buckets to simplify the rendering implementation
|
|
||||||
h.allocateBuckets()
|
|
||||||
// We scale the bars on the right so that the largest bar is
|
|
||||||
// maxHTMLBarWidth pixels in width.
|
|
||||||
maxBucket := int64(0)
|
|
||||||
for _, n := range h.buckets {
|
|
||||||
if n > maxBucket {
|
|
||||||
maxBucket = n
|
|
||||||
}
|
|
||||||
}
|
|
||||||
total := h.total()
|
|
||||||
barsizeMult := maxHTMLBarWidth / float64(maxBucket)
|
|
||||||
var pctMult float64
|
|
||||||
if total == 0 {
|
|
||||||
pctMult = 1.0
|
|
||||||
} else {
|
|
||||||
pctMult = 100.0 / float64(total)
|
|
||||||
}
|
|
||||||
|
|
||||||
buckets := make([]*bucketData, len(h.buckets))
|
|
||||||
runningTotal := int64(0)
|
|
||||||
for i, n := range h.buckets {
|
|
||||||
if n == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
runningTotal += n
|
|
||||||
var upperBound int64
|
|
||||||
if i < bucketCount-1 {
|
|
||||||
upperBound = bucketBoundary(uint8(i + 1))
|
|
||||||
} else {
|
|
||||||
upperBound = math.MaxInt64
|
|
||||||
}
|
|
||||||
buckets[i] = &bucketData{
|
|
||||||
Lower: bucketBoundary(uint8(i)),
|
|
||||||
Upper: upperBound,
|
|
||||||
N: n,
|
|
||||||
Pct: float64(n) * pctMult,
|
|
||||||
CumulativePct: float64(runningTotal) * pctMult,
|
|
||||||
GraphWidth: int(float64(n) * barsizeMult),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &data{
|
|
||||||
Buckets: buckets,
|
|
||||||
Count: total,
|
|
||||||
Median: h.median(),
|
|
||||||
Mean: h.average(),
|
|
||||||
StandardDeviation: h.standardDeviation(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *histogram) html() template.HTML {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if err := distTmpl().Execute(buf, h.newData()); err != nil {
|
|
||||||
buf.Reset()
|
|
||||||
log.Printf("net/trace: couldn't execute template: %v", err)
|
|
||||||
}
|
|
||||||
return template.HTML(buf.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
var distTmplCache *template.Template
|
|
||||||
var distTmplOnce sync.Once
|
|
||||||
|
|
||||||
func distTmpl() *template.Template {
|
|
||||||
distTmplOnce.Do(func() {
|
|
||||||
// Input: data
|
|
||||||
distTmplCache = template.Must(template.New("distTmpl").Parse(`
|
|
||||||
<table>
|
|
||||||
<tr>
|
|
||||||
<td style="padding:0.25em">Count: {{.Count}}</td>
|
|
||||||
<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
|
|
||||||
<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
|
|
||||||
<td style="padding:0.25em">Median: {{.Median}}</td>
|
|
||||||
</tr>
|
|
||||||
</table>
|
|
||||||
<hr>
|
|
||||||
<table>
|
|
||||||
{{range $b := .Buckets}}
|
|
||||||
{{if $b}}
|
|
||||||
<tr>
|
|
||||||
<td style="padding:0 0 0 0.25em">[</td>
|
|
||||||
<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
|
|
||||||
<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
|
|
||||||
<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
|
|
||||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
|
|
||||||
<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
|
|
||||||
<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</table>
|
|
||||||
`))
|
|
||||||
})
|
|
||||||
return distTmplCache
|
|
||||||
}
|
|
1130
vendor/golang.org/x/net/trace/trace.go
generated
vendored
1130
vendor/golang.org/x/net/trace/trace.go
generated
vendored
File diff suppressed because it is too large
Load Diff
13
vendor/golang.org/x/oauth2/.travis.yml
generated
vendored
13
vendor/golang.org/x/oauth2/.travis.yml
generated
vendored
@ -1,13 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- tip
|
|
||||||
|
|
||||||
install:
|
|
||||||
- export GOPATH="$HOME/gopath"
|
|
||||||
- mkdir -p "$GOPATH/src/golang.org/x"
|
|
||||||
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
|
|
||||||
- go get -v -t -d golang.org/x/oauth2/...
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go test -v golang.org/x/oauth2/...
|
|
3
vendor/golang.org/x/oauth2/AUTHORS
generated
vendored
3
vendor/golang.org/x/oauth2/AUTHORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code refers to The Go Authors for copyright purposes.
|
|
||||||
# The master list of authors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/AUTHORS.
|
|
26
vendor/golang.org/x/oauth2/CONTRIBUTING.md
generated
vendored
26
vendor/golang.org/x/oauth2/CONTRIBUTING.md
generated
vendored
@ -1,26 +0,0 @@
|
|||||||
# Contributing to Go
|
|
||||||
|
|
||||||
Go is an open source project.
|
|
||||||
|
|
||||||
It is the work of hundreds of contributors. We appreciate your help!
|
|
||||||
|
|
||||||
## Filing issues
|
|
||||||
|
|
||||||
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
|
|
||||||
|
|
||||||
1. What version of Go are you using (`go version`)?
|
|
||||||
2. What operating system and processor architecture are you using?
|
|
||||||
3. What did you do?
|
|
||||||
4. What did you expect to see?
|
|
||||||
5. What did you see instead?
|
|
||||||
|
|
||||||
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
|
|
||||||
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
|
|
||||||
|
|
||||||
## Contributing code
|
|
||||||
|
|
||||||
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
|
|
||||||
before sending patches.
|
|
||||||
|
|
||||||
Unless otherwise noted, the Go source files are distributed under
|
|
||||||
the BSD-style license found in the LICENSE file.
|
|
3
vendor/golang.org/x/oauth2/CONTRIBUTORS
generated
vendored
3
vendor/golang.org/x/oauth2/CONTRIBUTORS
generated
vendored
@ -1,3 +0,0 @@
|
|||||||
# This source code was written by the Go contributors.
|
|
||||||
# The master list of contributors is in the main Go distribution,
|
|
||||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user