vendor: add cfssl dependency
parent 9e45f62fc3
commit 902b9faa6f
107 Godeps/Godeps.json generated
@@ -223,6 +223,71 @@
|
||||
"Comment": "v1.1.0-65-gee4a088",
|
||||
"Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/auth",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/certdb",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/config",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/crypto/pkcs7",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/csr",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/errors",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/helpers",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/helpers/derhelpers",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/info",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/log",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/ocsp/config",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/signer",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cloudflare/cfssl/signer/local",
|
||||
"Comment": "1.2.0",
|
||||
"Rev": "db0d0650b6496bfe8061ec56a92edd32d8e75c30"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codegangsta/negroni",
|
||||
"Comment": "v0.1.0-62-g8d75e11",
|
||||
@@ -1125,6 +1190,26 @@
|
||||
"Comment": "v0.23.2-25-g51574ec",
|
||||
"Rev": "51574ec04ff12ca5a50f0935625ec02437191a06"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go",
|
||||
"Rev": "af98904302724c29aa6659ca372d41c9687de2b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go/asn1",
|
||||
"Rev": "af98904302724c29aa6659ca372d41c9687de2b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go/client",
|
||||
"Rev": "af98904302724c29aa6659ca372d41c9687de2b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go/x509",
|
||||
"Rev": "af98904302724c29aa6659ca372d41c9687de2b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/certificate-transparency/go/x509/pkix",
|
||||
"Rev": "af98904302724c29aa6659ca372d41c9687de2b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/gofuzz",
|
||||
"Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5"
|
||||
@@ -1330,6 +1415,10 @@
|
||||
"ImportPath": "github.com/mitchellh/mapstructure",
|
||||
"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mreiferson/go-httpclient",
|
||||
"Rev": "31f0106b4474f14bc441575c19d3a5fa21aa1f6c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mvdan/xurls",
|
||||
"Comment": "v0.8.0-14-g1b768d7",
|
||||
@@ -1971,15 +2060,27 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/bcrypt",
|
||||
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/blowfish",
|
||||
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/curve25519",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/pkcs12",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/pkcs12/internal/rc2",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh",
|
||||
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/exp/inotify",
|
||||
|
1600 Godeps/LICENSES generated
File diff suppressed because it is too large
24 vendor/github.com/cloudflare/cfssl/LICENSE generated vendored Normal file
@@ -0,0 +1,24 @@
|
||||
Copyright (c) 2014 CloudFlare Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
94 vendor/github.com/cloudflare/cfssl/auth/auth.go generated vendored Normal file
@@ -0,0 +1,94 @@
|
||||
// Package auth implements an interface for providing CFSSL
|
||||
// authentication. This is meant to authenticate a client CFSSL to a
|
||||
// remote CFSSL in order to prevent unauthorised use of the signature
|
||||
// capabilities. This package provides both the interface and a
|
||||
// standard HMAC-based implementation.
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// An AuthenticatedRequest contains a request and authentication
|
||||
// token. The Provider may determine whether to validate the timestamp
|
||||
// and remote address.
|
||||
type AuthenticatedRequest struct {
|
||||
// An Authenticator decides whether to use this field.
|
||||
Timestamp int64 `json:"timestamp,omitempty"`
|
||||
RemoteAddress []byte `json:"remote_address,omitempty"`
|
||||
Token []byte `json:"token"`
|
||||
Request []byte `json:"request"`
|
||||
}
|
||||
|
||||
// A Provider can generate tokens from a request and verify a
|
||||
// request. The handling of additional authentication data (such as
|
||||
// the IP address) is handled by the concrete type, as is any
|
||||
// serialisation and state-keeping.
|
||||
type Provider interface {
|
||||
Token(req []byte) (token []byte, err error)
|
||||
Verify(aReq *AuthenticatedRequest) bool
|
||||
}
|
||||
|
||||
// Standard implements an HMAC-SHA-256 authentication provider. It may
|
||||
// be supplied additional data at creation time that will be used as
|
||||
// request || additional-data with the HMAC.
|
||||
type Standard struct {
|
||||
key []byte
|
||||
ad []byte
|
||||
}
|
||||
|
||||
// New generates a new standard authentication provider from the key
|
||||
// and additional data. The additional data will be used when
|
||||
// generating a new token.
|
||||
func New(key string, ad []byte) (*Standard, error) {
|
||||
if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 {
|
||||
switch splitKey[0] {
|
||||
case "env":
|
||||
key = os.Getenv(splitKey[1])
|
||||
case "file":
|
||||
data, err := ioutil.ReadFile(splitKey[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = string(data)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0])
|
||||
}
|
||||
}
|
||||
|
||||
keyBytes, err := hex.DecodeString(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Standard{keyBytes, ad}, nil
|
||||
}
|
||||
|
||||
// Token generates a new authentication token from the request.
|
||||
func (p Standard) Token(req []byte) (token []byte, err error) {
|
||||
h := hmac.New(sha256.New, p.key)
|
||||
h.Write(req)
|
||||
h.Write(p.ad)
|
||||
return h.Sum(nil), nil
|
||||
}
|
||||
|
||||
// Verify determines whether an authenticated request is valid.
|
||||
func (p Standard) Verify(ad *AuthenticatedRequest) bool {
|
||||
if ad == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Standard token generation returns no error.
|
||||
token, _ := p.Token(ad.Request)
|
||||
if len(ad.Token) != len(token) {
|
||||
return false
|
||||
}
|
||||
|
||||
return hmac.Equal(token, ad.Token)
|
||||
}
|
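The auth package above is self-contained, so a short usage sketch may help show how the HMAC provider is meant to be driven. This is not part of the vendored file; only `auth.New`, `Token`, and `Verify` come from the code above, while the hex key and request body are made-up placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/auth"
)

func main() {
	// Hex-encoded HMAC key; per the prefix handling in New, "env:CFSSL_AUTH_KEY"
	// or "file:/path/to/key" would also be accepted. The key here is a placeholder.
	provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
	if err != nil {
		log.Fatal(err)
	}

	request := []byte(`{"hosts":["example.net"]}`) // placeholder request body

	// Token computes HMAC-SHA-256 over request || additional-data.
	token, err := provider.Token(request)
	if err != nil {
		log.Fatal(err)
	}

	// Verify recomputes the token and compares it in constant time.
	ok := provider.Verify(&auth.AuthenticatedRequest{
		Token:   token,
		Request: request,
	})
	fmt.Println("token verified:", ok)
}
```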
58 vendor/github.com/cloudflare/cfssl/certdb/README.md generated vendored Normal file
@@ -0,0 +1,58 @@
# certdb usage

Using a database enables additional functionality for existing commands when a
db config is provided:

 - `sign` and `gencert` add a certificate to the certdb after signing it
 - `serve` enables database functionality for the sign and revoke endpoints

A database is required for the following:

 - `revoke` marks certificates revoked in the database with an optional reason
 - `ocsprefresh` refreshes the table of cached OCSP responses
 - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format

## Setup/Migration

This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
Currently supported:
 - SQLite in sqlite
 - PostgreSQL in pg

### Get goose

    go get bitbucket.org/liamstask/goose/cmd/goose

### Use goose to start and terminate a SQLite DB

To start a SQLite DB using goose:

    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up

To tear down a SQLite DB using goose:

    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down

### Use goose to start and terminate a PostgreSQL DB

To start a PostgreSQL DB using goose:

    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up

To tear down a PostgreSQL DB using goose:

    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down

Note: administration of the PostgreSQL DB is not included. We assume the
databases being connected to are already created and access control is
properly handled.

## CFSSL Configuration

Several cfssl commands take a `-db-config` flag. Create a file with a
JSON dictionary:

    {"driver":"sqlite3","data_source":"certs.db"}

or

    {"driver":"postgres","data_source":"postgres://user:password@host/db"}
40 vendor/github.com/cloudflare/cfssl/certdb/certdb.go generated vendored Normal file
@@ -0,0 +1,40 @@
|
||||
package certdb
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// CertificateRecord encodes a certificate and its metadata
|
||||
// that will be recorded in a database.
|
||||
type CertificateRecord struct {
|
||||
Serial string `db:"serial_number"`
|
||||
AKI string `db:"authority_key_identifier"`
|
||||
CALabel string `db:"ca_label"`
|
||||
Status string `db:"status"`
|
||||
Reason int `db:"reason"`
|
||||
Expiry time.Time `db:"expiry"`
|
||||
RevokedAt time.Time `db:"revoked_at"`
|
||||
PEM string `db:"pem"`
|
||||
}
|
||||
|
||||
// OCSPRecord encodes an OCSP response body and its metadata
|
||||
// that will be recorded in a database.
|
||||
type OCSPRecord struct {
|
||||
Serial string `db:"serial_number"`
|
||||
AKI string `db:"authority_key_identifier"`
|
||||
Body string `db:"body"`
|
||||
Expiry time.Time `db:"expiry"`
|
||||
}
|
||||
|
||||
// Accessor abstracts the CRUD of certdb objects from a DB.
|
||||
type Accessor interface {
|
||||
InsertCertificate(cr CertificateRecord) error
|
||||
GetCertificate(serial, aki string) ([]CertificateRecord, error)
|
||||
GetUnexpiredCertificates() ([]CertificateRecord, error)
|
||||
RevokeCertificate(serial, aki string, reasonCode int) error
|
||||
InsertOCSP(rr OCSPRecord) error
|
||||
GetOCSP(serial, aki string) ([]OCSPRecord, error)
|
||||
GetUnexpiredOCSPs() ([]OCSPRecord, error)
|
||||
UpdateOCSP(serial, aki, body string, expiry time.Time) error
|
||||
UpsertOCSP(serial, aki, body string, expiry time.Time) error
|
||||
}
|
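The Accessor interface is all this package defines; concrete database-backed implementations live elsewhere in cfssl and are not part of this commit. A hedged sketch of how a caller might record a certificate through any implementation; the package name, function name, and "good" status value are hypothetical.

```go
package certstore

import (
	"crypto/x509"
	"encoding/hex"

	"github.com/cloudflare/cfssl/certdb"
)

// recordCertificate stores a parsed certificate through any certdb.Accessor
// implementation supplied by the caller.
func recordCertificate(acc certdb.Accessor, cert *x509.Certificate, pemBytes []byte) error {
	rec := certdb.CertificateRecord{
		Serial: cert.SerialNumber.String(),
		AKI:    hex.EncodeToString(cert.AuthorityKeyId),
		Status: "good", // assumed status label
		Expiry: cert.NotAfter,
		PEM:    string(pemBytes),
	}
	return acc.InsertCertificate(rec)
}
```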
563 vendor/github.com/cloudflare/cfssl/config/config.go generated vendored Normal file
@@ -0,0 +1,563 @@
|
||||
// Package config contains the configuration logic for CFSSL.
|
||||
package config
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/auth"
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
|
||||
)
|
||||
|
||||
// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
|
||||
// not present in a SigningProfile, all of these fields may be copied from the
|
||||
// CSR into the signed certificate. If a CSRWhitelist *is* present in a
|
||||
// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
|
||||
// be copied from the CSR to the signed certificate. Note that some of these
|
||||
// fields, like Subject, can be provided or partially provided through the API.
|
||||
// Since API clients are expected to be trusted, but CSRs are not, fields
|
||||
// provided through the API are not subject to whitelisting through this
|
||||
// mechanism.
|
||||
type CSRWhitelist struct {
|
||||
Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
|
||||
DNSNames, IPAddresses, EmailAddresses bool
|
||||
}
|
||||
|
||||
// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
|
||||
// JSON marshal / unmarshal.
|
||||
type OID asn1.ObjectIdentifier
|
||||
|
||||
// CertificatePolicy represents the ASN.1 PolicyInformation structure from
|
||||
// https://tools.ietf.org/html/rfc3280.html#page-106.
|
||||
// Valid values of Type are "id-qt-unotice" and "id-qt-cps"
|
||||
type CertificatePolicy struct {
|
||||
ID OID
|
||||
Qualifiers []CertificatePolicyQualifier
|
||||
}
|
||||
|
||||
// CertificatePolicyQualifier represents a single qualifier from an ASN.1
|
||||
// PolicyInformation structure.
|
||||
type CertificatePolicyQualifier struct {
|
||||
Type string
|
||||
Value string
|
||||
}
|
||||
|
||||
// AuthRemote is an authenticated remote signer.
|
||||
type AuthRemote struct {
|
||||
RemoteName string `json:"remote"`
|
||||
AuthKeyName string `json:"auth_key"`
|
||||
}
|
||||
|
||||
// A SigningProfile stores information that the CA needs to store
|
||||
// signature policy.
|
||||
type SigningProfile struct {
|
||||
Usage []string `json:"usages"`
|
||||
IssuerURL []string `json:"issuer_urls"`
|
||||
OCSP string `json:"ocsp_url"`
|
||||
CRL string `json:"crl_url"`
|
||||
CA bool `json:"is_ca"`
|
||||
OCSPNoCheck bool `json:"ocsp_no_check"`
|
||||
ExpiryString string `json:"expiry"`
|
||||
BackdateString string `json:"backdate"`
|
||||
AuthKeyName string `json:"auth_key"`
|
||||
RemoteName string `json:"remote"`
|
||||
NotBefore time.Time `json:"not_before"`
|
||||
NotAfter time.Time `json:"not_after"`
|
||||
NameWhitelistString string `json:"name_whitelist"`
|
||||
AuthRemote AuthRemote `json:"auth_remote"`
|
||||
CTLogServers []string `json:"ct_log_servers"`
|
||||
AllowedExtensions []OID `json:"allowed_extensions"`
|
||||
CertStore string `json:"cert_store"`
|
||||
|
||||
Policies []CertificatePolicy
|
||||
Expiry time.Duration
|
||||
Backdate time.Duration
|
||||
Provider auth.Provider
|
||||
RemoteProvider auth.Provider
|
||||
RemoteServer string
|
||||
CSRWhitelist *CSRWhitelist
|
||||
NameWhitelist *regexp.Regexp
|
||||
ExtensionWhitelist map[string]bool
|
||||
ClientProvidesSerialNumbers bool
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals a JSON string into an OID.
|
||||
func (oid *OID) UnmarshalJSON(data []byte) (err error) {
|
||||
if data[0] != '"' || data[len(data)-1] != '"' {
|
||||
return errors.New("OID JSON string not wrapped in quotes." + string(data))
|
||||
}
|
||||
data = data[1 : len(data)-1]
|
||||
parsedOid, err := parseObjectIdentifier(string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*oid = OID(parsedOid)
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalJSON marshals an oid into a JSON string.
|
||||
func (oid OID) MarshalJSON() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
|
||||
}
|
||||
|
||||
func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
|
||||
validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !validOID {
|
||||
err = errors.New("Invalid OID")
|
||||
return
|
||||
}
|
||||
|
||||
segments := strings.Split(oidString, ".")
|
||||
oid = make(asn1.ObjectIdentifier, len(segments))
|
||||
for i, intString := range segments {
|
||||
oid[i], err = strconv.Atoi(intString)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const timeFormat = "2006-01-02T15:04:05"
|
||||
|
||||
// populate is used to fill in the fields that are not in JSON
|
||||
//
|
||||
// First, the ExpiryString parameter is needed to parse
|
||||
// expiration timestamps from JSON. The JSON decoder is not able to
|
||||
// decode a string time duration to a time.Duration, so this is called
|
||||
// when loading the configuration to properly parse and fill out the
|
||||
// Expiry parameter.
|
||||
// This function is also used to create references to the auth key
|
||||
// and default remote for the profile.
|
||||
// It returns true if ExpiryString is a valid representation of a
|
||||
// time.Duration, and the AuthKeyString and RemoteName point to
|
||||
// valid objects. It returns false otherwise.
|
||||
func (p *SigningProfile) populate(cfg *Config) error {
|
||||
if p == nil {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
|
||||
}
|
||||
|
||||
var err error
|
||||
if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
|
||||
log.Debugf("parse expiry in profile")
|
||||
if p.ExpiryString == "" {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
|
||||
}
|
||||
|
||||
dur, err := time.ParseDuration(p.ExpiryString)
|
||||
if err != nil {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
|
||||
}
|
||||
|
||||
log.Debugf("expiry is valid")
|
||||
p.Expiry = dur
|
||||
|
||||
if p.BackdateString != "" {
|
||||
dur, err = time.ParseDuration(p.BackdateString)
|
||||
if err != nil {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
|
||||
}
|
||||
|
||||
p.Backdate = dur
|
||||
}
|
||||
|
||||
if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
|
||||
}
|
||||
|
||||
if len(p.Policies) > 0 {
|
||||
for _, policy := range p.Policies {
|
||||
for _, qualifier := range policy.Qualifiers {
|
||||
if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("invalid policy qualifier type"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if p.RemoteName != "" {
|
||||
log.Debug("match remote in profile to remotes section")
|
||||
if p.AuthRemote.RemoteName != "" {
|
||||
log.Error("profile has both a remote and an auth remote specified")
|
||||
return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
|
||||
}
|
||||
if remote := cfg.Remotes[p.RemoteName]; remote != "" {
|
||||
if err := p.updateRemote(remote); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to find remote in remotes section"))
|
||||
}
|
||||
} else {
|
||||
log.Debug("match auth remote in profile to remotes section")
|
||||
if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
|
||||
if err := p.updateRemote(remote); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to find remote in remotes section"))
|
||||
}
|
||||
}
|
||||
|
||||
if p.AuthKeyName != "" {
|
||||
log.Debug("match auth key in profile to auth_keys section")
|
||||
if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok == true {
|
||||
if key.Type == "standard" {
|
||||
p.Provider, err = auth.New(key.Key, nil)
|
||||
if err != nil {
|
||||
log.Debugf("failed to create new standard auth provider: %v", err)
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to create new standard auth provider"))
|
||||
}
|
||||
} else {
|
||||
log.Debugf("unknown authentication type %v", key.Type)
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("unknown authentication type"))
|
||||
}
|
||||
} else {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to find auth_key in auth_keys section"))
|
||||
}
|
||||
}
|
||||
|
||||
if p.AuthRemote.AuthKeyName != "" {
|
||||
log.Debug("match auth remote key in profile to auth_keys section")
|
||||
if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true {
|
||||
if key.Type == "standard" {
|
||||
p.RemoteProvider, err = auth.New(key.Key, nil)
|
||||
if err != nil {
|
||||
log.Debugf("failed to create new standard auth provider: %v", err)
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to create new standard auth provider"))
|
||||
}
|
||||
} else {
|
||||
log.Debugf("unknown authentication type %v", key.Type)
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("unknown authentication type"))
|
||||
}
|
||||
} else {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to find auth_remote's auth_key in auth_keys section"))
|
||||
}
|
||||
}
|
||||
|
||||
if p.NameWhitelistString != "" {
|
||||
log.Debug("compiling whitelist regular expression")
|
||||
rule, err := regexp.Compile(p.NameWhitelistString)
|
||||
if err != nil {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to compile name whitelist section"))
|
||||
}
|
||||
p.NameWhitelist = rule
|
||||
}
|
||||
|
||||
p.ExtensionWhitelist = map[string]bool{}
|
||||
for _, oid := range p.AllowedExtensions {
|
||||
p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateRemote takes a signing profile and initializes the remote server object
|
||||
// to the hostname:port combination sent by remote.
|
||||
func (p *SigningProfile) updateRemote(remote string) error {
|
||||
if remote != "" {
|
||||
p.RemoteServer = remote
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OverrideRemotes takes a signing configuration and updates the remote server object
|
||||
// to the hostname:port combination sent by remote
|
||||
func (p *Signing) OverrideRemotes(remote string) error {
|
||||
if remote != "" {
|
||||
var err error
|
||||
for _, profile := range p.Profiles {
|
||||
err = profile.updateRemote(remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = p.Default.updateRemote(remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NeedsRemoteSigner returns true if one of the profiles has a remote set
|
||||
func (p *Signing) NeedsRemoteSigner() bool {
|
||||
for _, profile := range p.Profiles {
|
||||
if profile.RemoteServer != "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if p.Default.RemoteServer != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// NeedsLocalSigner returns true if one of the profiles does not have a remote set
|
||||
func (p *Signing) NeedsLocalSigner() bool {
|
||||
for _, profile := range p.Profiles {
|
||||
if profile.RemoteServer == "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if p.Default.RemoteServer == "" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Usages parses the list of key uses in the profile, translating them
|
||||
// to a list of X.509 key usages and extended key usages. The unknown
|
||||
// uses are collected into a slice that is also returned.
|
||||
func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
|
||||
for _, keyUse := range p.Usage {
|
||||
if kuse, ok := KeyUsage[keyUse]; ok {
|
||||
ku |= kuse
|
||||
} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
|
||||
eku = append(eku, ekuse)
|
||||
} else {
|
||||
unk = append(unk, keyUse)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// A valid profile must be a valid local profile or a valid remote profile.
|
||||
// A valid local profile has defined at least key usages to be used, and a
|
||||
// valid local default profile has defined at least a default expiration.
|
||||
// A valid remote profile (default or not) has a remote signer initialized.
|
||||
// In addition, a remote profile must have a valid auth provider if an auth
|
||||
// key is defined.
|
||||
func (p *SigningProfile) validProfile(isDefault bool) bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if p.RemoteName != "" {
|
||||
log.Debugf("validate remote profile")
|
||||
|
||||
if p.RemoteServer == "" {
|
||||
log.Debugf("invalid remote profile: no remote signer specified")
|
||||
return false
|
||||
}
|
||||
|
||||
if p.AuthKeyName != "" && p.Provider == nil {
|
||||
log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set")
|
||||
return false
|
||||
}
|
||||
|
||||
if p.AuthRemote.RemoteName != "" {
|
||||
log.Debugf("invalid remote profile: auth remote is also specified")
|
||||
}
|
||||
} else if p.AuthRemote.RemoteName != "" {
|
||||
log.Debugf("validate auth remote profile")
|
||||
if p.RemoteServer == "" {
|
||||
log.Debugf("invalid auth remote profile: no remote signer specified")
|
||||
return false
|
||||
}
|
||||
|
||||
if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil {
|
||||
log.Debugf("invalid auth remote profile: no auth key is defined")
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
log.Debugf("validate local profile")
|
||||
if !isDefault {
|
||||
if len(p.Usage) == 0 {
|
||||
log.Debugf("invalid local profile: no usages specified")
|
||||
return false
|
||||
} else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) {
|
||||
log.Debugf("invalid local profile: no valid usages")
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if p.Expiry == 0 {
|
||||
log.Debugf("invalid local profile: no expiry set")
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Debugf("profile is valid")
|
||||
return true
|
||||
}
|
||||
|
||||
// Signing codifies the signature configuration policy for a CA.
|
||||
type Signing struct {
|
||||
Profiles map[string]*SigningProfile `json:"profiles"`
|
||||
Default *SigningProfile `json:"default"`
|
||||
}
|
||||
|
||||
// Config stores configuration information for the CA.
|
||||
type Config struct {
|
||||
Signing *Signing `json:"signing"`
|
||||
OCSP *ocspConfig.Config `json:"ocsp"`
|
||||
AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"`
|
||||
Remotes map[string]string `json:"remotes,omitempty"`
|
||||
}
|
||||
|
||||
// Valid ensures that Config is a valid configuration. It should be
|
||||
// called immediately after parsing a configuration file.
|
||||
func (c *Config) Valid() bool {
|
||||
return c.Signing.Valid()
|
||||
}
|
||||
|
||||
// Valid checks the signature policies, ensuring they are valid
|
||||
// policies. A policy is valid if it has defined at least key usages
|
||||
// to be used, and a valid default profile has defined at least a
|
||||
// default expiration.
|
||||
func (p *Signing) Valid() bool {
|
||||
if p == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
log.Debugf("validating configuration")
|
||||
if !p.Default.validProfile(true) {
|
||||
log.Debugf("default profile is invalid")
|
||||
return false
|
||||
}
|
||||
|
||||
for _, sp := range p.Profiles {
|
||||
if !sp.validProfile(false) {
|
||||
log.Debugf("invalid profile")
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// KeyUsage contains a mapping of string names to key usages.
|
||||
var KeyUsage = map[string]x509.KeyUsage{
|
||||
"signing": x509.KeyUsageDigitalSignature,
|
||||
"digital signature": x509.KeyUsageDigitalSignature,
|
||||
"content committment": x509.KeyUsageContentCommitment,
|
||||
"key encipherment": x509.KeyUsageKeyEncipherment,
|
||||
"key agreement": x509.KeyUsageKeyAgreement,
|
||||
"data encipherment": x509.KeyUsageDataEncipherment,
|
||||
"cert sign": x509.KeyUsageCertSign,
|
||||
"crl sign": x509.KeyUsageCRLSign,
|
||||
"encipher only": x509.KeyUsageEncipherOnly,
|
||||
"decipher only": x509.KeyUsageDecipherOnly,
|
||||
}
|
||||
|
||||
// ExtKeyUsage contains a mapping of string names to extended key
|
||||
// usages.
|
||||
var ExtKeyUsage = map[string]x509.ExtKeyUsage{
|
||||
"any": x509.ExtKeyUsageAny,
|
||||
"server auth": x509.ExtKeyUsageServerAuth,
|
||||
"client auth": x509.ExtKeyUsageClientAuth,
|
||||
"code signing": x509.ExtKeyUsageCodeSigning,
|
||||
"email protection": x509.ExtKeyUsageEmailProtection,
|
||||
"s/mime": x509.ExtKeyUsageEmailProtection,
|
||||
"ipsec end system": x509.ExtKeyUsageIPSECEndSystem,
|
||||
"ipsec tunnel": x509.ExtKeyUsageIPSECTunnel,
|
||||
"ipsec user": x509.ExtKeyUsageIPSECUser,
|
||||
"timestamping": x509.ExtKeyUsageTimeStamping,
|
||||
"ocsp signing": x509.ExtKeyUsageOCSPSigning,
|
||||
"microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto,
|
||||
"netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto,
|
||||
}
|
||||
|
||||
// An AuthKey contains an entry for a key used for authentication.
|
||||
type AuthKey struct {
|
||||
// Type contains information needed to select the appropriate
|
||||
// constructor. For example, "standard" for HMAC-SHA-256,
|
||||
// "standard-ip" for HMAC-SHA-256 incorporating the client's
|
||||
// IP.
|
||||
Type string `json:"type"`
|
||||
// Key contains the key information, such as a hex-encoded
|
||||
// HMAC key.
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration specifying basic key
|
||||
// usage and a 1 year expiration time. The key usages chosen are
|
||||
// signing, key encipherment, client auth and server auth.
|
||||
func DefaultConfig() *SigningProfile {
|
||||
d := helpers.OneYear
|
||||
return &SigningProfile{
|
||||
Usage: []string{"signing", "key encipherment", "server auth", "client auth"},
|
||||
Expiry: d,
|
||||
ExpiryString: "8760h",
|
||||
}
|
||||
}
|
||||
|
||||
// LoadFile attempts to load the configuration file stored at the path
|
||||
// and returns the configuration. On error, it returns nil.
|
||||
func LoadFile(path string) (*Config, error) {
|
||||
log.Debugf("loading configuration file from %s", path)
|
||||
if path == "" {
|
||||
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path"))
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file"))
|
||||
}
|
||||
|
||||
return LoadConfig(body)
|
||||
}
|
||||
|
||||
// LoadConfig attempts to load the configuration from a byte slice.
|
||||
// On error, it returns nil.
|
||||
func LoadConfig(config []byte) (*Config, error) {
|
||||
var cfg = &Config{}
|
||||
err := json.Unmarshal(config, &cfg)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
|
||||
errors.New("failed to unmarshal configuration: "+err.Error()))
|
||||
}
|
||||
|
||||
if cfg.Signing == nil {
|
||||
return nil, errors.New("No \"signing\" field present")
|
||||
}
|
||||
|
||||
if cfg.Signing.Default == nil {
|
||||
log.Debugf("no default given: using default config")
|
||||
cfg.Signing.Default = DefaultConfig()
|
||||
} else {
|
||||
if err := cfg.Signing.Default.populate(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for k := range cfg.Signing.Profiles {
|
||||
if err := cfg.Signing.Profiles[k].populate(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !cfg.Valid() {
|
||||
return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration"))
|
||||
}
|
||||
|
||||
log.Debugf("configuration ok")
|
||||
return cfg, nil
|
||||
}
|
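Since config.LoadConfig takes raw JSON, a small illustrative policy may make the struct tags above more concrete. The profile name and expiry values here are arbitrary examples, not defaults shipped with cfssl.

```go
package main

import (
	"log"

	"github.com/cloudflare/cfssl/config"
)

func main() {
	// Example policy written against the JSON tags in the structs above.
	raw := []byte(`{
	  "signing": {
	    "default": {
	      "usages": ["signing", "key encipherment", "server auth", "client auth"],
	      "expiry": "8760h"
	    },
	    "profiles": {
	      "server": {
	        "usages": ["signing", "key encipherment", "server auth"],
	        "expiry": "720h"
	      }
	    }
	  }
	}`)

	cfg, err := config.LoadConfig(raw)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded %d signing profiles", len(cfg.Signing.Profiles))
}
```

LoadConfig fills in Expiry from the expiry strings and rejects the policy if Valid() fails, as shown in the file above.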
188 vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go generated vendored Normal file
@@ -0,0 +1,188 @@
|
||||
// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically
|
||||
// used to package certificates and CRLs. Using openssl, every certificate converted
|
||||
// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
|
||||
// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html
|
||||
//
|
||||
// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
|
||||
//
|
||||
// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements,
|
||||
// for example data can be encrypted and signed and then packaged through pkcs#7 to be
|
||||
// sent over a network and then verified and decrypted. It is asn1, and the type of
|
||||
// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
|
||||
//
|
||||
// ContentInfo ::= SEQUENCE {
|
||||
// contentType ContentType,
|
||||
// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
|
||||
// }
|
||||
//
|
||||
// There are 6 possible ContentTypes, data, signedData, envelopedData,
|
||||
// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted
|
||||
// Data are implemented, as the degenerate case of signedData without a signature is the typical
|
||||
// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12
|
||||
// formats.
|
||||
// The ContentType signedData has the form:
|
||||
//
|
||||
//
|
||||
// signedData ::= SEQUENCE {
|
||||
// version Version,
|
||||
// digestAlgorithms DigestAlgorithmIdentifiers,
|
||||
// contentInfo ContentInfo,
|
||||
// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL
|
||||
// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
|
||||
// signerInfos SignerInfos
|
||||
// }
|
||||
//
|
||||
// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to
|
||||
// this system's use of PKCS #7 data. Version is an integer type, note that PKCS #7 is
|
||||
// recursive; this second layer of ContentInfo is similarly ignored for our degenerate
|
||||
// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
|
||||
// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
|
||||
// of any number of extended certificates is not yet supported in this implementation.
|
||||
//
|
||||
// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice.
|
||||
//
|
||||
// The ContentType encryptedData is the most complicated and its form can be gathered by
|
||||
// the go type below. It essentially contains a raw octet string of encrypted data and an
|
||||
// algorithm identifier for use in decrypting this data.
|
||||
package pkcs7
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
)
|
||||
|
||||
// Types used for asn1 Unmarshaling.
|
||||
|
||||
type signedData struct {
|
||||
Version int
|
||||
DigestAlgorithms asn1.RawValue
|
||||
ContentInfo asn1.RawValue
|
||||
Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"`
|
||||
Crls asn1.RawValue `asn1:"optional"`
|
||||
SignerInfos asn1.RawValue
|
||||
}
|
||||
|
||||
type initPKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
|
||||
}
|
||||
|
||||
// Object identifier strings of the three implemented PKCS7 types.
|
||||
const (
|
||||
ObjIDData = "1.2.840.113549.1.7.1"
|
||||
ObjIDSignedData = "1.2.840.113549.1.7.2"
|
||||
ObjIDEncryptedData = "1.2.840.113549.1.7.6"
|
||||
)
|
||||
|
||||
// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three
|
||||
// possible types of Content objects, as denoted by the object identifier in
|
||||
// the ContentInfo field, the other two being nil. SignedData
|
||||
// is the degenerate SignedData Content info without signature used
|
||||
// to hold certificates and crls. Data is raw bytes, and EncryptedData
|
||||
// is as defined in PKCS #7 standard.
|
||||
type PKCS7 struct {
|
||||
Raw asn1.RawContent
|
||||
ContentInfo string
|
||||
Content Content
|
||||
}
|
||||
|
||||
// Content implements three of the six possible PKCS7 data types. Only one is non-nil.
|
||||
type Content struct {
|
||||
Data []byte
|
||||
SignedData SignedData
|
||||
EncryptedData EncryptedData
|
||||
}
|
||||
|
||||
// SignedData defines the typical carrier of certificates and crls.
|
||||
type SignedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
Certificates []*x509.Certificate
|
||||
Crl *pkix.CertificateList
|
||||
}
|
||||
|
||||
// Data contains raw bytes. Used as a subtype in PKCS12.
|
||||
type Data struct {
|
||||
Bytes []byte
|
||||
}
|
||||
|
||||
// EncryptedData contains encrypted data. Used as a subtype in PKCS12.
|
||||
type EncryptedData struct {
|
||||
Raw asn1.RawContent
|
||||
Version int
|
||||
EncryptedContentInfo EncryptedContentInfo
|
||||
}
|
||||
|
||||
// EncryptedContentInfo is a subtype of PKCS7EncryptedData.
|
||||
type EncryptedContentInfo struct {
|
||||
Raw asn1.RawContent
|
||||
ContentType asn1.ObjectIdentifier
|
||||
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
// ParsePKCS7 attempts to parse the DER encoded bytes of a
|
||||
// PKCS7 structure.
|
||||
func ParsePKCS7(raw []byte) (msg *PKCS7, err error) {
|
||||
|
||||
var pkcs7 initPKCS7
|
||||
_, err = asn1.Unmarshal(raw, &pkcs7)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
|
||||
msg = new(PKCS7)
|
||||
msg.Raw = pkcs7.Raw
|
||||
msg.ContentInfo = pkcs7.ContentType.String()
|
||||
switch {
|
||||
case msg.ContentInfo == ObjIDData:
|
||||
msg.ContentInfo = "Data"
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
case msg.ContentInfo == ObjIDSignedData:
|
||||
msg.ContentInfo = "SignedData"
|
||||
var signedData signedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
if len(signedData.Certificates.Bytes) != 0 {
|
||||
msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
}
|
||||
if len(signedData.Crls.Bytes) != 0 {
|
||||
msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
}
|
||||
msg.Content.SignedData.Version = signedData.Version
|
||||
msg.Content.SignedData.Raw = pkcs7.Content.Bytes
|
||||
case msg.ContentInfo == ObjIDEncryptedData:
|
||||
msg.ContentInfo = "EncryptedData"
|
||||
var encryptedData EncryptedData
|
||||
_, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
if encryptedData.Version != 0 {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0"))
|
||||
}
|
||||
msg.Content.EncryptedData = encryptedData
|
||||
|
||||
default:
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data"))
|
||||
}
|
||||
|
||||
return msg, nil
|
||||
|
||||
}
|
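A brief sketch of driving ParsePKCS7 follows; the input file name is an assumption, and PEM-wrapped input would need to be decoded to DER first, since the parser works on raw ASN.1 bytes.

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/crypto/pkcs7"
)

func main() {
	// bundle.p7b is an assumed file containing a DER-encoded PKCS #7 blob.
	raw, err := ioutil.ReadFile("bundle.p7b")
	if err != nil {
		log.Fatal(err)
	}

	msg, err := pkcs7.ParsePKCS7(raw)
	if err != nil {
		log.Fatal(err)
	}

	// ContentInfo is rewritten to a friendly name by ParsePKCS7.
	switch msg.ContentInfo {
	case "SignedData":
		log.Printf("degenerate SignedData with %d certificates",
			len(msg.Content.SignedData.Certificates))
	case "Data":
		log.Printf("raw data, %d bytes", len(msg.Content.Data))
	case "EncryptedData":
		log.Printf("encrypted data, version %d", msg.Content.EncryptedData.Version)
	}
}
```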
414 vendor/github.com/cloudflare/cfssl/csr/csr.go generated vendored Normal file
@@ -0,0 +1,414 @@
|
||||
// Package csr implements certificate requests for CFSSL.
|
||||
package csr
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"net"
|
||||
"net/mail"
|
||||
"strings"
|
||||
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
)
|
||||
|
||||
const (
|
||||
curveP256 = 256
|
||||
curveP384 = 384
|
||||
curveP521 = 521
|
||||
)
|
||||
|
||||
// A Name contains the SubjectInfo fields.
|
||||
type Name struct {
|
||||
C string // Country
|
||||
ST string // State
|
||||
L string // Locality
|
||||
O string // OrganisationName
|
||||
OU string // OrganisationalUnitName
|
||||
SerialNumber string
|
||||
}
|
||||
|
||||
// A KeyRequest is a generic request for a new key.
|
||||
type KeyRequest interface {
|
||||
Algo() string
|
||||
Size() int
|
||||
Generate() (crypto.PrivateKey, error)
|
||||
SigAlgo() x509.SignatureAlgorithm
|
||||
}
|
||||
|
||||
// A BasicKeyRequest contains the algorithm and key size for a new private key.
|
||||
type BasicKeyRequest struct {
|
||||
A string `json:"algo"`
|
||||
S int `json:"size"`
|
||||
}
|
||||
|
||||
// NewBasicKeyRequest returns a default BasicKeyRequest.
|
||||
func NewBasicKeyRequest() *BasicKeyRequest {
|
||||
return &BasicKeyRequest{"ecdsa", curveP256}
|
||||
}
|
||||
|
||||
// Algo returns the requested key algorithm represented as a string.
|
||||
func (kr *BasicKeyRequest) Algo() string {
|
||||
return kr.A
|
||||
}
|
||||
|
||||
// Size returns the requested key size.
|
||||
func (kr *BasicKeyRequest) Size() int {
|
||||
return kr.S
|
||||
}
|
||||
|
||||
// Generate generates a key as specified in the request. Currently,
|
||||
// only ECDSA and RSA are supported.
|
||||
func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) {
|
||||
log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size())
|
||||
switch kr.Algo() {
|
||||
case "rsa":
|
||||
if kr.Size() < 2048 {
|
||||
return nil, errors.New("RSA key is too weak")
|
||||
}
|
||||
if kr.Size() > 8192 {
|
||||
return nil, errors.New("RSA key size too large")
|
||||
}
|
||||
return rsa.GenerateKey(rand.Reader, kr.Size())
|
||||
case "ecdsa":
|
||||
var curve elliptic.Curve
|
||||
switch kr.Size() {
|
||||
case curveP256:
|
||||
curve = elliptic.P256()
|
||||
case curveP384:
|
||||
curve = elliptic.P384()
|
||||
case curveP521:
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, errors.New("invalid curve")
|
||||
}
|
||||
return ecdsa.GenerateKey(curve, rand.Reader)
|
||||
default:
|
||||
return nil, errors.New("invalid algorithm")
|
||||
}
|
||||
}
|
||||
|
||||
// SigAlgo returns an appropriate X.509 signature algorithm given the
|
||||
// key request's type and size.
|
||||
func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
|
||||
switch kr.Algo() {
|
||||
case "rsa":
|
||||
switch {
|
||||
case kr.Size() >= 4096:
|
||||
return x509.SHA512WithRSA
|
||||
case kr.Size() >= 3072:
|
||||
return x509.SHA384WithRSA
|
||||
case kr.Size() >= 2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
}
|
||||
case "ecdsa":
|
||||
switch kr.Size() {
|
||||
case curveP521:
|
||||
return x509.ECDSAWithSHA512
|
||||
case curveP384:
|
||||
return x509.ECDSAWithSHA384
|
||||
case curveP256:
|
||||
return x509.ECDSAWithSHA256
|
||||
default:
|
||||
return x509.ECDSAWithSHA1
|
||||
}
|
||||
default:
|
||||
return x509.UnknownSignatureAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// CAConfig is a section used in the requests initialising a new CA.
|
||||
type CAConfig struct {
|
||||
PathLength int `json:"pathlen"`
|
||||
Expiry string `json:"expiry"`
|
||||
}
|
||||
|
||||
// A CertificateRequest encapsulates the API interface to the
|
||||
// certificate request functionality.
|
||||
type CertificateRequest struct {
|
||||
CN string
|
||||
Names []Name `json:"names"`
|
||||
Hosts []string `json:"hosts"`
|
||||
KeyRequest KeyRequest `json:"key,omitempty"`
|
||||
CA *CAConfig `json:"ca,omitempty"`
|
||||
SerialNumber string `json:"serialnumber,omitempty"`
|
||||
}
|
||||
|
||||
// New returns a new, empty CertificateRequest with a
|
||||
// BasicKeyRequest.
|
||||
func New() *CertificateRequest {
|
||||
return &CertificateRequest{
|
||||
KeyRequest: NewBasicKeyRequest(),
|
||||
}
|
||||
}
|
||||
|
||||
// appendIf appends to a if s is not an empty string.
|
||||
func appendIf(s string, a *[]string) {
|
||||
if s != "" {
|
||||
*a = append(*a, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the PKIX name for the request.
|
||||
func (cr *CertificateRequest) Name() pkix.Name {
|
||||
var name pkix.Name
|
||||
name.CommonName = cr.CN
|
||||
|
||||
for _, n := range cr.Names {
|
||||
appendIf(n.C, &name.Country)
|
||||
appendIf(n.ST, &name.Province)
|
||||
appendIf(n.L, &name.Locality)
|
||||
appendIf(n.O, &name.Organization)
|
||||
appendIf(n.OU, &name.OrganizationalUnit)
|
||||
}
|
||||
name.SerialNumber = cr.SerialNumber
|
||||
return name
|
||||
}
|
||||
|
||||
// ParseRequest takes a certificate request and generates a key and
|
||||
// CSR from it. It does no validation -- caveat emptor. It will,
|
||||
// however, fail if the key request is not valid (i.e., an unsupported
|
||||
// curve or RSA key size). The lack of validation was specifically
|
||||
// chosen to allow the end user to define a policy and validate the
|
||||
// request appropriately before calling this function.
|
||||
func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
|
||||
log.Info("received CSR")
|
||||
if req.KeyRequest == nil {
|
||||
req.KeyRequest = NewBasicKeyRequest()
|
||||
}
|
||||
|
||||
log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
|
||||
priv, err := req.KeyRequest.Generate()
|
||||
if err != nil {
|
||||
err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch priv := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
key = x509.MarshalPKCS1PrivateKey(priv)
|
||||
block := pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: key,
|
||||
}
|
||||
key = pem.EncodeToMemory(&block)
|
||||
case *ecdsa.PrivateKey:
|
||||
key, err = x509.MarshalECPrivateKey(priv)
|
||||
if err != nil {
|
||||
err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
|
||||
return
|
||||
}
|
||||
block := pem.Block{
|
||||
Type: "EC PRIVATE KEY",
|
||||
Bytes: key,
|
||||
}
|
||||
key = pem.EncodeToMemory(&block)
|
||||
default:
|
||||
panic("Generate should have failed to produce a valid key.")
|
||||
}
|
||||
|
||||
var tpl = x509.CertificateRequest{
|
||||
Subject: req.Name(),
|
||||
SignatureAlgorithm: req.KeyRequest.SigAlgo(),
|
||||
}
|
||||
|
||||
for i := range req.Hosts {
|
||||
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
|
||||
tpl.IPAddresses = append(tpl.IPAddresses, ip)
|
||||
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
|
||||
tpl.EmailAddresses = append(tpl.EmailAddresses, req.Hosts[i])
|
||||
} else {
|
||||
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
|
||||
}
|
||||
}
|
||||
|
||||
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
|
||||
if err != nil {
|
||||
log.Errorf("failed to generate a CSR: %v", err)
|
||||
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
|
||||
return
|
||||
}
|
||||
block := pem.Block{
|
||||
Type: "CERTIFICATE REQUEST",
|
||||
Bytes: csr,
|
||||
}
|
||||
|
||||
log.Info("encoded CSR")
|
||||
csr = pem.EncodeToMemory(&block)
|
||||
return
|
||||
}
|
||||
|
||||
// ExtractCertificateRequest extracts a CertificateRequest from
|
||||
// x509.Certificate. It is intended to be used for generating a new certificate
|
||||
// from an existing certificate. For a root certificate, the CA expiry
|
||||
// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
|
||||
func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
|
||||
req := New()
|
||||
req.CN = cert.Subject.CommonName
|
||||
req.Names = getNames(cert.Subject)
|
||||
req.Hosts = getHosts(cert)
|
||||
req.SerialNumber = cert.Subject.SerialNumber
|
||||
|
||||
if cert.IsCA {
|
||||
req.CA = new(CAConfig)
|
||||
// CA expiry length is calculated based on the input cert
|
||||
// issue date and expiry date.
|
||||
req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
|
||||
req.CA.PathLength = cert.MaxPathLen
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func getHosts(cert *x509.Certificate) []string {
|
||||
var hosts []string
|
||||
for _, ip := range cert.IPAddresses {
|
||||
hosts = append(hosts, ip.String())
|
||||
}
|
||||
for _, dns := range cert.DNSNames {
|
||||
hosts = append(hosts, dns)
|
||||
}
|
||||
for _, email := range cert.EmailAddresses {
|
||||
hosts = append(hosts, email)
|
||||
}
|
||||
|
||||
return hosts
|
||||
}
|
||||
|
||||
// getNames returns an array of Names from the certificate
|
||||
// It only cares about Country, Organization, OrganizationalUnit, Locality, Province
|
||||
func getNames(sub pkix.Name) []Name {
|
||||
// anonymous func for finding the max of a list of integers
|
||||
max := func(v1 int, vn ...int) (max int) {
|
||||
max = v1
|
||||
for i := 0; i < len(vn); i++ {
|
||||
if vn[i] > max {
|
||||
max = vn[i]
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
nc := len(sub.Country)
|
||||
norg := len(sub.Organization)
|
||||
nou := len(sub.OrganizationalUnit)
|
||||
nl := len(sub.Locality)
|
||||
np := len(sub.Province)
|
||||
|
||||
n := max(nc, norg, nou, nl, np)
|
||||
|
||||
names := make([]Name, n)
|
||||
for i := range names {
|
||||
if i < nc {
|
||||
names[i].C = sub.Country[i]
|
||||
}
|
||||
if i < norg {
|
||||
names[i].O = sub.Organization[i]
|
||||
}
|
||||
if i < nou {
|
||||
names[i].OU = sub.OrganizationalUnit[i]
|
||||
}
|
||||
if i < nl {
|
||||
names[i].L = sub.Locality[i]
|
||||
}
|
||||
if i < np {
|
||||
names[i].ST = sub.Province[i]
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// A Generator is responsible for validating certificate requests.
|
||||
type Generator struct {
|
||||
Validator func(*CertificateRequest) error
|
||||
}
|
||||
|
||||
// ProcessRequest validates and processes the incoming request. It is
|
||||
// a wrapper around a validator and the ParseRequest function.
|
||||
func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
|
||||
|
||||
log.Info("generate received request")
|
||||
err = g.Validator(req)
|
||||
if err != nil {
|
||||
log.Warningf("invalid request: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
csr, key, err = ParseRequest(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IsNameEmpty returns true if the name has no identifying information in it.
|
||||
func IsNameEmpty(n Name) bool {
|
||||
empty := func(s string) bool { return strings.TrimSpace(s) == "" }
|
||||
|
||||
if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Regenerate uses the provided CSR as a template for signing a new
|
||||
// CSR using priv.
|
||||
func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
|
||||
req, extra, err := helpers.ParseCSR(csr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(extra) > 0 {
|
||||
return nil, errors.New("csr: trailing data in certificate request")
|
||||
}
|
||||
|
||||
return x509.CreateCertificateRequest(rand.Reader, req, priv)
|
||||
}
|
||||
|
||||
// Generate creates a new CSR from a CertificateRequest structure and
|
||||
// an existing key. The KeyRequest field is ignored.
|
||||
func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
|
||||
sigAlgo := helpers.SignerAlgo(priv, crypto.SHA256)
|
||||
if sigAlgo == x509.UnknownSignatureAlgorithm {
|
||||
return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
|
||||
}
|
||||
|
||||
var tpl = x509.CertificateRequest{
|
||||
Subject: req.Name(),
|
||||
SignatureAlgorithm: sigAlgo,
|
||||
}
|
||||
|
||||
for i := range req.Hosts {
|
||||
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
|
||||
tpl.IPAddresses = append(tpl.IPAddresses, ip)
|
||||
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
|
||||
tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address)
|
||||
} else {
|
||||
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
|
||||
}
|
||||
}
|
||||
|
||||
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
|
||||
if err != nil {
|
||||
log.Errorf("failed to generate a CSR: %v", err)
|
||||
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
|
||||
return
|
||||
}
|
||||
block := pem.Block{
|
||||
Type: "CERTIFICATE REQUEST",
|
||||
Bytes: csr,
|
||||
}
|
||||
|
||||
log.Info("encoded CSR")
|
||||
csr = pem.EncodeToMemory(&block)
|
||||
return
|
||||
}
|
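To show how the request types above fit together, here is a hedged sketch of generating a key and CSR with ParseRequest; the subject values and hosts are placeholders, not anything prescribed by the package.

```go
package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/csr"
)

func main() {
	// New defaults to an ECDSA P-256 key request (see NewBasicKeyRequest above).
	req := csr.New()
	req.CN = "example.net" // placeholder subject
	req.Hosts = []string{"example.net", "10.0.0.1"}
	req.Names = []csr.Name{{C: "US", O: "Example Org"}}

	// ParseRequest generates the private key and returns PEM-encoded CSR and key.
	csrPEM, keyPEM, err := csr.ParseRequest(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", csrPEM)
	_ = keyPEM // the PEM-encoded private key; store it securely
}
```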
46 vendor/github.com/cloudflare/cfssl/errors/doc.go generated vendored Normal file
@@ -0,0 +1,46 @@
|
||||
/*
Package errors provides error types returned in CF SSL.

1. Type Error is intended for errors produced by CF SSL packages.
It formats to a json object that consists of an error message and a 4-digit code for error reasoning.

Example: {"code":1002, "message": "Failed to decode certificate"}

The index of codes are listed below:
    1XXX: CertificateError
        1000: Unknown
        1001: ReadFailed
        1002: DecodeFailed
        1003: ParseFailed
        1100: SelfSigned
        12XX: VerifyFailed
            121X: CertificateInvalid
                1210: NotAuthorizedToSign
                1211: Expired
                1212: CANotAuthorizedForThisName
                1213: TooManyIntermediates
                1214: IncompatibleUsage
            1220: UnknownAuthority
    2XXX: PrivatekeyError
        2000: Unknown
        2001: ReadFailed
        2002: DecodeFailed
        2003: ParseFailed
        2100: Encrypted
        2200: NotRSA
        2300: KeyMismatch
        2400: GenerationFailed
        2500: Unavailable
    3XXX: IntermediatesError
    4XXX: RootError
    5XXX: PolicyError
        5100: NoKeyUsages
        5200: InvalidPolicy
        5300: InvalidRequest
        5400: UnknownProfile
    6XXX: DialError

2. Type HttpError is intended for CF SSL API to consume. It contains a HTTP status code that will be read and returned
by the API server.
*/
package errors
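As a quick illustration of the code scheme documented above (a sketch, not part of the vendored package):

package main

import (
    "fmt"

    cferr "github.com/cloudflare/cfssl/errors"
)

func main() {
    // Category and Reason add up to the 4-digit code described above:
    // CertificateError (1000) + DecodeFailed (2) => 1002.
    err := cferr.New(cferr.CertificateError, cferr.DecodeFailed)

    // Error() marshals the error to the documented JSON shape.
    fmt.Println(err) // {"code":1002,"message":"Failed to decode certificate"}
}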
420 vendor/github.com/cloudflare/cfssl/errors/error.go generated vendored Normal file
@@ -0,0 +1,420 @@
package errors
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is the error type usually returned by functions in CF SSL package.
|
||||
// It contains a 4-digit error code where the most significant digit
|
||||
// describes the category where the error occurred and the remaining 3 digits
|
||||
// describe the specific error reason.
|
||||
type Error struct {
|
||||
ErrorCode int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Category is the most significant digit of the error code.
|
||||
type Category int
|
||||
|
||||
// Reason is the last 3 digits of the error code.
|
||||
type Reason int
|
||||
|
||||
const (
|
||||
// Success indicates no error occurred.
|
||||
Success Category = 1000 * iota // 0XXX
|
||||
|
||||
// CertificateError indicates a fault in a certificate.
|
||||
CertificateError // 1XXX
|
||||
|
||||
// PrivateKeyError indicates a fault in a private key.
|
||||
PrivateKeyError // 2XXX
|
||||
|
||||
// IntermediatesError indicates a fault in an intermediate.
|
||||
IntermediatesError // 3XXX
|
||||
|
||||
// RootError indicates a fault in a root.
|
||||
RootError // 4XXX
|
||||
|
||||
// PolicyError indicates an error arising from a malformed or
|
||||
// non-existent policy, or a breach of policy.
|
||||
PolicyError // 5XXX
|
||||
|
||||
// DialError indicates a network fault.
|
||||
DialError // 6XXX
|
||||
|
||||
// APIClientError indicates a problem with the API client.
|
||||
APIClientError // 7XXX
|
||||
|
||||
// OCSPError indicates a problem with OCSP signing
|
||||
OCSPError // 8XXX
|
||||
|
||||
// CSRError indicates a problem with CSR parsing
|
||||
CSRError // 9XXX
|
||||
|
||||
// CTError indicates a problem with the certificate transparency process
|
||||
CTError // 10XXX
|
||||
|
||||
// CertStoreError indicates a problem with the certificate store
|
||||
CertStoreError // 11XXX
|
||||
)
|
||||
|
||||
// None is a non-specified error.
|
||||
const (
|
||||
None Reason = iota
|
||||
)
|
||||
|
||||
// Warning code for a success
|
||||
const (
|
||||
BundleExpiringBit int = 1 << iota // 0x01
|
||||
BundleNotUbiquitousBit // 0x02
|
||||
)
|
||||
|
||||
// Parsing errors
|
||||
const (
|
||||
Unknown Reason = iota // X000
|
||||
ReadFailed // X001
|
||||
DecodeFailed // X002
|
||||
ParseFailed // X003
|
||||
)
|
||||
|
||||
// The following represent certificate non-parsing errors, and must be
|
||||
// specified along with CertificateError.
|
||||
const (
|
||||
// SelfSigned indicates that a certificate is self-signed and
|
||||
// cannot be used in the manner being attempted.
|
||||
SelfSigned Reason = 100 * (iota + 1) // Code 11XX
|
||||
|
||||
// VerifyFailed is an X.509 verification failure. The two least
// significant digits of 12XX are determined by examining the
// actual x509 error.
|
||||
VerifyFailed // Code 12XX
|
||||
|
||||
// BadRequest indicates that the certificate request is invalid.
|
||||
BadRequest // Code 13XX
|
||||
|
||||
// MissingSerial indicates that the profile specified
|
||||
// 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial
|
||||
// number.
|
||||
MissingSerial // Code 14XX
|
||||
)
|
||||
|
||||
const (
|
||||
certificateInvalid = 10 * (iota + 1) //121X
|
||||
unknownAuthority //122x
|
||||
)
|
||||
|
||||
// The following represent private-key non-parsing errors, and must be
|
||||
// specified with PrivateKeyError.
|
||||
const (
|
||||
// Encrypted indicates that the private key is a PKCS #8 encrypted
|
||||
// private key. At this time, CFSSL does not support decrypting
|
||||
// these keys.
|
||||
Encrypted Reason = 100 * (iota + 1) //21XX
|
||||
|
||||
// NotRSAOrECC indicates that they key is not an RSA or ECC
|
||||
// private key; these are the only two private key types supported
|
||||
// at this time by CFSSL.
|
||||
NotRSAOrECC //22XX
|
||||
|
||||
// KeyMismatch indicates that the private key does not match
|
||||
// the public key or certificate being presented with the key.
|
||||
KeyMismatch //23XX
|
||||
|
||||
// GenerationFailed indicates that a private key could not
|
||||
// be generated.
|
||||
GenerationFailed //24XX
|
||||
|
||||
// Unavailable indicates that a private key mechanism (such as
|
||||
// PKCS #11) was requested but support for that mechanism is
|
||||
// not available.
|
||||
Unavailable
|
||||
)
|
||||
|
||||
// The following are policy-related non-parsing errors, and must be
|
||||
// specified along with PolicyError.
|
||||
const (
|
||||
// NoKeyUsages indicates that the profile does not permit any
|
||||
// key usages for the certificate.
|
||||
NoKeyUsages Reason = 100 * (iota + 1) // 51XX
|
||||
|
||||
// InvalidPolicy indicates that policy being requested is not
|
||||
// a valid policy or does not exist.
|
||||
InvalidPolicy // 52XX
|
||||
|
||||
// InvalidRequest indicates a certificate request violated the
|
||||
// constraints of the policy being applied to the request.
|
||||
InvalidRequest // 53XX
|
||||
|
||||
// UnknownProfile indicates that the profile does not exist.
|
||||
UnknownProfile // 54XX
|
||||
)
|
||||
|
||||
// The following are API client related errors, and should be
|
||||
// specified with APIClientError.
|
||||
const (
|
||||
// AuthenticationFailure occurs when the client is unable
|
||||
// to obtain an authentication token for the request.
|
||||
AuthenticationFailure Reason = 100 * (iota + 1)
|
||||
|
||||
// JSONError wraps an encoding/json error.
|
||||
JSONError
|
||||
|
||||
// IOError wraps an io/ioutil error.
|
||||
IOError
|
||||
|
||||
// ClientHTTPError wraps a net/http error.
|
||||
ClientHTTPError
|
||||
|
||||
// ServerRequestFailed covers any other failures from the API
|
||||
// client.
|
||||
ServerRequestFailed
|
||||
)
|
||||
|
||||
// The following are OCSP related errors, and should be
|
||||
// specified with OCSPError
|
||||
const (
|
||||
// IssuerMismatch occurs when the certificate in the OCSP signing
|
||||
// request was not issued by the CA that this responder responds for.
|
||||
IssuerMismatch Reason = 100 * (iota + 1) // 81XX
|
||||
|
||||
// InvalidStatus occurs when the OCSP signing requests includes an
|
||||
// invalid value for the certificate status.
|
||||
InvalidStatus
|
||||
)
|
||||
|
||||
// Certificate transparency related errors specified with CTError
|
||||
const (
|
||||
// PrecertSubmissionFailed occurs when submitting a precertificate to
|
||||
// a log server fails
|
||||
PrecertSubmissionFailed = 100 * (iota + 1)
|
||||
)
|
||||
|
||||
// Certificate persistence related errors specified with CertStoreError
|
||||
const (
|
||||
// InsertionFailed occurs when a SQL insert query fails to complete.
|
||||
InsertionFailed = 100 * (iota + 1)
|
||||
// RecordNotFound occurs when a SQL query targeting one unique
// record fails to update the specified row in the table.
|
||||
RecordNotFound
|
||||
)
|
||||
|
||||
// The error interface implementation, which formats to a JSON object string.
|
||||
func (e *Error) Error() string {
|
||||
marshaled, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(marshaled)
|
||||
|
||||
}
|
||||
|
||||
// New returns an error that contains an error code and message derived from
|
||||
// the given category, reason. Currently, to avoid confusion, it is not
|
||||
// allowed to create an error of category Success
|
||||
func New(category Category, reason Reason) *Error {
|
||||
errorCode := int(category) + int(reason)
|
||||
var msg string
|
||||
switch category {
|
||||
case OCSPError:
|
||||
switch reason {
|
||||
case ReadFailed:
|
||||
msg = "No certificate provided"
|
||||
case IssuerMismatch:
|
||||
msg = "Certificate not issued by this issuer"
|
||||
case InvalidStatus:
|
||||
msg = "Invalid revocation status"
|
||||
}
|
||||
case CertificateError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Unknown certificate error"
|
||||
case ReadFailed:
|
||||
msg = "Failed to read certificate"
|
||||
case DecodeFailed:
|
||||
msg = "Failed to decode certificate"
|
||||
case ParseFailed:
|
||||
msg = "Failed to parse certificate"
|
||||
case SelfSigned:
|
||||
msg = "Certificate is self signed"
|
||||
case VerifyFailed:
|
||||
msg = "Unable to verify certificate"
|
||||
case BadRequest:
|
||||
msg = "Invalid certificate request"
|
||||
case MissingSerial:
|
||||
msg = "Missing serial number in request"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.",
|
||||
reason))
|
||||
|
||||
}
|
||||
case PrivateKeyError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Unknown private key error"
|
||||
case ReadFailed:
|
||||
msg = "Failed to read private key"
|
||||
case DecodeFailed:
|
||||
msg = "Failed to decode private key"
|
||||
case ParseFailed:
|
||||
msg = "Failed to parse private key"
|
||||
case Encrypted:
|
||||
msg = "Private key is encrypted."
|
||||
case NotRSAOrECC:
|
||||
msg = "Private key algorithm is not RSA or ECC"
|
||||
case KeyMismatch:
|
||||
msg = "Private key does not match public key"
|
||||
case GenerationFailed:
|
||||
msg = "Failed to generate new private key"
|
||||
case Unavailable:
|
||||
msg = "Private key is unavailable"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.",
|
||||
reason))
|
||||
}
|
||||
case IntermediatesError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Unknown intermediate certificate error"
|
||||
case ReadFailed:
|
||||
msg = "Failed to read intermediate certificate"
|
||||
case DecodeFailed:
|
||||
msg = "Failed to decode intermediate certificate"
|
||||
case ParseFailed:
|
||||
msg = "Failed to parse intermediate certificate"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.",
|
||||
reason))
|
||||
}
|
||||
case RootError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Unknown root certificate error"
|
||||
case ReadFailed:
|
||||
msg = "Failed to read root certificate"
|
||||
case DecodeFailed:
|
||||
msg = "Failed to decode root certificate"
|
||||
case ParseFailed:
|
||||
msg = "Failed to parse root certificate"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.",
|
||||
reason))
|
||||
}
|
||||
case PolicyError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Unknown policy error"
|
||||
case NoKeyUsages:
|
||||
msg = "Invalid policy: no key usage available"
|
||||
case InvalidPolicy:
|
||||
msg = "Invalid or unknown policy"
|
||||
case InvalidRequest:
|
||||
msg = "Policy violation request"
|
||||
case UnknownProfile:
|
||||
msg = "Unknown policy profile"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.",
|
||||
reason))
|
||||
}
|
||||
case DialError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Failed to dial remote server"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.",
|
||||
reason))
|
||||
}
|
||||
case APIClientError:
|
||||
switch reason {
|
||||
case AuthenticationFailure:
|
||||
msg = "API client authentication failure"
|
||||
case JSONError:
|
||||
msg = "API client JSON config error"
|
||||
case ClientHTTPError:
|
||||
msg = "API client HTTP error"
|
||||
case IOError:
|
||||
msg = "API client IO error"
|
||||
case ServerRequestFailed:
|
||||
msg = "API client error: Server request failed"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.",
|
||||
reason))
|
||||
}
|
||||
case CSRError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "CSR parsing failed due to unknown error"
|
||||
case ReadFailed:
|
||||
msg = "CSR file read failed"
|
||||
case ParseFailed:
|
||||
msg = "CSR Parsing failed"
|
||||
case DecodeFailed:
|
||||
msg = "CSR Decode failed"
|
||||
case BadRequest:
|
||||
msg = "CSR Bad request"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason))
|
||||
}
|
||||
case CTError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Certificate transparency parsing failed due to unknown error"
|
||||
case PrecertSubmissionFailed:
|
||||
msg = "Certificate transparency precertificate submission failed"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason))
|
||||
}
|
||||
case CertStoreError:
|
||||
switch reason {
|
||||
case Unknown:
|
||||
msg = "Certificate store action failed due to unknown error"
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason))
|
||||
}
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
|
||||
category))
|
||||
}
|
||||
return &Error{ErrorCode: errorCode, Message: msg}
|
||||
}
|
||||
|
||||
// Wrap returns an error that contains the given error and an error code derived from
|
||||
// the given category, reason and the error. Currently, to avoid confusion, it is not
|
||||
// allowed to create an error of category Success
|
||||
func Wrap(category Category, reason Reason, err error) *Error {
|
||||
errorCode := int(category) + int(reason)
|
||||
if err == nil {
|
||||
panic("Wrap needs a supplied error to initialize.")
|
||||
}
|
||||
|
||||
// do not double wrap an error
|
||||
switch err.(type) {
|
||||
case *Error:
|
||||
panic("Unable to wrap a wrapped error.")
|
||||
}
|
||||
|
||||
switch category {
|
||||
case CertificateError:
|
||||
// given VerifyFailed, report the status with a more detailed status code
// for the certificate errors we care about.
|
||||
if reason == VerifyFailed {
|
||||
switch errorType := err.(type) {
|
||||
case x509.CertificateInvalidError:
|
||||
errorCode += certificateInvalid + int(errorType.Reason)
|
||||
case x509.UnknownAuthorityError:
|
||||
errorCode += unknownAuthority
|
||||
}
|
||||
}
|
||||
case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError,
|
||||
APIClientError, CSRError, CTError, CertStoreError:
|
||||
// no-op, just use the error
|
||||
default:
|
||||
panic(fmt.Sprintf("Unsupported CFSSL error type: %d.",
|
||||
category))
|
||||
}
|
||||
|
||||
return &Error{ErrorCode: errorCode, Message: err.Error()}
|
||||
|
||||
}
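A hedged sketch of how Wrap behaves (illustration only; the underlying error text is made up):

package main

import (
    "errors"
    "fmt"

    cferr "github.com/cloudflare/cfssl/errors"
)

func main() {
    // Wrap keeps the underlying error's message but assigns a CFSSL code:
    // CertificateError (1000) + ParseFailed (3) => 1003. For VerifyFailed,
    // Wrap also inspects the x509 error type to refine the code, e.g. 1220
    // for x509.UnknownAuthorityError.
    underlying := errors.New("asn1: structure error: tags don't match")
    wrapped := cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, underlying)
    fmt.Println(wrapped) // {"code":1003,"message":"asn1: structure error: tags don't match"}
}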
47 vendor/github.com/cloudflare/cfssl/errors/http.go generated vendored Normal file
@@ -0,0 +1,47 @@
package errors

import (
    "errors"
    "net/http"
)

// HTTPError is an augmented error with an HTTP status code.
type HTTPError struct {
    StatusCode int
    error
}

// Error implements the error interface.
func (e *HTTPError) Error() string {
    return e.error.Error()
}

// NewMethodNotAllowed returns an appropriate error in the case that
// an HTTP client uses an invalid method (i.e. a GET in place of a POST)
// on an API endpoint.
func NewMethodNotAllowed(method string) *HTTPError {
    return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)}
}

// NewBadRequest creates an HTTPError with the given error and error code 400.
func NewBadRequest(err error) *HTTPError {
    return &HTTPError{http.StatusBadRequest, err}
}

// NewBadRequestString returns an HTTPError with the supplied message
// and error code 400.
func NewBadRequestString(s string) *HTTPError {
    return NewBadRequest(errors.New(s))
}

// NewBadRequestMissingParameter returns a 400 HTTPError as a required
// parameter is missing in the HTTP request.
func NewBadRequestMissingParameter(s string) *HTTPError {
    return NewBadRequestString(`Missing parameter "` + s + `"`)
}

// NewBadRequestUnwantedParameter returns a 400 HTTPError as an unnecessary
// parameter is present in the HTTP request.
func NewBadRequestUnwantedParameter(s string) *HTTPError {
    return NewBadRequestString(`Unwanted parameter "` + s + `"`)
}
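A sketch of how an API handler might surface these errors (the endpoint path and port are hypothetical):

package main

import (
    "net/http"

    cferr "github.com/cloudflare/cfssl/errors"
)

// signHandler rejects anything but POST; the HTTPError's embedded status
// code drives the HTTP response and its message becomes the body.
func signHandler(w http.ResponseWriter, r *http.Request) {
    if r.Method != "POST" {
        err := cferr.NewMethodNotAllowed(r.Method)
        http.Error(w, err.Error(), err.StatusCode)
        return
    }
    w.Write([]byte("ok"))
}

func main() {
    http.HandleFunc("/api/v1/cfssl/sign", signHandler) // hypothetical route
    http.ListenAndServe(":8888", nil)                  // hypothetical port
}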
42 vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go generated vendored Normal file
@@ -0,0 +1,42 @@
// Package derhelpers implements common functionality
// on DER encoded data
package derhelpers

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/rsa"
    "crypto/x509"

    cferr "github.com/cloudflare/cfssl/errors"
)

// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve
// DER-encoded private key. The key must not be in PEM format.
func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) {
    generalKey, err := x509.ParsePKCS8PrivateKey(keyDER)
    if err != nil {
        generalKey, err = x509.ParsePKCS1PrivateKey(keyDER)
        if err != nil {
            generalKey, err = x509.ParseECPrivateKey(keyDER)
            if err != nil {
                // We don't include the actual error into
                // the final error. The reason might be
                // we don't want to leak any info about
                // the private key.
                return nil, cferr.New(cferr.PrivateKeyError,
                    cferr.ParseFailed)
            }
        }
    }

    switch generalKey.(type) {
    case *rsa.PrivateKey:
        return generalKey.(*rsa.PrivateKey), nil
    case *ecdsa.PrivateKey:
        return generalKey.(*ecdsa.PrivateKey), nil
    }

    // should never reach here
    return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
}
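A small round-trip sketch (illustrative, assuming nothing beyond the standard library and the package above): marshal a fresh EC key to DER and parse it back through ParsePrivateKeyDER.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "crypto/x509"
    "fmt"

    "github.com/cloudflare/cfssl/helpers/derhelpers"
)

func main() {
    // Generate an EC key and marshal it to SEC 1 DER.
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    der, err := x509.MarshalECPrivateKey(priv)
    if err != nil {
        panic(err)
    }

    // ParsePrivateKeyDER tries PKCS #8, then PKCS #1, then EC parsing.
    signer, err := derhelpers.ParsePrivateKeyDER(der)
    if err != nil {
        panic(err)
    }
    fmt.Printf("parsed %T\n", signer) // *ecdsa.PrivateKey
}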
478 vendor/github.com/cloudflare/cfssl/helpers/helpers.go generated vendored Normal file
@@ -0,0 +1,478 @@
// Package helpers implements utility functionality common to many
|
||||
// CFSSL packages.
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/crypto/pkcs7"
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/cloudflare/cfssl/helpers/derhelpers"
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
"golang.org/x/crypto/pkcs12"
|
||||
)
|
||||
|
||||
// OneYear is a time.Duration representing a year's worth of seconds.
|
||||
const OneYear = 8760 * time.Hour
|
||||
|
||||
// OneDay is a time.Duration representing a day's worth of seconds.
|
||||
const OneDay = 24 * time.Hour
|
||||
|
||||
// InclusiveDate returns the time.Time representation of a date - 1
|
||||
// nanosecond. This allows time.After to be used inclusively.
|
||||
func InclusiveDate(year int, month time.Month, day int) time.Time {
|
||||
return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond)
|
||||
}
|
||||
|
||||
// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 5 years.
|
||||
var Jul2012 = InclusiveDate(2012, time.July, 01)
|
||||
|
||||
// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop
|
||||
// issuing certificates valid for more than 39 months.
|
||||
var Apr2015 = InclusiveDate(2015, time.April, 01)
|
||||
|
||||
// KeyLength returns the bit size of ECDSA or RSA PublicKey
|
||||
func KeyLength(key interface{}) int {
|
||||
if key == nil {
|
||||
return 0
|
||||
}
|
||||
if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok {
|
||||
return ecdsaKey.Curve.Params().BitSize
|
||||
} else if rsaKey, ok := key.(*rsa.PublicKey); ok {
|
||||
return rsaKey.N.BitLen()
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// ExpiryTime returns the time when the certificate chain is expired.
|
||||
func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) {
|
||||
if len(chain) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
notAfter = chain[0].NotAfter
|
||||
for _, cert := range chain {
|
||||
if notAfter.After(cert.NotAfter) {
|
||||
notAfter = cert.NotAfter
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MonthsValid returns the number of months for which a certificate is valid.
|
||||
func MonthsValid(c *x509.Certificate) int {
|
||||
issued := c.NotBefore
|
||||
expiry := c.NotAfter
|
||||
years := (expiry.Year() - issued.Year())
|
||||
months := years*12 + int(expiry.Month()) - int(issued.Month())
|
||||
|
||||
// Round up if valid for less than a full month
|
||||
if expiry.Day() > issued.Day() {
|
||||
months++
|
||||
}
|
||||
return months
|
||||
}
|
||||
|
||||
// ValidExpiry determines if a certificate is valid for an acceptable
|
||||
// length of time per the CA/Browser Forum baseline requirements.
|
||||
// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf
|
||||
func ValidExpiry(c *x509.Certificate) bool {
|
||||
issued := c.NotBefore
|
||||
|
||||
var maxMonths int
|
||||
switch {
|
||||
case issued.After(Apr2015):
|
||||
maxMonths = 39
|
||||
case issued.After(Jul2012):
|
||||
maxMonths = 60
|
||||
case issued.Before(Jul2012):
|
||||
maxMonths = 120
|
||||
}
|
||||
|
||||
if MonthsValid(c) > maxMonths {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SignatureString returns the TLS signature string corresponding to
|
||||
// an X509 signature algorithm.
|
||||
func SignatureString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2WithRSA"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5WithRSA"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1WithRSA"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256WithRSA"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384WithRSA"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512WithRSA"
|
||||
case x509.DSAWithSHA1:
|
||||
return "DSAWithSHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "DSAWithSHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "ECDSAWithSHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "ECDSAWithSHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "ECDSAWithSHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "ECDSAWithSHA512"
|
||||
default:
|
||||
return "Unknown Signature"
|
||||
}
|
||||
}
|
||||
|
||||
// HashAlgoString returns the hash algorithm name contained in the signature
|
||||
// method.
|
||||
func HashAlgoString(alg x509.SignatureAlgorithm) string {
|
||||
switch alg {
|
||||
case x509.MD2WithRSA:
|
||||
return "MD2"
|
||||
case x509.MD5WithRSA:
|
||||
return "MD5"
|
||||
case x509.SHA1WithRSA:
|
||||
return "SHA1"
|
||||
case x509.SHA256WithRSA:
|
||||
return "SHA256"
|
||||
case x509.SHA384WithRSA:
|
||||
return "SHA384"
|
||||
case x509.SHA512WithRSA:
|
||||
return "SHA512"
|
||||
case x509.DSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.DSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA1:
|
||||
return "SHA1"
|
||||
case x509.ECDSAWithSHA256:
|
||||
return "SHA256"
|
||||
case x509.ECDSAWithSHA384:
|
||||
return "SHA384"
|
||||
case x509.ECDSAWithSHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return "Unknown Hash Algorithm"
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
|
||||
func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
|
||||
var buffer bytes.Buffer
|
||||
for _, cert := range certs {
|
||||
pem.Encode(&buffer, &pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert.Raw,
|
||||
})
|
||||
}
|
||||
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
||||
// EncodeCertificatePEM encodes a single x509 certificate to PEM
|
||||
func EncodeCertificatePEM(cert *x509.Certificate) []byte {
|
||||
return EncodeCertificatesPEM([]*x509.Certificate{cert})
|
||||
}
|
||||
|
||||
// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and returns them;
// it can handle PEM-encoded PKCS #7 structures.
|
||||
func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
|
||||
var certs []*x509.Certificate
|
||||
var err error
|
||||
certsPEM = bytes.TrimSpace(certsPEM)
|
||||
for len(certsPEM) > 0 {
|
||||
var cert []*x509.Certificate
|
||||
cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
|
||||
if err != nil {
|
||||
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
|
||||
} else if cert == nil {
|
||||
break
|
||||
}
|
||||
|
||||
certs = append(certs, cert...)
|
||||
}
|
||||
if len(certsPEM) > 0 {
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
|
||||
}
|
||||
return certs, nil
|
||||
}
|
||||
|
||||
// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
|
||||
// either PKCS #7, PKCS #12, or raw x509.
|
||||
func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
|
||||
certsDER = bytes.TrimSpace(certsDER)
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
|
||||
if err != nil {
|
||||
var pkcs12data interface{}
|
||||
certs = make([]*x509.Certificate, 1)
|
||||
pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
|
||||
if err != nil {
|
||||
certs, err = x509.ParseCertificates(certsDER)
|
||||
if err != nil {
|
||||
return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
|
||||
}
|
||||
} else {
|
||||
key = pkcs12data.(crypto.Signer)
|
||||
}
|
||||
} else {
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
|
||||
}
|
||||
certs = pkcs7data.Content.SignedData.Certificates
|
||||
}
|
||||
if certs == nil {
|
||||
return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
|
||||
}
|
||||
return certs, key, nil
|
||||
}
|
||||
|
||||
// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks if it is self-signed.
|
||||
func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
cert, err := ParseCertificatePEM(certPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// ParseCertificatePEM parses and returns a PEM-encoded certificate;
// it can handle PEM-encoded PKCS #7 structures.
|
||||
func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
|
||||
certPEM = bytes.TrimSpace(certPEM)
|
||||
cert, rest, err := ParseOneCertificateFromPEM(certPEM)
|
||||
if err != nil {
|
||||
// Log the actual parsing error but throw a default parse error message.
|
||||
log.Debugf("Certificate parsing error: %v", err)
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
|
||||
} else if cert == nil {
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
|
||||
} else if len(rest) > 0 {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
|
||||
} else if len(cert) > 1 {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
|
||||
}
|
||||
return cert[0], nil
|
||||
}
|
||||
|
||||
// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
|
||||
// either a raw x509 certificate or a PKCS #7 structure possibly containing
|
||||
// multiple certificates, from the top of certsPEM, which itself may
|
||||
// contain multiple PEM encoded certificate objects.
|
||||
func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
|
||||
|
||||
block, rest := pem.Decode(certsPEM)
|
||||
if block == nil {
|
||||
return nil, rest, nil
|
||||
}
|
||||
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
if pkcs7data.ContentInfo != "SignedData" {
|
||||
return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
|
||||
}
|
||||
certs := pkcs7data.Content.SignedData.Certificates
|
||||
if certs == nil {
|
||||
return nil, rest, errors.New("PKCS #7 structure contains no certificates")
|
||||
}
|
||||
return certs, rest, nil
|
||||
}
|
||||
var certs = []*x509.Certificate{cert}
|
||||
return certs, rest, nil
|
||||
}
|
||||
|
||||
// LoadPEMCertPool loads a pool of PEM certificates from file.
|
||||
func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
|
||||
pemCerts, err := ioutil.ReadFile(certsFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(pemCerts) {
|
||||
return nil, errors.New("failed to load cert pool")
|
||||
}
|
||||
|
||||
return certPool, nil
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEM parses and returns a PEM-encoded private
|
||||
// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
|
||||
return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
|
||||
}
|
||||
|
||||
// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
|
||||
// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
|
||||
// or elliptic private key.
|
||||
func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
|
||||
keyDER, err := GetKeyDERFromPEM(keyPEM, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return derhelpers.ParsePrivateKeyDER(keyDER)
|
||||
}
|
||||
|
||||
// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
|
||||
func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
|
||||
keyDER, _ := pem.Decode(in)
|
||||
if keyDER != nil {
|
||||
if procType, ok := keyDER.Headers["Proc-Type"]; ok {
|
||||
if strings.Contains(procType, "ENCRYPTED") {
|
||||
if password != nil {
|
||||
return x509.DecryptPEMBlock(keyDER, password)
|
||||
}
|
||||
return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
|
||||
}
|
||||
}
|
||||
return keyDER.Bytes, nil
|
||||
}
|
||||
|
||||
return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
|
||||
}
|
||||
|
||||
// CheckSignature verifies a signature made by the key on a CSR, such
|
||||
// as on the CSR itself.
|
||||
func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error {
|
||||
var hashType crypto.Hash
|
||||
|
||||
switch algo {
|
||||
case x509.SHA1WithRSA, x509.ECDSAWithSHA1:
|
||||
hashType = crypto.SHA1
|
||||
case x509.SHA256WithRSA, x509.ECDSAWithSHA256:
|
||||
hashType = crypto.SHA256
|
||||
case x509.SHA384WithRSA, x509.ECDSAWithSHA384:
|
||||
hashType = crypto.SHA384
|
||||
case x509.SHA512WithRSA, x509.ECDSAWithSHA512:
|
||||
hashType = crypto.SHA512
|
||||
default:
|
||||
return x509.ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if !hashType.Available() {
|
||||
return x509.ErrUnsupportedAlgorithm
|
||||
}
|
||||
h := hashType.New()
|
||||
|
||||
h.Write(signed)
|
||||
digest := h.Sum(nil)
|
||||
|
||||
switch pub := csr.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
|
||||
case *ecdsa.PublicKey:
|
||||
ecdsaSig := new(struct{ R, S *big.Int })
|
||||
if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
|
||||
return err
|
||||
}
|
||||
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
|
||||
return errors.New("x509: ECDSA signature contained zero or negative values")
|
||||
}
|
||||
if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
|
||||
return errors.New("x509: ECDSA verification failure")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return x509.ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
|
||||
func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
|
||||
in = bytes.TrimSpace(in)
|
||||
p, rest := pem.Decode(in)
|
||||
if p != nil {
|
||||
if p.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
|
||||
}
|
||||
|
||||
csr, err = x509.ParseCertificateRequest(p.Bytes)
|
||||
} else {
|
||||
csr, err = x509.ParseCertificateRequest(in)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
|
||||
if err != nil {
|
||||
return nil, rest, err
|
||||
}
|
||||
|
||||
return csr, rest, nil
|
||||
}
|
||||
|
||||
// ParseCSRPEM parses a PEM-encoded certificate signing request.
|
||||
// It does not check the signature. This is useful for dumping data from a CSR
|
||||
// locally.
|
||||
func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
|
||||
block, _ := pem.Decode([]byte(csrPEM))
|
||||
der := block.Bytes
|
||||
csrObject, err := x509.ParseCertificateRequest(der)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return csrObject, nil
|
||||
}
|
||||
|
||||
// SignerAlgo returns an X.509 signature algorithm corresponding to
|
||||
// the crypto.Hash provided from a crypto.Signer.
|
||||
func SignerAlgo(priv crypto.Signer, h crypto.Hash) x509.SignatureAlgorithm {
|
||||
switch priv.Public().(type) {
|
||||
case *rsa.PublicKey:
|
||||
switch h {
|
||||
case crypto.SHA512:
|
||||
return x509.SHA512WithRSA
|
||||
case crypto.SHA384:
|
||||
return x509.SHA384WithRSA
|
||||
case crypto.SHA256:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
switch h {
|
||||
case crypto.SHA512:
|
||||
return x509.ECDSAWithSHA512
|
||||
case crypto.SHA384:
|
||||
return x509.ECDSAWithSHA384
|
||||
case crypto.SHA256:
|
||||
return x509.ECDSAWithSHA256
|
||||
default:
|
||||
return x509.ECDSAWithSHA1
|
||||
}
|
||||
default:
|
||||
return x509.UnknownSignatureAlgorithm
|
||||
}
|
||||
}
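A usage sketch for the helpers above (the ca.pem / ca-key.pem file names are made up; any PEM certificate and unencrypted key would do):

package main

import (
    "crypto/x509"
    "fmt"
    "io/ioutil"

    "github.com/cloudflare/cfssl/helpers"
)

func main() {
    certPEM, err := ioutil.ReadFile("ca.pem") // hypothetical path
    if err != nil {
        panic(err)
    }
    keyPEM, err := ioutil.ReadFile("ca-key.pem") // hypothetical path
    if err != nil {
        panic(err)
    }

    cert, err := helpers.ParseCertificatePEM(certPEM)
    if err != nil {
        panic(err)
    }
    key, err := helpers.ParsePrivateKeyPEM(keyPEM)
    if err != nil {
        panic(err)
    }

    fmt.Println("subject CN:", cert.Subject.CommonName)
    fmt.Println("key bits:  ", helpers.KeyLength(key.Public()))
    fmt.Println("expires:   ", helpers.ExpiryTime([]*x509.Certificate{cert}))
}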
15 vendor/github.com/cloudflare/cfssl/info/info.go generated vendored Normal file
@@ -0,0 +1,15 @@
// Package info contains the definitions for the info endpoint
package info

// Req is the request struct for an info API request.
type Req struct {
    Label   string `json:"label"`
    Profile string `json:"profile"`
}

// Resp is the response for an Info API request.
type Resp struct {
    Certificate  string   `json:"certificate"`
    Usage        []string `json:"usages"`
    ExpiryString string   `json:"expiry"`
}
174 vendor/github.com/cloudflare/cfssl/log/log.go generated vendored Normal file
@@ -0,0 +1,174 @@
// Package log implements a wrapper around the Go standard library's
|
||||
// logging package. Clients should set the current log level; only
|
||||
// messages below that level will actually be logged. For example, if
|
||||
// Level is set to LevelWarning, only log messages at the Warning,
|
||||
// Error, and Critical levels will be logged.
|
||||
package log
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// The following constants represent logging levels in increasing levels of seriousness.
|
||||
const (
|
||||
// LevelDebug is the log level for Debug statements.
|
||||
LevelDebug = iota
|
||||
// LevelInfo is the log level for Info statements.
|
||||
LevelInfo
|
||||
// LevelWarning is the log level for Warning statements.
|
||||
LevelWarning
|
||||
// LevelError is the log level for Error statements.
|
||||
LevelError
|
||||
// LevelCritical is the log level for Critical statements.
|
||||
LevelCritical
|
||||
// LevelFatal is the log level for Fatal statements.
|
||||
LevelFatal
|
||||
)
|
||||
|
||||
var levelPrefix = [...]string{
|
||||
LevelDebug: "DEBUG",
|
||||
LevelInfo: "INFO",
|
||||
LevelWarning: "WARNING",
|
||||
LevelError: "ERROR",
|
||||
LevelCritical: "CRITICAL",
|
||||
LevelFatal: "FATAL",
|
||||
}
|
||||
|
||||
// Level stores the current logging level.
|
||||
var Level = LevelInfo
|
||||
|
||||
// SyslogWriter specifies the necessary methods for an alternate output
|
||||
// destination passed in via SetLogger.
|
||||
//
|
||||
// SyslogWriter is satisfied by *syslog.Writer.
|
||||
type SyslogWriter interface {
|
||||
Debug(string) error
|
||||
Info(string) error
|
||||
Warning(string) error
|
||||
Err(string) error
|
||||
Crit(string) error
|
||||
Emerg(string) error
|
||||
}
|
||||
|
||||
// syslogWriter stores the SetLogger() parameter.
|
||||
var syslogWriter SyslogWriter
|
||||
|
||||
// SetLogger sets the destination used for output by this package.
|
||||
// A *syslog.Writer is a good choice for the logger parameter.
|
||||
// Call with a nil parameter to revert to default behavior.
|
||||
func SetLogger(logger SyslogWriter) {
|
||||
syslogWriter = logger
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Only define loglevel flag once.
|
||||
if flag.Lookup("loglevel") == nil {
|
||||
flag.IntVar(&Level, "loglevel", LevelInfo, "Log level (0 = DEBUG, 5 = FATAL)")
|
||||
}
|
||||
}
|
||||
|
||||
func print(l int, msg string) {
|
||||
if l >= Level {
|
||||
if syslogWriter != nil {
|
||||
var err error
|
||||
switch l {
|
||||
case LevelDebug:
|
||||
err = syslogWriter.Debug(msg)
|
||||
case LevelInfo:
|
||||
err = syslogWriter.Info(msg)
|
||||
case LevelWarning:
|
||||
err = syslogWriter.Warning(msg)
|
||||
case LevelError:
|
||||
err = syslogWriter.Err(msg)
|
||||
case LevelCritical:
|
||||
err = syslogWriter.Crit(msg)
|
||||
case LevelFatal:
|
||||
err = syslogWriter.Emerg(msg)
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("Unable to write syslog: %v for msg: %s\n", err, msg)
|
||||
}
|
||||
} else {
|
||||
log.Printf("[%s] %s", levelPrefix[l], msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func outputf(l int, format string, v []interface{}) {
|
||||
print(l, fmt.Sprintf(format, v...))
|
||||
}
|
||||
|
||||
func output(l int, v []interface{}) {
|
||||
print(l, fmt.Sprint(v...))
|
||||
}
|
||||
|
||||
// Fatalf logs a formatted message at the "fatal" level and then exits. The
|
||||
// arguments are handled in the same manner as fmt.Printf.
|
||||
func Fatalf(format string, v ...interface{}) {
|
||||
outputf(LevelFatal, format, v)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatal logs its arguments at the "fatal" level and then exits.
|
||||
func Fatal(v ...interface{}) {
|
||||
output(LevelFatal, v)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Criticalf logs a formatted message at the "critical" level. The
|
||||
// arguments are handled in the same manner as fmt.Printf.
|
||||
func Criticalf(format string, v ...interface{}) {
|
||||
outputf(LevelCritical, format, v)
|
||||
}
|
||||
|
||||
// Critical logs its arguments at the "critical" level.
|
||||
func Critical(v ...interface{}) {
|
||||
output(LevelCritical, v)
|
||||
}
|
||||
|
||||
// Errorf logs a formatted message at the "error" level. The arguments
|
||||
// are handled in the same manner as fmt.Printf.
|
||||
func Errorf(format string, v ...interface{}) {
|
||||
outputf(LevelError, format, v)
|
||||
}
|
||||
|
||||
// Error logs its arguments at the "error" level.
|
||||
func Error(v ...interface{}) {
|
||||
output(LevelError, v)
|
||||
}
|
||||
|
||||
// Warningf logs a formatted message at the "warning" level. The
|
||||
// arguments are handled in the same manner as fmt.Printf.
|
||||
func Warningf(format string, v ...interface{}) {
|
||||
outputf(LevelWarning, format, v)
|
||||
}
|
||||
|
||||
// Warning logs its arguments at the "warning" level.
|
||||
func Warning(v ...interface{}) {
|
||||
output(LevelWarning, v)
|
||||
}
|
||||
|
||||
// Infof logs a formatted message at the "info" level. The arguments
|
||||
// are handled in the same manner as fmt.Printf.
|
||||
func Infof(format string, v ...interface{}) {
|
||||
outputf(LevelInfo, format, v)
|
||||
}
|
||||
|
||||
// Info logs its arguments at the "info" level.
|
||||
func Info(v ...interface{}) {
|
||||
output(LevelInfo, v)
|
||||
}
|
||||
|
||||
// Debugf logs a formatted message at the "debug" level. The arguments
|
||||
// are handled in the same manner as fmt.Printf.
|
||||
func Debugf(format string, v ...interface{}) {
|
||||
outputf(LevelDebug, format, v)
|
||||
}
|
||||
|
||||
// Debug logs its arguments at the "debug" level.
|
||||
func Debug(v ...interface{}) {
|
||||
output(LevelDebug, v)
|
||||
}
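A brief sketch of the intended use of the log package (illustration only):

package main

import "github.com/cloudflare/cfssl/log"

func main() {
    // Only messages at or above the current level are emitted, so with
    // LevelWarning set the Info call below is dropped.
    log.Level = log.LevelWarning

    log.Info("not printed")
    log.Warningf("printed: %d CT log servers configured", 0)

    // Passing a *syslog.Writer to SetLogger would redirect output to
    // syslog; nil keeps the standard library logger.
    log.SetLogger(nil)
    log.Error("printed via the standard logger")
}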
13 vendor/github.com/cloudflare/cfssl/ocsp/config/config.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Package config in the ocsp directory provides configuration data for an OCSP
// signer.
package config

import "time"

// Config contains configuration information required to set up an OCSP signer.
type Config struct {
    CACertFile        string
    ResponderCertFile string
    KeyFile           string
    Interval          time.Duration
}
447 vendor/github.com/cloudflare/cfssl/signer/local/local.go generated vendored Normal file
@@ -0,0 +1,447 @@
// Package local implements certificate signature functionality for CFSSL.
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net"
|
||||
"net/mail"
|
||||
"os"
|
||||
|
||||
"github.com/cloudflare/cfssl/certdb"
|
||||
"github.com/cloudflare/cfssl/config"
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/info"
|
||||
"github.com/cloudflare/cfssl/log"
|
||||
"github.com/cloudflare/cfssl/signer"
|
||||
"github.com/google/certificate-transparency/go"
|
||||
"github.com/google/certificate-transparency/go/client"
|
||||
)
|
||||
|
||||
// Signer contains a signer that uses the standard library to
|
||||
// support both ECDSA and RSA CA keys.
|
||||
type Signer struct {
|
||||
ca *x509.Certificate
|
||||
priv crypto.Signer
|
||||
policy *config.Signing
|
||||
sigAlgo x509.SignatureAlgorithm
|
||||
dbAccessor certdb.Accessor
|
||||
}
|
||||
|
||||
// NewSigner creates a new Signer directly from a
|
||||
// private key and certificate, with optional policy.
|
||||
func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) {
|
||||
if policy == nil {
|
||||
policy = &config.Signing{
|
||||
Profiles: map[string]*config.SigningProfile{},
|
||||
Default: config.DefaultConfig()}
|
||||
}
|
||||
|
||||
if !policy.Valid() {
|
||||
return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
|
||||
}
|
||||
|
||||
return &Signer{
|
||||
ca: cert,
|
||||
priv: priv,
|
||||
sigAlgo: sigAlgo,
|
||||
policy: policy,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewSignerFromFile generates a new local signer from a caFile
|
||||
// and a caKey file, both PEM encoded.
|
||||
func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
|
||||
log.Debug("Loading CA: ", caFile)
|
||||
ca, err := ioutil.ReadFile(caFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debug("Loading CA key: ", caKeyFile)
|
||||
cakey, err := ioutil.ReadFile(caKeyFile)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
|
||||
}
|
||||
|
||||
parsedCa, err := helpers.ParseCertificatePEM(ca)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
|
||||
password := []byte(strPassword)
|
||||
if strPassword == "" {
|
||||
password = nil
|
||||
}
|
||||
|
||||
priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
|
||||
if err != nil {
|
||||
log.Debug("Malformed private key %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
|
||||
}
|
||||
|
||||
func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
|
||||
err = signer.FillTemplate(template, s.policy.Default, profile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var initRoot bool
|
||||
if s.ca == nil {
|
||||
if !template.IsCA {
|
||||
err = cferr.New(cferr.PolicyError, cferr.InvalidRequest)
|
||||
return
|
||||
}
|
||||
template.DNSNames = nil
|
||||
template.EmailAddresses = nil
|
||||
s.ca = template
|
||||
initRoot = true
|
||||
template.MaxPathLen = signer.MaxPathLen
|
||||
} else if template.IsCA {
|
||||
template.MaxPathLen = 1
|
||||
template.DNSNames = nil
|
||||
template.EmailAddresses = nil
|
||||
}
|
||||
|
||||
derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
|
||||
}
|
||||
if initRoot {
|
||||
s.ca, err = x509.ParseCertificate(derBytes)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err)
|
||||
}
|
||||
}
|
||||
|
||||
cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
log.Infof("signed certificate with serial number %d", template.SerialNumber)
|
||||
return
|
||||
}
|
||||
|
||||
// replaceSliceIfEmpty replaces the contents of replaced with newContents if
|
||||
// the slice referenced by replaced is empty
|
||||
func replaceSliceIfEmpty(replaced, newContents *[]string) {
|
||||
if len(*replaced) == 0 {
|
||||
*replaced = *newContents
|
||||
}
|
||||
}
|
||||
|
||||
// PopulateSubjectFromCSR has functionality similar to Name, except
|
||||
// it fills the fields of the resulting pkix.Name with req's if the
|
||||
// subject's corresponding fields are empty
|
||||
func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name {
|
||||
// if no subject, use req
|
||||
if s == nil {
|
||||
return req
|
||||
}
|
||||
|
||||
name := s.Name()
|
||||
|
||||
if name.CommonName == "" {
|
||||
name.CommonName = req.CommonName
|
||||
}
|
||||
|
||||
replaceSliceIfEmpty(&name.Country, &req.Country)
|
||||
replaceSliceIfEmpty(&name.Province, &req.Province)
|
||||
replaceSliceIfEmpty(&name.Locality, &req.Locality)
|
||||
replaceSliceIfEmpty(&name.Organization, &req.Organization)
|
||||
replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit)
|
||||
if name.SerialNumber == "" {
|
||||
name.SerialNumber = req.SerialNumber
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the
|
||||
// content of hosts, if it is not nil.
|
||||
func OverrideHosts(template *x509.Certificate, hosts []string) {
|
||||
if hosts != nil {
|
||||
template.IPAddresses = []net.IP{}
|
||||
template.EmailAddresses = []string{}
|
||||
template.DNSNames = []string{}
|
||||
}
|
||||
|
||||
for i := range hosts {
|
||||
if ip := net.ParseIP(hosts[i]); ip != nil {
|
||||
template.IPAddresses = append(template.IPAddresses, ip)
|
||||
} else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil {
|
||||
template.EmailAddresses = append(template.EmailAddresses, email.Address)
|
||||
} else {
|
||||
template.DNSNames = append(template.DNSNames, hosts[i])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Sign signs a new certificate based on the PEM-encoded client
|
||||
// certificate or certificate request with the signing profile,
|
||||
// specified by profileName.
|
||||
func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
|
||||
profile, err := signer.Profile(s, req.Profile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
block, _ := pem.Decode([]byte(req.Request))
|
||||
if block == nil {
|
||||
return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
|
||||
}
|
||||
|
||||
if block.Type != "CERTIFICATE REQUEST" {
|
||||
return nil, cferr.Wrap(cferr.CSRError,
|
||||
cferr.BadRequest, errors.New("not a certificate or csr"))
|
||||
}
|
||||
|
||||
csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Copy out only the fields from the CSR authorized by policy.
|
||||
safeTemplate := x509.Certificate{}
|
||||
// If the profile contains no explicit whitelist, assume that all fields
|
||||
// should be copied from the CSR.
|
||||
if profile.CSRWhitelist == nil {
|
||||
safeTemplate = *csrTemplate
|
||||
} else {
|
||||
if profile.CSRWhitelist.Subject {
|
||||
safeTemplate.Subject = csrTemplate.Subject
|
||||
}
|
||||
if profile.CSRWhitelist.PublicKeyAlgorithm {
|
||||
safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm
|
||||
}
|
||||
if profile.CSRWhitelist.PublicKey {
|
||||
safeTemplate.PublicKey = csrTemplate.PublicKey
|
||||
}
|
||||
if profile.CSRWhitelist.SignatureAlgorithm {
|
||||
safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm
|
||||
}
|
||||
if profile.CSRWhitelist.DNSNames {
|
||||
safeTemplate.DNSNames = csrTemplate.DNSNames
|
||||
}
|
||||
if profile.CSRWhitelist.IPAddresses {
|
||||
safeTemplate.IPAddresses = csrTemplate.IPAddresses
|
||||
}
|
||||
if profile.CSRWhitelist.EmailAddresses {
|
||||
safeTemplate.EmailAddresses = csrTemplate.EmailAddresses
|
||||
}
|
||||
}
|
||||
|
||||
OverrideHosts(&safeTemplate, req.Hosts)
|
||||
safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)
|
||||
|
||||
// If there is a whitelist, ensure that both the Common Name and SAN DNSNames match
|
||||
if profile.NameWhitelist != nil {
|
||||
if safeTemplate.Subject.CommonName != "" {
|
||||
if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil {
|
||||
return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
|
||||
}
|
||||
}
|
||||
for _, name := range safeTemplate.DNSNames {
|
||||
if profile.NameWhitelist.Find([]byte(name)) == nil {
|
||||
return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
|
||||
}
|
||||
}
|
||||
for _, name := range safeTemplate.EmailAddresses {
|
||||
if profile.NameWhitelist.Find([]byte(name)) == nil {
|
||||
return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if profile.ClientProvidesSerialNumbers {
|
||||
if req.Serial == nil {
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial)
|
||||
}
|
||||
safeTemplate.SerialNumber = req.Serial
|
||||
} else {
|
||||
// RFC 5280 4.1.2.2:
|
||||
// Certificate users MUST be able to handle serialNumber
|
||||
// values up to 20 octets. Conforming CAs MUST NOT use
|
||||
// serialNumber values longer than 20 octets.
|
||||
//
|
||||
// If CFSSL is providing the serial numbers, it makes
|
||||
// sense to use the max supported size.
|
||||
serialNumber := make([]byte, 20)
|
||||
_, err = io.ReadFull(rand.Reader, serialNumber)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err)
|
||||
}
|
||||
|
||||
// SetBytes interprets buf as the bytes of a big-endian
|
||||
// unsigned integer. The leading byte should be masked
|
||||
// off to ensure it isn't negative.
|
||||
serialNumber[0] &= 0x7F
|
||||
|
||||
safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber)
|
||||
}
|
||||
|
||||
if len(req.Extensions) > 0 {
|
||||
for _, ext := range req.Extensions {
|
||||
oid := asn1.ObjectIdentifier(ext.ID)
|
||||
if !profile.ExtensionWhitelist[oid.String()] {
|
||||
return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
|
||||
}
|
||||
|
||||
rawValue, err := hex.DecodeString(ext.Value)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err)
|
||||
}
|
||||
|
||||
safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{
|
||||
Id: oid,
|
||||
Critical: ext.Critical,
|
||||
Value: rawValue,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var certTBS = safeTemplate
|
||||
|
||||
if len(profile.CTLogServers) > 0 {
|
||||
// Add a poison extension which prevents validation
|
||||
var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}}
|
||||
var poisonedPreCert = certTBS
|
||||
poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension)
|
||||
cert, err = s.sign(&poisonedPreCert, profile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
derCert, _ := pem.Decode(cert)
|
||||
prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw}
|
||||
var sctList []ct.SignedCertificateTimestamp
|
||||
|
||||
for _, server := range profile.CTLogServers {
|
||||
log.Infof("submitting poisoned precertificate to %s", server)
|
||||
var ctclient = client.New(server)
|
||||
var resp *ct.SignedCertificateTimestamp
|
||||
resp, err = ctclient.AddPreChain(prechain)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
|
||||
}
|
||||
sctList = append(sctList, *resp)
|
||||
}
|
||||
|
||||
var serializedSCTList []byte
|
||||
serializedSCTList, err = serializeSCTList(sctList)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
|
||||
}
|
||||
|
||||
// Serialize again as an octet string before embedding
|
||||
serializedSCTList, err = asn1.Marshal(serializedSCTList)
|
||||
if err != nil {
|
||||
return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
|
||||
}
|
||||
|
||||
var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
|
||||
certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
|
||||
}
|
||||
var signedCert []byte
|
||||
signedCert, err = s.sign(&certTBS, profile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if s.dbAccessor != nil {
|
||||
var certRecord = certdb.CertificateRecord{
|
||||
Serial: certTBS.SerialNumber.String(),
|
||||
// this relies on the specific behavior of x509.CreateCertificate
|
||||
// which updates certTBS AuthorityKeyId from the signer's SubjectKeyId
|
||||
AKI: hex.EncodeToString(certTBS.AuthorityKeyId),
|
||||
CALabel: req.Label,
|
||||
Status: "good",
|
||||
Expiry: certTBS.NotAfter,
|
||||
PEM: string(signedCert),
|
||||
}
|
||||
|
||||
err = s.dbAccessor.InsertCertificate(certRecord)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
|
||||
}
|
||||
|
||||
return signedCert, nil
|
||||
}
|
||||
|
||||
func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
for _, sct := range sctList {
|
||||
sct, err := ct.SerializeSCT(sct)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
binary.Write(&buf, binary.BigEndian, uint16(len(sct)))
|
||||
buf.Write(sct)
|
||||
}
|
||||
|
||||
var sctListLengthField = make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len()))
|
||||
return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil
|
||||
}
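The layout produced by serializeSCTList follows the RFC 6962 SignedCertificateTimestampList framing: each serialized SCT is prefixed with its own big-endian uint16 length, and the whole concatenation gets one more uint16 length prefix. Below is a self-contained sketch of just that framing, using placeholder SCT bytes rather than cfssl's types.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frameSCTList mirrors the two-level length prefixing used above: every
// serialized SCT gets a uint16 length prefix, and the concatenation gets
// one more uint16 length prefix.
func frameSCTList(scts [][]byte) []byte {
	var inner bytes.Buffer
	for _, sct := range scts {
		binary.Write(&inner, binary.BigEndian, uint16(len(sct)))
		inner.Write(sct)
	}
	out := make([]byte, 2, 2+inner.Len())
	binary.BigEndian.PutUint16(out, uint16(inner.Len()))
	return append(out, inner.Bytes()...)
}

func main() {
	// Placeholder SCT bytes, for illustration only.
	fmt.Printf("% x\n", frameSCTList([][]byte{{0xaa, 0xbb}, {0xcc}}))
	// Output: 00 07 00 02 aa bb 00 01 cc
}
```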
|
||||
|
||||
// Info returns a populated info.Resp struct or an error.
|
||||
func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
|
||||
cert, err := s.Certificate(req.Label, req.Profile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
profile, err := signer.Profile(s, req.Profile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resp = new(info.Resp)
|
||||
if cert.Raw != nil {
|
||||
resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
|
||||
}
|
||||
resp.Usage = profile.Usage
|
||||
resp.ExpiryString = profile.ExpiryString
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// SigAlgo returns the signer's signature algorithm.
|
||||
func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
|
||||
return s.sigAlgo
|
||||
}
|
||||
|
||||
// Certificate returns the signer's certificate.
|
||||
func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
|
||||
cert := *s.ca
|
||||
return &cert, nil
|
||||
}
|
||||
|
||||
// SetPolicy sets the signer's signature policy.
|
||||
func (s *Signer) SetPolicy(policy *config.Signing) {
|
||||
s.policy = policy
|
||||
}
|
||||
|
||||
// SetDBAccessor sets the signer's certificate database accessor.
|
||||
func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
|
||||
s.dbAccessor = dba
|
||||
}
|
||||
|
||||
// Policy returns the signer's policy.
|
||||
func (s *Signer) Policy() *config.Signing {
|
||||
return s.policy
|
||||
}
|
385
vendor/github.com/cloudflare/cfssl/signer/signer.go
generated
vendored
Normal file
@ -0,0 +1,385 @@
|
||||
// Package signer implements certificate signature functionality for CFSSL.
|
||||
package signer
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudflare/cfssl/certdb"
|
||||
"github.com/cloudflare/cfssl/config"
|
||||
"github.com/cloudflare/cfssl/csr"
|
||||
cferr "github.com/cloudflare/cfssl/errors"
|
||||
"github.com/cloudflare/cfssl/helpers"
|
||||
"github.com/cloudflare/cfssl/info"
|
||||
)
|
||||
|
||||
// MaxPathLen is the default path length for a new CA certificate.
|
||||
var MaxPathLen = 2
|
||||
|
||||
// Subject contains the information that should be used to override the
|
||||
// subject information when signing a certificate.
|
||||
type Subject struct {
|
||||
CN string
|
||||
Names []csr.Name `json:"names"`
|
||||
SerialNumber string
|
||||
}
|
||||
|
||||
// Extension represents a raw extension to be included in the certificate. The
|
||||
// "value" field must be hex encoded.
|
||||
type Extension struct {
|
||||
ID config.OID `json:"id"`
|
||||
Critical bool `json:"critical"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// SignRequest stores a signature request, which contains the hostname,
|
||||
// the CSR, optional subject information, and the signature profile.
|
||||
//
|
||||
// Extensions provided in the signRequest are copied into the certificate, as
|
||||
// long as they are in the ExtensionWhitelist for the signer's policy.
|
||||
// Extensions requested in the CSR are ignored, except for those processed by
|
||||
// ParseCertificateRequest (mainly subjectAltName).
|
||||
type SignRequest struct {
|
||||
Hosts []string `json:"hosts"`
|
||||
Request string `json:"certificate_request"`
|
||||
Subject *Subject `json:"subject,omitempty"`
|
||||
Profile string `json:"profile"`
|
||||
Label string `json:"label"`
|
||||
Serial *big.Int `json:"serial,omitempty"`
|
||||
Extensions []Extension `json:"extensions,omitempty"`
|
||||
}
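For orientation, here is a hedged sketch (not from this repository) of how a caller might fill in a SignRequest. The profile name, hostnames, and CSR bytes are placeholders; SplitHosts and the Signer interface are defined further down in this file.

```go
import "github.com/cloudflare/cfssl/signer"

// sketchSign is illustrative only: "server" is a hypothetical profile name
// and csrPEM is a PEM-encoded CSR produced elsewhere.
func sketchSign(s signer.Signer, csrPEM []byte) ([]byte, error) {
	req := signer.SignRequest{
		Hosts:   signer.SplitHosts("example.com,www.example.com"),
		Request: string(csrPEM),
		Profile: "server",
	}
	return s.Sign(req)
}
```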
|
||||
|
||||
// appendIf appends s to a if s is not an empty string.
|
||||
func appendIf(s string, a *[]string) {
|
||||
if s != "" {
|
||||
*a = append(*a, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the PKIX name for the subject.
|
||||
func (s *Subject) Name() pkix.Name {
|
||||
var name pkix.Name
|
||||
name.CommonName = s.CN
|
||||
|
||||
for _, n := range s.Names {
|
||||
appendIf(n.C, &name.Country)
|
||||
appendIf(n.ST, &name.Province)
|
||||
appendIf(n.L, &name.Locality)
|
||||
appendIf(n.O, &name.Organization)
|
||||
appendIf(n.OU, &name.OrganizationalUnit)
|
||||
}
|
||||
name.SerialNumber = s.SerialNumber
|
||||
return name
|
||||
}
|
||||
|
||||
// SplitHosts takes a comma-spearated list of hosts and returns a slice
|
||||
// with the hosts split
|
||||
func SplitHosts(hostList string) []string {
|
||||
if hostList == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return strings.Split(hostList, ",")
|
||||
}
|
||||
|
||||
// A Signer contains a CA's certificate and private key for signing
|
||||
// certificates, a Signing policy to refer to and a SignatureAlgorithm.
|
||||
type Signer interface {
|
||||
Info(info.Req) (*info.Resp, error)
|
||||
Policy() *config.Signing
|
||||
SetDBAccessor(certdb.Accessor)
|
||||
SetPolicy(*config.Signing)
|
||||
SigAlgo() x509.SignatureAlgorithm
|
||||
Sign(req SignRequest) (cert []byte, err error)
|
||||
}
|
||||
|
||||
// Profile gets the specific profile from the signer
|
||||
func Profile(s Signer, profile string) (*config.SigningProfile, error) {
|
||||
var p *config.SigningProfile
|
||||
policy := s.Policy()
|
||||
if policy != nil && policy.Profiles != nil && profile != "" {
|
||||
p = policy.Profiles[profile]
|
||||
}
|
||||
|
||||
if p == nil && policy != nil {
|
||||
p = policy.Default
|
||||
}
|
||||
|
||||
if p == nil {
|
||||
return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
|
||||
// the CA's private key.
|
||||
func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
|
||||
pub := priv.Public()
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
keySize := pub.N.BitLen()
|
||||
switch {
|
||||
case keySize >= 4096:
|
||||
return x509.SHA512WithRSA
|
||||
case keySize >= 3072:
|
||||
return x509.SHA384WithRSA
|
||||
case keySize >= 2048:
|
||||
return x509.SHA256WithRSA
|
||||
default:
|
||||
return x509.SHA1WithRSA
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P256():
|
||||
return x509.ECDSAWithSHA256
|
||||
case elliptic.P384():
|
||||
return x509.ECDSAWithSHA384
|
||||
case elliptic.P521():
|
||||
return x509.ECDSAWithSHA512
|
||||
default:
|
||||
return x509.ECDSAWithSHA1
|
||||
}
|
||||
default:
|
||||
return x509.UnknownSignatureAlgorithm
|
||||
}
|
||||
}
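A minimal sketch of exercising DefaultSigAlgo: *ecdsa.PrivateKey satisfies crypto.Signer, and by the switch above a P-256 key maps to x509.ECDSAWithSHA256. The key here is a throwaway generated only for illustration.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
)

func main() {
	// *ecdsa.PrivateKey satisfies crypto.Signer, so it can be passed directly.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	fmt.Println(signer.DefaultSigAlgo(key)) // ECDSA-SHA256 for a P-256 key
}
```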
|
||||
|
||||
// ParseCertificateRequest takes an incoming certificate request and
|
||||
// builds a certificate template from it.
|
||||
func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
|
||||
csr, err := x509.ParseCertificateRequest(csrBytes)
|
||||
if err != nil {
|
||||
err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = helpers.CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
|
||||
if err != nil {
|
||||
err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
|
||||
return
|
||||
}
|
||||
|
||||
template = &x509.Certificate{
|
||||
Subject: csr.Subject,
|
||||
PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
|
||||
PublicKey: csr.PublicKey,
|
||||
SignatureAlgorithm: s.SigAlgo(),
|
||||
DNSNames: csr.DNSNames,
|
||||
IPAddresses: csr.IPAddresses,
|
||||
EmailAddresses: csr.EmailAddresses,
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type subjectPublicKeyInfo struct {
|
||||
Algorithm pkix.AlgorithmIdentifier
|
||||
SubjectPublicKey asn1.BitString
|
||||
}
|
||||
|
||||
// ComputeSKI derives an SKI from the certificate's public key in a
|
||||
// standard manner. This is done by computing the SHA-1 digest of the
|
||||
// SubjectPublicKeyInfo component of the certificate.
|
||||
func ComputeSKI(template *x509.Certificate) ([]byte, error) {
|
||||
pub := template.PublicKey
|
||||
encodedPub, err := x509.MarshalPKIXPublicKey(pub)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var subPKI subjectPublicKeyInfo
|
||||
_, err = asn1.Unmarshal(encodedPub, &subPKI)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes)
|
||||
return pubHash[:], nil
|
||||
}
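A small sketch of calling ComputeSKI for a throwaway key; only the template's PublicKey field is consulted, so a bare x509.Certificate value is enough. The key type and the use of the output are arbitrary here.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
)

func main() {
	// Arbitrary throwaway key; ComputeSKI only looks at template.PublicKey.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	ski, err := signer.ComputeSKI(&x509.Certificate{PublicKey: key.Public()})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d-byte SKI: %x\n", len(ski), ski) // 20-byte SHA-1 digest
}
```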
|
||||
|
||||
// FillTemplate is a utility function that tries to load as much of
|
||||
// the certificate template as possible from the profiles and current
|
||||
// template. It fills in the key uses, expiration, revocation URLs
|
||||
// and SKI.
|
||||
func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error {
|
||||
ski, err := ComputeSKI(template)
|
||||
|
||||
var (
|
||||
eku []x509.ExtKeyUsage
|
||||
ku x509.KeyUsage
|
||||
backdate time.Duration
|
||||
expiry time.Duration
|
||||
notBefore time.Time
|
||||
notAfter time.Time
|
||||
crlURL, ocspURL string
|
||||
)
|
||||
|
||||
// The third value returned from Usages is a list of unknown key usages.
|
||||
// This should be used when validating the profile at load, and isn't used
|
||||
// here.
|
||||
ku, eku, _ = profile.Usages()
|
||||
if profile.IssuerURL == nil {
|
||||
profile.IssuerURL = defaultProfile.IssuerURL
|
||||
}
|
||||
|
||||
if ku == 0 && len(eku) == 0 {
|
||||
return cferr.New(cferr.PolicyError, cferr.NoKeyUsages)
|
||||
}
|
||||
|
||||
if expiry = profile.Expiry; expiry == 0 {
|
||||
expiry = defaultProfile.Expiry
|
||||
}
|
||||
|
||||
if crlURL = profile.CRL; crlURL == "" {
|
||||
crlURL = defaultProfile.CRL
|
||||
}
|
||||
if ocspURL = profile.OCSP; ocspURL == "" {
|
||||
ocspURL = defaultProfile.OCSP
|
||||
}
|
||||
if backdate = profile.Backdate; backdate == 0 {
|
||||
backdate = -5 * time.Minute
|
||||
} else {
|
||||
backdate = -1 * profile.Backdate
|
||||
}
|
||||
|
||||
if !profile.NotBefore.IsZero() {
|
||||
notBefore = profile.NotBefore.UTC()
|
||||
} else {
|
||||
notBefore = time.Now().Round(time.Minute).Add(backdate).UTC()
|
||||
}
|
||||
|
||||
if !profile.NotAfter.IsZero() {
|
||||
notAfter = profile.NotAfter.UTC()
|
||||
} else {
|
||||
notAfter = notBefore.Add(expiry).UTC()
|
||||
}
|
||||
|
||||
template.NotBefore = notBefore
|
||||
template.NotAfter = notAfter
|
||||
template.KeyUsage = ku
|
||||
template.ExtKeyUsage = eku
|
||||
template.BasicConstraintsValid = true
|
||||
template.IsCA = profile.CA
|
||||
template.SubjectKeyId = ski
|
||||
|
||||
if ocspURL != "" {
|
||||
template.OCSPServer = []string{ocspURL}
|
||||
}
|
||||
if crlURL != "" {
|
||||
template.CRLDistributionPoints = []string{crlURL}
|
||||
}
|
||||
|
||||
if len(profile.IssuerURL) != 0 {
|
||||
template.IssuingCertificateURL = profile.IssuerURL
|
||||
}
|
||||
if len(profile.Policies) != 0 {
|
||||
err = addPolicies(template, profile.Policies)
|
||||
if err != nil {
|
||||
return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
|
||||
}
|
||||
}
|
||||
if profile.OCSPNoCheck {
|
||||
ocspNoCheckExtension := pkix.Extension{
|
||||
Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5},
|
||||
Critical: false,
|
||||
Value: []byte{0x05, 0x00},
|
||||
}
|
||||
template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type policyInformation struct {
|
||||
PolicyIdentifier asn1.ObjectIdentifier
|
||||
Qualifiers []interface{} `asn1:"tag:optional,omitempty"`
|
||||
}
|
||||
|
||||
type cpsPolicyQualifier struct {
|
||||
PolicyQualifierID asn1.ObjectIdentifier
|
||||
Qualifier string `asn1:"tag:optional,ia5"`
|
||||
}
|
||||
|
||||
type userNotice struct {
|
||||
ExplicitText string `asn1:"tag:optional,utf8"`
|
||||
}
|
||||
type userNoticePolicyQualifier struct {
|
||||
PolicyQualifierID asn1.ObjectIdentifier
|
||||
Qualifier userNotice
|
||||
}
|
||||
|
||||
var (
|
||||
// Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents:
|
||||
// iso(1) identified-organization(3) dod(6) internet(1) security(5)
|
||||
// mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1)
|
||||
iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
|
||||
// iso(1) identified-organization(3) dod(6) internet(1) security(5)
|
||||
// mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2)
|
||||
iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
|
||||
|
||||
// CTPoisonOID is the object ID of the critical poison extension for precertificates
|
||||
// https://tools.ietf.org/html/rfc6962#page-9
|
||||
CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
|
||||
|
||||
// SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension
|
||||
// https://tools.ietf.org/html/rfc6962#page-14
|
||||
SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
|
||||
)
|
||||
|
||||
// addPolicies adds Certificate Policies and optional Policy Qualifiers to a
|
||||
// certificate, based on the input config. Go's x509 library allows setting
|
||||
// Certificate Policies easily, but does not support nested Policy Qualifiers
|
||||
// under those policies. So we need to construct the ASN.1 structure ourselves.
|
||||
func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error {
|
||||
asn1PolicyList := []policyInformation{}
|
||||
|
||||
for _, policy := range policies {
|
||||
pi := policyInformation{
|
||||
// The PolicyIdentifier is an OID assigned to a given issuer.
|
||||
PolicyIdentifier: asn1.ObjectIdentifier(policy.ID),
|
||||
}
|
||||
for _, qualifier := range policy.Qualifiers {
|
||||
switch qualifier.Type {
|
||||
case "id-qt-unotice":
|
||||
pi.Qualifiers = append(pi.Qualifiers,
|
||||
userNoticePolicyQualifier{
|
||||
PolicyQualifierID: iDQTUserNotice,
|
||||
Qualifier: userNotice{
|
||||
ExplicitText: qualifier.Value,
|
||||
},
|
||||
})
|
||||
case "id-qt-cps":
|
||||
pi.Qualifiers = append(pi.Qualifiers,
|
||||
cpsPolicyQualifier{
|
||||
PolicyQualifierID: iDQTCertificationPracticeStatement,
|
||||
Qualifier: qualifier.Value,
|
||||
})
|
||||
default:
|
||||
return errors.New("Invalid qualifier type in Policies " + qualifier.Type)
|
||||
}
|
||||
}
|
||||
asn1PolicyList = append(asn1PolicyList, pi)
|
||||
}
|
||||
|
||||
asn1Bytes, err := asn1.Marshal(asn1PolicyList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{
|
||||
Id: asn1.ObjectIdentifier{2, 5, 29, 32},
|
||||
Critical: false,
|
||||
Value: asn1Bytes,
|
||||
})
|
||||
return nil
|
||||
}
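A hedged sketch of the configuration addPolicies consumes, as it would appear inside this package (addPolicies is unexported). Only policy.ID, qualifier.Type, and qualifier.Value are visible in this file, so the qualifier element's type name (config.CertificatePolicyQualifier) and the OID value are assumptions for illustration.

```go
// sketchAddPolicies is illustrative only and would have to live inside this
// package, since addPolicies is unexported. The qualifier element type name
// (config.CertificatePolicyQualifier) is assumed; only its Type and Value
// fields are relied on above.
func sketchAddPolicies(template *x509.Certificate) error {
	return addPolicies(template, []config.CertificatePolicy{{
		// Hypothetical private-enterprise policy OID.
		ID: config.OID{1, 3, 6, 1, 4, 1, 99999, 1},
		Qualifiers: []config.CertificatePolicyQualifier{
			{Type: "id-qt-cps", Value: "https://example.com/cps"},
			{Type: "id-qt-unotice", Value: "Example CA certificate policy"},
		},
	}})
}
```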
|
202
vendor/github.com/google/certificate-transparency/LICENSE
generated
vendored
Normal file
@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
25
vendor/github.com/google/certificate-transparency/go/README.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
This is the really early beginnings of a certificate transparency log
|
||||
client written in Go, along with a log scanner tool.
|
||||
|
||||
You'll need go v1.1 or higher to compile.
|
||||
|
||||
# Installation
|
||||
|
||||
This go code must be imported into your go workspace before you can
|
||||
use it, which can be done with:
|
||||
|
||||
go get github.com/google/certificate-transparency/go/client
|
||||
go get github.com/google/certificate-transparency/go/scanner
|
||||
etc.
|
||||
|
||||
# Building the binaries
|
||||
|
||||
To compile the log scanner run:
|
||||
|
||||
go build github.com/google/certificate-transparency/go/scanner/main/scanner.go
|
||||
|
||||
# Contributing
|
||||
|
||||
When sending pull requests, please ensure that everything's been run
|
||||
through ```gofmt``` beforehand so we can keep everything nice and
|
||||
tidy.
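The only client entry points the cfssl signer above relies on are client.New and AddPreChain. A hedged sketch of that usage follows; the log URL and certificate bytes are placeholders, and raw DER slices can be placed in the chain because the code above already assigns []byte values to ct.ASN1Cert.

```go
package main

import (
	"fmt"

	ct "github.com/google/certificate-transparency/go"
	"github.com/google/certificate-transparency/go/client"
)

// submitPreChain is illustrative only: precertDER and issuerDER would be the
// DER bytes of a poisoned precertificate and its issuing certificate.
func submitPreChain(logURL string, precertDER, issuerDER []byte) (*ct.SignedCertificateTimestamp, error) {
	c := client.New(logURL)
	return c.AddPreChain([]ct.ASN1Cert{precertDER, issuerDER})
}

func main() {
	fmt.Println("see submitPreChain; a real run needs a live CT log and a precertificate")
}
```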
|
956
vendor/github.com/google/certificate-transparency/go/asn1/asn1.go
generated
vendored
Executable file
@ -0,0 +1,956 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
|
||||
// as defined in ITU-T Rec X.690.
|
||||
//
|
||||
// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
|
||||
// http://luca.ntop.org/Teaching/Appunti/asn1.html.
|
||||
//
|
||||
// START CT CHANGES
|
||||
// This is a fork of the Go standard library ASN.1 implementation
|
||||
// (encoding/asn1). The main difference is that this version tries to correct
|
||||
// for errors (e.g. use of tagPrintableString when the string data is really
|
||||
// ISO8859-1 - a common error present in many x509 certificates in the wild.)
|
||||
// END CT CHANGES
|
||||
package asn1
|
||||
|
||||
// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
|
||||
// are different encoding formats for those objects. Here, we'll be dealing
|
||||
// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
|
||||
// it's fast to parse and, unlike BER, has a unique encoding for every object.
|
||||
// When calculating hashes over objects, it's important that the resulting
|
||||
// bytes be the same at both ends and DER removes this margin of error.
|
||||
//
|
||||
// ASN.1 is very complex and this package doesn't attempt to implement
|
||||
// everything by any means.
|
||||
|
||||
import (
|
||||
// START CT CHANGES
|
||||
"errors"
|
||||
"fmt"
|
||||
// END CT CHANGES
|
||||
"math/big"
|
||||
"reflect"
|
||||
// START CT CHANGES
|
||||
"strings"
|
||||
// END CT CHANGES
|
||||
"time"
|
||||
)
|
||||
|
||||
// A StructuralError suggests that the ASN.1 data is valid, but the Go type
|
||||
// which is receiving it doesn't match.
|
||||
type StructuralError struct {
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
|
||||
|
||||
// A SyntaxError suggests that the ASN.1 data is invalid.
|
||||
type SyntaxError struct {
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
|
||||
|
||||
// We start by dealing with each of the primitive types in turn.
|
||||
|
||||
// BOOLEAN
|
||||
|
||||
func parseBool(bytes []byte) (ret bool, err error) {
|
||||
if len(bytes) != 1 {
|
||||
err = SyntaxError{"invalid boolean"}
|
||||
return
|
||||
}
|
||||
|
||||
// DER demands that "If the encoding represents the boolean value TRUE,
|
||||
// its single contents octet shall have all eight bits set to one."
|
||||
// Thus only 0 and 255 are valid encoded values.
|
||||
switch bytes[0] {
|
||||
case 0:
|
||||
ret = false
|
||||
case 0xff:
|
||||
ret = true
|
||||
default:
|
||||
err = SyntaxError{"invalid boolean"}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// INTEGER
|
||||
|
||||
// parseInt64 treats the given bytes as a big-endian, signed integer and
|
||||
// returns the result.
|
||||
func parseInt64(bytes []byte) (ret int64, err error) {
|
||||
if len(bytes) > 8 {
|
||||
// We'll overflow an int64 in this case.
|
||||
err = StructuralError{"integer too large"}
|
||||
return
|
||||
}
|
||||
for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
|
||||
ret <<= 8
|
||||
ret |= int64(bytes[bytesRead])
|
||||
}
|
||||
|
||||
// Shift up and down in order to sign extend the result.
|
||||
ret <<= 64 - uint8(len(bytes))*8
|
||||
ret >>= 64 - uint8(len(bytes))*8
|
||||
return
|
||||
}
|
||||
|
||||
// parseInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseInt32(bytes []byte) (int32, error) {
|
||||
ret64, err := parseInt64(bytes)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if ret64 != int64(int32(ret64)) {
|
||||
return 0, StructuralError{"integer too large"}
|
||||
}
|
||||
return int32(ret64), nil
|
||||
}
|
||||
|
||||
var bigOne = big.NewInt(1)
|
||||
|
||||
// parseBigInt treats the given bytes as a big-endian, signed integer and returns
|
||||
// the result.
|
||||
func parseBigInt(bytes []byte) *big.Int {
|
||||
ret := new(big.Int)
|
||||
if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
|
||||
// This is a negative number.
|
||||
notBytes := make([]byte, len(bytes))
|
||||
for i := range notBytes {
|
||||
notBytes[i] = ^bytes[i]
|
||||
}
|
||||
ret.SetBytes(notBytes)
|
||||
ret.Add(ret, bigOne)
|
||||
ret.Neg(ret)
|
||||
return ret
|
||||
}
|
||||
ret.SetBytes(bytes)
|
||||
return ret
|
||||
}
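A worked example of the two's-complement branch: the DER INTEGER −2 carries the single content byte 0xFE. The sketch uses the standard library's encoding/asn1 because parseBigInt here is unexported, but the arithmetic is the same.

```go
package main

import (
	"encoding/asn1"
	"fmt"
	"math/big"
)

func main() {
	// DER INTEGER -2: tag 0x02, length 0x01, content 0xFE.
	// The top bit of 0xFE is set, so the bytes are complemented (0x01),
	// incremented (2) and negated, yielding -2 -- exactly what parseBigInt does.
	var n *big.Int
	if _, err := asn1.Unmarshal([]byte{0x02, 0x01, 0xFE}, &n); err != nil {
		panic(err)
	}
	fmt.Println(n) // -2
}
```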
|
||||
|
||||
// BIT STRING
|
||||
|
||||
// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
|
||||
// bit string is padded up to the nearest byte in memory and the number of
|
||||
// valid bits is recorded. Padding bits will be zero.
|
||||
type BitString struct {
|
||||
Bytes []byte // bits packed into bytes.
|
||||
BitLength int // length in bits.
|
||||
}
|
||||
|
||||
// At returns the bit at the given index. If the index is out of range it
|
||||
// returns false.
|
||||
func (b BitString) At(i int) int {
|
||||
if i < 0 || i >= b.BitLength {
|
||||
return 0
|
||||
}
|
||||
x := i / 8
|
||||
y := 7 - uint(i%8)
|
||||
return int(b.Bytes[x]>>y) & 1
|
||||
}
|
||||
|
||||
// RightAlign returns a slice where the padding bits are at the beginning. The
|
||||
// slice may share memory with the BitString.
|
||||
func (b BitString) RightAlign() []byte {
|
||||
shift := uint(8 - (b.BitLength % 8))
|
||||
if shift == 8 || len(b.Bytes) == 0 {
|
||||
return b.Bytes
|
||||
}
|
||||
|
||||
a := make([]byte, len(b.Bytes))
|
||||
a[0] = b.Bytes[0] >> shift
|
||||
for i := 1; i < len(b.Bytes); i++ {
|
||||
a[i] = b.Bytes[i-1] << (8 - shift)
|
||||
a[i] |= b.Bytes[i] >> shift
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
|
||||
func parseBitString(bytes []byte) (ret BitString, err error) {
|
||||
if len(bytes) == 0 {
|
||||
err = SyntaxError{"zero length BIT STRING"}
|
||||
return
|
||||
}
|
||||
paddingBits := int(bytes[0])
|
||||
if paddingBits > 7 ||
|
||||
len(bytes) == 1 && paddingBits > 0 ||
|
||||
bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
|
||||
err = SyntaxError{"invalid padding bits in BIT STRING"}
|
||||
return
|
||||
}
|
||||
ret.BitLength = (len(bytes)-1)*8 - paddingBits
|
||||
ret.Bytes = bytes[1:]
|
||||
return
|
||||
}
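The padding convention can be seen with the classic X.690 example below. This fork's parseBitString is unexported, so the sketch uses the standard library's encoding/asn1, which decodes it identically.

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// BIT STRING, 4 content bytes: the first byte 0x06 declares 6 padding
	// bits, so the remaining 3 bytes carry 3*8 - 6 = 18 valid bits.
	der := []byte{0x03, 0x04, 0x06, 0x6e, 0x5d, 0xc0}
	var bs asn1.BitString
	if _, err := asn1.Unmarshal(der, &bs); err != nil {
		panic(err)
	}
	fmt.Println(bs.BitLength, len(bs.Bytes)) // 18 3
}
```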
|
||||
|
||||
// OBJECT IDENTIFIER
|
||||
|
||||
// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
|
||||
type ObjectIdentifier []int
|
||||
|
||||
// Equal reports whether oi and other represent the same identifier.
|
||||
func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
|
||||
if len(oi) != len(other) {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < len(oi); i++ {
|
||||
if oi[i] != other[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
|
||||
// returns it. An object identifier is a sequence of variable length integers
|
||||
// that are assigned in a hierarchy.
|
||||
func parseObjectIdentifier(bytes []byte) (s []int, err error) {
|
||||
if len(bytes) == 0 {
|
||||
err = SyntaxError{"zero length OBJECT IDENTIFIER"}
|
||||
return
|
||||
}
|
||||
|
||||
// In the worst case, we get two elements from the first byte (which is
|
||||
// encoded differently) and then every varint is a single byte long.
|
||||
s = make([]int, len(bytes)+1)
|
||||
|
||||
// The first varint is 40*value1 + value2:
|
||||
// According to this packing, value1 can take the values 0, 1 and 2 only.
|
||||
// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
|
||||
// then there are no restrictions on value2.
|
||||
v, offset, err := parseBase128Int(bytes, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v < 80 {
|
||||
s[0] = v / 40
|
||||
s[1] = v % 40
|
||||
} else {
|
||||
s[0] = 2
|
||||
s[1] = v - 80
|
||||
}
|
||||
|
||||
i := 2
|
||||
for ; offset < len(bytes); i++ {
|
||||
v, offset, err = parseBase128Int(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s[i] = v
|
||||
}
|
||||
s = s[0:i]
|
||||
return
|
||||
}
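To make the first-byte packing concrete, here is the DER encoding of the CT poison OID (1.3.6.1.4.1.11129.2.4.3) defined elsewhere in this change, decoded with the standard library's encoding/asn1; this fork's parseObjectIdentifier is unexported but implements the same rules.

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// DER for OBJECT IDENTIFIER 1.3.6.1.4.1.11129.2.4.3:
	// tag 0x06, length 0x0a, then content bytes where
	//   0x2b      = 40*1 + 3     -> 1.3
	//   0xd6 0x79 = 86*128 + 121 -> 11129 (base-128, continuation bit on 0xd6)
	der := []byte{0x06, 0x0a, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xd6, 0x79, 0x02, 0x04, 0x03}
	var oid asn1.ObjectIdentifier
	if _, err := asn1.Unmarshal(der, &oid); err != nil {
		panic(err)
	}
	fmt.Println(oid) // 1.3.6.1.4.1.11129.2.4.3
}
```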
|
||||
|
||||
// ENUMERATED
|
||||
|
||||
// An Enumerated is represented as a plain int.
|
||||
type Enumerated int
|
||||
|
||||
// FLAG
|
||||
|
||||
// A Flag accepts any data and is set to true if present.
|
||||
type Flag bool
|
||||
|
||||
// parseBase128Int parses a base-128 encoded int from the given offset in the
|
||||
// given byte slice. It returns the value and the new offset.
|
||||
func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
|
||||
offset = initOffset
|
||||
for shifted := 0; offset < len(bytes); shifted++ {
|
||||
if shifted > 4 {
|
||||
err = StructuralError{"base 128 integer too large"}
|
||||
return
|
||||
}
|
||||
ret <<= 7
|
||||
b := bytes[offset]
|
||||
ret |= int(b & 0x7f)
|
||||
offset++
|
||||
if b&0x80 == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = SyntaxError{"truncated base 128 integer"}
|
||||
return
|
||||
}
|
||||
|
||||
// UTCTime
|
||||
|
||||
func parseUTCTime(bytes []byte) (ret time.Time, err error) {
|
||||
s := string(bytes)
|
||||
ret, err = time.Parse("0601021504Z0700", s)
|
||||
if err != nil {
|
||||
ret, err = time.Parse("060102150405Z0700", s)
|
||||
}
|
||||
if err == nil && ret.Year() >= 2050 {
|
||||
// UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
|
||||
ret = ret.AddDate(-100, 0, 0)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
|
||||
// and returns the resulting time.
|
||||
func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
|
||||
return time.Parse("20060102150405Z0700", string(bytes))
|
||||
}
|
||||
|
||||
// PrintableString
|
||||
|
||||
// parsePrintableString parses an ASN.1 PrintableString from the given byte
|
||||
// array and returns it.
|
||||
func parsePrintableString(bytes []byte) (ret string, err error) {
|
||||
for _, b := range bytes {
|
||||
if !isPrintable(b) {
|
||||
err = SyntaxError{"PrintableString contains invalid character"}
|
||||
return
|
||||
}
|
||||
}
|
||||
ret = string(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
// isPrintable returns true iff the given b is in the ASN.1 PrintableString set.
|
||||
func isPrintable(b byte) bool {
|
||||
return 'a' <= b && b <= 'z' ||
|
||||
'A' <= b && b <= 'Z' ||
|
||||
'0' <= b && b <= '9' ||
|
||||
'\'' <= b && b <= ')' ||
|
||||
'+' <= b && b <= '/' ||
|
||||
b == ' ' ||
|
||||
b == ':' ||
|
||||
b == '=' ||
|
||||
b == '?' ||
|
||||
// This is technically not allowed in a PrintableString.
|
||||
// However, x509 certificates with wildcard strings don't
|
||||
// always use the correct string type so we permit it.
|
||||
b == '*'
|
||||
}
|
||||
|
||||
// IA5String
|
||||
|
||||
// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
|
||||
// byte slice and returns it.
|
||||
func parseIA5String(bytes []byte) (ret string, err error) {
|
||||
for _, b := range bytes {
|
||||
if b >= 0x80 {
|
||||
err = SyntaxError{"IA5String contains invalid character"}
|
||||
return
|
||||
}
|
||||
}
|
||||
ret = string(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
// T61String
|
||||
|
||||
// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
|
||||
// byte slice and returns it.
|
||||
func parseT61String(bytes []byte) (ret string, err error) {
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// UTF8String
|
||||
|
||||
// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
|
||||
// array and returns it.
|
||||
func parseUTF8String(bytes []byte) (ret string, err error) {
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
// A RawValue represents an undecoded ASN.1 object.
|
||||
type RawValue struct {
|
||||
Class, Tag int
|
||||
IsCompound bool
|
||||
Bytes []byte
|
||||
FullBytes []byte // includes the tag and length
|
||||
}
|
||||
|
||||
// RawContent is used to signal that the undecoded, DER data needs to be
|
||||
// preserved for a struct. To use it, the first field of the struct must have
|
||||
// this type. It's an error for any of the other fields to have this type.
|
||||
type RawContent []byte
|
||||
|
||||
// Tagging
|
||||
|
||||
// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
|
||||
// into a byte slice. It returns the parsed data and the new offset. SET and
|
||||
// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
|
||||
// don't distinguish between ordered and unordered objects in this code.
|
||||
func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
|
||||
offset = initOffset
|
||||
b := bytes[offset]
|
||||
offset++
|
||||
ret.class = int(b >> 6)
|
||||
ret.isCompound = b&0x20 == 0x20
|
||||
ret.tag = int(b & 0x1f)
|
||||
|
||||
// If the bottom five bits are set, then the tag number is actually base 128
|
||||
// encoded afterwards
|
||||
if ret.tag == 0x1f {
|
||||
ret.tag, offset, err = parseBase128Int(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if offset >= len(bytes) {
|
||||
err = SyntaxError{"truncated tag or length"}
|
||||
return
|
||||
}
|
||||
b = bytes[offset]
|
||||
offset++
|
||||
if b&0x80 == 0 {
|
||||
// The length is encoded in the bottom 7 bits.
|
||||
ret.length = int(b & 0x7f)
|
||||
} else {
|
||||
// Bottom 7 bits give the number of length bytes to follow.
|
||||
numBytes := int(b & 0x7f)
|
||||
if numBytes == 0 {
|
||||
err = SyntaxError{"indefinite length found (not DER)"}
|
||||
return
|
||||
}
|
||||
ret.length = 0
|
||||
for i := 0; i < numBytes; i++ {
|
||||
if offset >= len(bytes) {
|
||||
err = SyntaxError{"truncated tag or length"}
|
||||
return
|
||||
}
|
||||
b = bytes[offset]
|
||||
offset++
|
||||
if ret.length >= 1<<23 {
|
||||
// We can't shift ret.length up without
|
||||
// overflowing.
|
||||
err = StructuralError{"length too large"}
|
||||
return
|
||||
}
|
||||
ret.length <<= 8
|
||||
ret.length |= int(b)
|
||||
if ret.length == 0 {
|
||||
// DER requires that lengths be minimal.
|
||||
err = StructuralError{"superfluous leading zeros in length"}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
|
||||
// a number of ASN.1 values from the given byte slice and returns them as a
|
||||
// slice of Go values of the given type.
|
||||
func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
|
||||
expectedTag, compoundType, ok := getUniversalType(elemType)
|
||||
if !ok {
|
||||
err = StructuralError{"unknown Go type for slice"}
|
||||
return
|
||||
}
|
||||
|
||||
// First we iterate over the input and count the number of elements,
|
||||
// checking that the types are correct in each case.
|
||||
numElements := 0
|
||||
for offset := 0; offset < len(bytes); {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// We pretend that GENERAL STRINGs are PRINTABLE STRINGs so
|
||||
// that a sequence of them can be parsed into a []string.
|
||||
if t.tag == tagGeneralString {
|
||||
t.tag = tagPrintableString
|
||||
}
|
||||
if t.class != classUniversal || t.isCompound != compoundType || t.tag != expectedTag {
|
||||
err = StructuralError{"sequence tag mismatch"}
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"truncated sequence"}
|
||||
return
|
||||
}
|
||||
offset += t.length
|
||||
numElements++
|
||||
}
|
||||
ret = reflect.MakeSlice(sliceType, numElements, numElements)
|
||||
params := fieldParameters{}
|
||||
offset := 0
|
||||
for i := 0; i < numElements; i++ {
|
||||
offset, err = parseField(ret.Index(i), bytes, offset, params)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
bitStringType = reflect.TypeOf(BitString{})
|
||||
objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
|
||||
enumeratedType = reflect.TypeOf(Enumerated(0))
|
||||
flagType = reflect.TypeOf(Flag(false))
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
rawValueType = reflect.TypeOf(RawValue{})
|
||||
rawContentsType = reflect.TypeOf(RawContent(nil))
|
||||
bigIntType = reflect.TypeOf(new(big.Int))
|
||||
)
|
||||
|
||||
// invalidLength returns true iff offset + length > sliceLength, or if the
|
||||
// addition would overflow.
|
||||
func invalidLength(offset, length, sliceLength int) bool {
|
||||
return offset+length < offset || offset+length > sliceLength
|
||||
}
|
||||
|
||||
// START CT CHANGES
|
||||
|
||||
// Tests whether the data in |bytes| would be a valid ISO8859-1 string.
|
||||
// Clearly, a sequence of bytes comprised solely of valid ISO8859-1
|
||||
// codepoints does not imply that the encoding MUST be ISO8859-1, rather that
|
||||
// you would not encounter an error trying to interpret the data as such.
|
||||
func couldBeISO8859_1(bytes []byte) bool {
|
||||
for _, b := range bytes {
|
||||
if b < 0x20 || (b >= 0x7F && b < 0xA0) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Checks whether the data in |bytes| would be a valid T.61 string.
|
||||
// Clearly, a sequence of bytes comprised solely of valid T.61
|
||||
// codepoints does not imply that the encoding MUST be T.61, rather that
|
||||
// you would not encounter an error trying to interpret the data as such.
|
||||
func couldBeT61(bytes []byte) bool {
|
||||
for _, b := range bytes {
|
||||
switch b {
|
||||
case 0x00:
|
||||
// Since we're guessing at (incorrect) encodings for a
|
||||
// PrintableString, we'll err on the side of caution and disallow
|
||||
// strings with a NUL in them, don't want to re-create a PayPal NUL
|
||||
// situation in monitors.
|
||||
fallthrough
|
||||
case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF,
|
||||
0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9,
|
||||
0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF:
|
||||
// These are all invalid code points in T.61, so it can't be a T.61 string.
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Converts the data in |bytes| to the equivalent UTF-8 string.
|
||||
func iso8859_1ToUTF8(bytes []byte) string {
|
||||
buf := make([]rune, len(bytes))
|
||||
for i, b := range bytes {
|
||||
buf[i] = rune(b)
|
||||
}
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
// END CT CHANGES
|
||||
|
||||
// parseField is the main parsing function. Given a byte slice and an offset
|
||||
// into the array, it will try to parse a suitable ASN.1 value out and store it
|
||||
// in the given Value.
|
||||
func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
|
||||
offset = initOffset
|
||||
fieldType := v.Type()
|
||||
|
||||
// If we have run out of data, it may be that there are optional elements at the end.
|
||||
if offset == len(bytes) {
|
||||
if !setDefaultValue(v, params) {
|
||||
err = SyntaxError{"sequence truncated"}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Deal with raw values.
|
||||
if fieldType == rawValueType {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
|
||||
offset += t.length
|
||||
v.Set(reflect.ValueOf(result))
|
||||
return
|
||||
}
|
||||
|
||||
// Deal with the ANY type.
|
||||
if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
|
||||
var t tagAndLength
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
var result interface{}
|
||||
if !t.isCompound && t.class == classUniversal {
|
||||
innerBytes := bytes[offset : offset+t.length]
|
||||
switch t.tag {
|
||||
case tagPrintableString:
|
||||
result, err = parsePrintableString(innerBytes)
|
||||
// START CT CHANGES
|
||||
if err != nil && strings.Contains(err.Error(), "PrintableString contains invalid character") {
|
||||
// Probably an ISO8859-1 string stuffed in, check if it
|
||||
// would be valid and assume that's what's happened if so,
|
||||
// otherwise try T.61, failing that give up and just assign
|
||||
// the bytes
|
||||
switch {
|
||||
case couldBeISO8859_1(innerBytes):
|
||||
result, err = iso8859_1ToUTF8(innerBytes), nil
|
||||
case couldBeT61(innerBytes):
|
||||
result, err = parseT61String(innerBytes)
|
||||
default:
|
||||
result = nil
|
||||
err = errors.New("PrintableString contains invalid character, but couldn't determine correct String type.")
|
||||
}
|
||||
}
|
||||
// END CT CHANGES
|
||||
case tagIA5String:
|
||||
result, err = parseIA5String(innerBytes)
|
||||
case tagT61String:
|
||||
result, err = parseT61String(innerBytes)
|
||||
case tagUTF8String:
|
||||
result, err = parseUTF8String(innerBytes)
|
||||
case tagInteger:
|
||||
result, err = parseInt64(innerBytes)
|
||||
case tagBitString:
|
||||
result, err = parseBitString(innerBytes)
|
||||
case tagOID:
|
||||
result, err = parseObjectIdentifier(innerBytes)
|
||||
case tagUTCTime:
|
||||
result, err = parseUTCTime(innerBytes)
|
||||
case tagOctetString:
|
||||
result = innerBytes
|
||||
default:
|
||||
// If we don't know how to handle the type, we just leave Value as nil.
|
||||
}
|
||||
}
|
||||
offset += t.length
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if result != nil {
|
||||
v.Set(reflect.ValueOf(result))
|
||||
}
|
||||
return
|
||||
}
|
||||
universalTag, compoundType, ok1 := getUniversalType(fieldType)
|
||||
if !ok1 {
|
||||
err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
|
||||
return
|
||||
}
|
||||
|
||||
t, offset, err := parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if params.explicit {
|
||||
expectedClass := classContextSpecific
|
||||
if params.application {
|
||||
expectedClass = classApplication
|
||||
}
|
||||
if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
|
||||
if t.length > 0 {
|
||||
t, offset, err = parseTagAndLength(bytes, offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if fieldType != flagType {
|
||||
err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
|
||||
return
|
||||
}
|
||||
v.SetBool(true)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// The tags didn't match, it might be an optional element.
|
||||
ok := setDefaultValue(v, params)
|
||||
if ok {
|
||||
offset = initOffset
|
||||
} else {
|
||||
err = StructuralError{"explicitly tagged member didn't match"}
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for strings: all the ASN.1 string types map to the Go
|
||||
// type string. getUniversalType returns the tag for PrintableString
|
||||
// when it sees a string, so if we see a different string type on the
|
||||
// wire, we change the universal type to match.
|
||||
if universalTag == tagPrintableString {
|
||||
switch t.tag {
|
||||
case tagIA5String, tagGeneralString, tagT61String, tagUTF8String:
|
||||
universalTag = t.tag
|
||||
}
|
||||
}
|
||||
|
||||
// Special case for time: UTCTime and GeneralizedTime both map to the
|
||||
// Go type time.Time.
|
||||
if universalTag == tagUTCTime && t.tag == tagGeneralizedTime {
|
||||
universalTag = tagGeneralizedTime
|
||||
}
|
||||
|
||||
expectedClass := classUniversal
|
||||
expectedTag := universalTag
|
||||
|
||||
if !params.explicit && params.tag != nil {
|
||||
expectedClass = classContextSpecific
|
||||
expectedTag = *params.tag
|
||||
}
|
||||
|
||||
if !params.explicit && params.application && params.tag != nil {
|
||||
expectedClass = classApplication
|
||||
expectedTag = *params.tag
|
||||
}
|
||||
|
||||
// We have unwrapped any explicit tagging at this point.
|
||||
if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
|
||||
// Tags don't match. Again, it could be an optional element.
|
||||
ok := setDefaultValue(v, params)
|
||||
if ok {
|
||||
offset = initOffset
|
||||
} else {
|
||||
err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
|
||||
}
|
||||
return
|
||||
}
|
||||
if invalidLength(offset, t.length, len(bytes)) {
|
||||
err = SyntaxError{"data truncated"}
|
||||
return
|
||||
}
|
||||
innerBytes := bytes[offset : offset+t.length]
|
||||
offset += t.length
|
||||
|
||||
// We deal with the structures defined in this package first.
|
||||
switch fieldType {
|
||||
case objectIdentifierType:
|
||||
newSlice, err1 := parseObjectIdentifier(innerBytes)
|
||||
v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
|
||||
if err1 == nil {
|
||||
reflect.Copy(v, reflect.ValueOf(newSlice))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case bitStringType:
|
||||
bs, err1 := parseBitString(innerBytes)
|
||||
if err1 == nil {
|
||||
v.Set(reflect.ValueOf(bs))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case timeType:
|
||||
var time time.Time
|
||||
var err1 error
|
||||
if universalTag == tagUTCTime {
|
||||
time, err1 = parseUTCTime(innerBytes)
|
||||
} else {
|
||||
time, err1 = parseGeneralizedTime(innerBytes)
|
||||
}
|
||||
if err1 == nil {
|
||||
v.Set(reflect.ValueOf(time))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case enumeratedType:
|
||||
parsedInt, err1 := parseInt32(innerBytes)
|
||||
if err1 == nil {
|
||||
v.SetInt(int64(parsedInt))
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case flagType:
|
||||
v.SetBool(true)
|
||||
return
|
||||
case bigIntType:
|
||||
parsedInt := parseBigInt(innerBytes)
|
||||
v.Set(reflect.ValueOf(parsedInt))
|
||||
return
|
||||
}
|
||||
switch val := v; val.Kind() {
|
||||
case reflect.Bool:
|
||||
parsedBool, err1 := parseBool(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetBool(parsedBool)
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case reflect.Int, reflect.Int32, reflect.Int64:
|
||||
if val.Type().Size() == 4 {
|
||||
parsedInt, err1 := parseInt32(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetInt(int64(parsedInt))
|
||||
}
|
||||
err = err1
|
||||
} else {
|
||||
parsedInt, err1 := parseInt64(innerBytes)
|
||||
if err1 == nil {
|
||||
val.SetInt(parsedInt)
|
||||
}
|
||||
err = err1
|
||||
}
|
||||
return
|
||||
// TODO(dfc) Add support for the remaining integer types
|
||||
case reflect.Struct:
|
||||
structType := fieldType
|
||||
|
||||
if structType.NumField() > 0 &&
|
||||
structType.Field(0).Type == rawContentsType {
|
||||
bytes := bytes[initOffset:offset]
|
||||
val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
|
||||
}
|
||||
|
||||
innerOffset := 0
|
||||
for i := 0; i < structType.NumField(); i++ {
|
||||
field := structType.Field(i)
|
||||
if i == 0 && field.Type == rawContentsType {
|
||||
continue
|
||||
}
|
||||
innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// We allow extra bytes at the end of the SEQUENCE because
|
||||
// adding elements to the end has been used in X.509 as the
|
||||
// version numbers have increased.
|
||||
return
|
||||
case reflect.Slice:
|
||||
sliceType := fieldType
|
||||
if sliceType.Elem().Kind() == reflect.Uint8 {
|
||||
val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
|
||||
reflect.Copy(val, reflect.ValueOf(innerBytes))
|
||||
return
|
||||
}
|
||||
newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
|
||||
if err1 == nil {
|
||||
val.Set(newSlice)
|
||||
}
|
||||
err = err1
|
||||
return
|
||||
case reflect.String:
|
||||
var v string
|
||||
switch universalTag {
|
||||
case tagPrintableString:
|
||||
v, err = parsePrintableString(innerBytes)
|
||||
case tagIA5String:
|
||||
v, err = parseIA5String(innerBytes)
|
||||
case tagT61String:
|
||||
v, err = parseT61String(innerBytes)
|
||||
case tagUTF8String:
|
||||
v, err = parseUTF8String(innerBytes)
|
||||
case tagGeneralString:
|
||||
// GeneralString is specified in ISO-2022/ECMA-35.
|
||||
// A brief review suggests that it includes structures
|
||||
// that allow the encoding to change midstring and
|
||||
// such. We give up and pass it as an 8-bit string.
|
||||
v, err = parseT61String(innerBytes)
|
||||
default:
|
||||
err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
|
||||
}
|
||||
if err == nil {
|
||||
val.SetString(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
err = StructuralError{"unsupported: " + v.Type().String()}
|
||||
return
|
||||
}
|
||||
|
||||
// setDefaultValue is used to install a default value, from a tag string, into
|
||||
// a Value. It is successful if the field was optional, even if a default value
|
||||
// wasn't provided or it failed to install it into the Value.
|
||||
func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
|
||||
if !params.optional {
|
||||
return
|
||||
}
|
||||
ok = true
|
||||
if params.defaultValue == nil {
|
||||
return
|
||||
}
|
||||
switch val := v; val.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
val.SetInt(*params.defaultValue)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal parses the DER-encoded ASN.1 data structure b
|
||||
// and uses the reflect package to fill in an arbitrary value pointed at by val.
|
||||
// Because Unmarshal uses the reflect package, the structs
|
||||
// being written to must use upper case field names.
|
||||
//
|
||||
// An ASN.1 INTEGER can be written to an int, int32, int64,
|
||||
// or *big.Int (from the math/big package).
|
||||
// If the encoded value does not fit in the Go type,
|
||||
// Unmarshal returns a parse error.
|
||||
//
|
||||
// An ASN.1 BIT STRING can be written to a BitString.
|
||||
//
|
||||
// An ASN.1 OCTET STRING can be written to a []byte.
|
||||
//
|
||||
// An ASN.1 OBJECT IDENTIFIER can be written to an
|
||||
// ObjectIdentifier.
|
||||
//
|
||||
// An ASN.1 ENUMERATED can be written to an Enumerated.
|
||||
//
|
||||
// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
|
||||
//
|
||||
// An ASN.1 PrintableString or IA5String can be written to a string.
|
||||
//
|
||||
// Any of the above ASN.1 values can be written to an interface{}.
|
||||
// The value stored in the interface has the corresponding Go type.
|
||||
// For integers, that type is int64.
|
||||
//
|
||||
// An ASN.1 SEQUENCE OF x or SET OF x can be written
|
||||
// to a slice if an x can be written to the slice's element type.
|
||||
//
|
||||
// An ASN.1 SEQUENCE or SET can be written to a struct
|
||||
// if each of the elements in the sequence can be
|
||||
// written to the corresponding element in the struct.
|
||||
//
|
||||
// The following tags on struct fields have special meaning to Unmarshal:
|
||||
//
|
||||
// optional marks the field as ASN.1 OPTIONAL
|
||||
// [explicit] tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
|
||||
// default:x sets the default value for optional integer fields
|
||||
//
|
||||
// If the type of the first field of a structure is RawContent then the raw
|
||||
// ASN1 contents of the struct will be stored in it.
|
||||
//
|
||||
// Other ASN.1 types are not supported; if it encounters them,
|
||||
// Unmarshal returns a parse error.
|
||||
func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
|
||||
return UnmarshalWithParams(b, val, "")
|
||||
}
|
||||
|
||||
// UnmarshalWithParams allows field parameters to be specified for the
|
||||
// top-level element. The form of the params is the same as the field tags.
|
||||
func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
|
||||
v := reflect.ValueOf(val).Elem()
|
||||
offset, err := parseField(v, b, 0, parseFieldParameters(params))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[offset:], nil
|
||||
}
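Illustrative only (not part of the vendored source): a minimal sketch of calling Unmarshal with the struct tags documented above. The type, field names, and DER input are hypothetical.

type examplePayload struct {
	Raw     RawContent
	Version int `asn1:"optional,explicit,default:1,tag:0"`
	Name    string
}

func parseExamplePayload(der []byte) (*examplePayload, error) {
	var p examplePayload
	// Unmarshal fills p via reflection and returns any trailing bytes.
	if _, err := Unmarshal(der, &p); err != nil {
		return nil, err
	}
	return &p, nil
}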
|
163
vendor/github.com/google/certificate-transparency/go/asn1/common.go
generated
vendored
Executable file
163
vendor/github.com/google/certificate-transparency/go/asn1/common.go
generated
vendored
Executable file
@ -0,0 +1,163 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package asn1
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ASN.1 objects have metadata preceding them:
|
||||
// the tag: the type of the object
|
||||
// a flag denoting if this object is compound or not
|
||||
// the class type: the namespace of the tag
|
||||
// the length of the object, in bytes
|
||||
|
||||
// Here are some standard tags and classes
|
||||
|
||||
const (
|
||||
tagBoolean = 1
|
||||
tagInteger = 2
|
||||
tagBitString = 3
|
||||
tagOctetString = 4
|
||||
tagOID = 6
|
||||
tagEnum = 10
|
||||
tagUTF8String = 12
|
||||
tagSequence = 16
|
||||
tagSet = 17
|
||||
tagPrintableString = 19
|
||||
tagT61String = 20
|
||||
tagIA5String = 22
|
||||
tagUTCTime = 23
|
||||
tagGeneralizedTime = 24
|
||||
tagGeneralString = 27
|
||||
)
|
||||
|
||||
const (
|
||||
classUniversal = 0
|
||||
classApplication = 1
|
||||
classContextSpecific = 2
|
||||
classPrivate = 3
|
||||
)
|
||||
|
||||
type tagAndLength struct {
|
||||
class, tag, length int
|
||||
isCompound bool
|
||||
}
|
||||
|
||||
// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
|
||||
// of" and "in addition to". When not specified, every primitive type has a
|
||||
// default tag in the UNIVERSAL class.
|
||||
//
|
||||
// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
|
||||
// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
|
||||
// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
|
||||
//
|
||||
// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
|
||||
// /additional/ tag would wrap the default tag. This explicit tag will have the
|
||||
// compound flag set.
|
||||
//
|
||||
// (This is used in order to remove ambiguity with optional elements.)
|
||||
//
|
||||
// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
|
||||
// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
|
||||
// tagging with tag strings on the fields of a structure.
|
||||
|
||||
// fieldParameters is the parsed representation of tag string from a structure field.
|
||||
type fieldParameters struct {
|
||||
optional bool // true iff the field is OPTIONAL
|
||||
explicit bool // true iff an EXPLICIT tag is in use.
|
||||
application bool // true iff an APPLICATION tag is in use.
|
||||
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
|
||||
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
|
||||
stringType int // the string tag to use when marshaling.
|
||||
set bool // true iff this should be encoded as a SET
|
||||
omitEmpty bool // true iff this should be omitted if empty when marshaling.
|
||||
|
||||
// Invariants:
|
||||
// if explicit is set, tag is non-nil.
|
||||
}
|
||||
|
||||
// Given a tag string with the format specified in the package comment,
|
||||
// parseFieldParameters will parse it into a fieldParameters structure,
|
||||
// ignoring unknown parts of the string.
|
||||
func parseFieldParameters(str string) (ret fieldParameters) {
|
||||
for _, part := range strings.Split(str, ",") {
|
||||
switch {
|
||||
case part == "optional":
|
||||
ret.optional = true
|
||||
case part == "explicit":
|
||||
ret.explicit = true
|
||||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "ia5":
|
||||
ret.stringType = tagIA5String
|
||||
case part == "printable":
|
||||
ret.stringType = tagPrintableString
|
||||
case part == "utf8":
|
||||
ret.stringType = tagUTF8String
|
||||
case strings.HasPrefix(part, "default:"):
|
||||
i, err := strconv.ParseInt(part[8:], 10, 64)
|
||||
if err == nil {
|
||||
ret.defaultValue = new(int64)
|
||||
*ret.defaultValue = i
|
||||
}
|
||||
case strings.HasPrefix(part, "tag:"):
|
||||
i, err := strconv.Atoi(part[4:])
|
||||
if err == nil {
|
||||
ret.tag = new(int)
|
||||
*ret.tag = i
|
||||
}
|
||||
case part == "set":
|
||||
ret.set = true
|
||||
case part == "application":
|
||||
ret.application = true
|
||||
if ret.tag == nil {
|
||||
ret.tag = new(int)
|
||||
}
|
||||
case part == "omitempty":
|
||||
ret.omitEmpty = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
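Illustrative only: what the parser above produces for a typical (hypothetical) tag string, expressed in terms of the fieldParameters fields defined earlier in this file.

func exampleTagString() fieldParameters {
	// "optional,explicit,tag:2,default:1" yields:
	//   optional == true
	//   explicit == true (explicit also allocates the tag pointer)
	//   *tag == 2
	//   *defaultValue == 1
	return parseFieldParameters("optional,explicit,tag:2,default:1")
}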
|
||||
|
||||
// Given a reflected Go type, getUniversalType returns the default tag number
|
||||
// and expected compound flag.
|
||||
func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
|
||||
switch t {
|
||||
case objectIdentifierType:
|
||||
return tagOID, false, true
|
||||
case bitStringType:
|
||||
return tagBitString, false, true
|
||||
case timeType:
|
||||
return tagUTCTime, false, true
|
||||
case enumeratedType:
|
||||
return tagEnum, false, true
|
||||
case bigIntType:
|
||||
return tagInteger, false, true
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return tagBoolean, false, true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return tagInteger, false, true
|
||||
case reflect.Struct:
|
||||
return tagSequence, true, true
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return tagOctetString, false, true
|
||||
}
|
||||
if strings.HasSuffix(t.Name(), "SET") {
|
||||
return tagSet, true, true
|
||||
}
|
||||
return tagSequence, true, true
|
||||
case reflect.String:
|
||||
return tagPrintableString, false, true
|
||||
}
|
||||
return 0, false, false
|
||||
}
|
581
vendor/github.com/google/certificate-transparency/go/asn1/marshal.go
generated
vendored
Executable file
581
vendor/github.com/google/certificate-transparency/go/asn1/marshal.go
generated
vendored
Executable file
@ -0,0 +1,581 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package asn1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A forkableWriter is an in-memory buffer that can be
|
||||
// 'forked' to create new forkableWriters that bracket the
|
||||
// original. After
|
||||
// pre, post := w.fork();
|
||||
// the overall sequence of bytes represented is logically w+pre+post.
|
||||
type forkableWriter struct {
|
||||
*bytes.Buffer
|
||||
pre, post *forkableWriter
|
||||
}
|
||||
|
||||
func newForkableWriter() *forkableWriter {
|
||||
return &forkableWriter{new(bytes.Buffer), nil, nil}
|
||||
}
|
||||
|
||||
func (f *forkableWriter) fork() (pre, post *forkableWriter) {
|
||||
if f.pre != nil || f.post != nil {
|
||||
panic("have already forked")
|
||||
}
|
||||
f.pre = newForkableWriter()
|
||||
f.post = newForkableWriter()
|
||||
return f.pre, f.post
|
||||
}
|
||||
|
||||
func (f *forkableWriter) Len() (l int) {
|
||||
l += f.Buffer.Len()
|
||||
if f.pre != nil {
|
||||
l += f.pre.Len()
|
||||
}
|
||||
if f.post != nil {
|
||||
l += f.post.Len()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
|
||||
n, err = out.Write(f.Bytes())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var nn int
|
||||
|
||||
if f.pre != nil {
|
||||
nn, err = f.pre.writeTo(out)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if f.post != nil {
|
||||
nn, err = f.post.writeTo(out)
|
||||
n += nn
|
||||
}
|
||||
return
|
||||
}
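Illustrative only: a small sketch of how fork and writeTo compose output. Bytes already in the parent writer come first, then the pre buffer, then the post buffer, which is what lets callers reserve space for tag and length bytes before the body length is known.

func exampleFork() []byte {
	w := newForkableWriter()
	pre, post := w.fork()
	post.WriteByte('C') // emitted last
	pre.WriteByte('B')  // emitted after w's own bytes
	w.WriteByte('A')    // the parent's own buffer is always emitted first
	var out bytes.Buffer
	w.writeTo(&out)
	return out.Bytes() // "ABC"
}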
|
||||
|
||||
func marshalBase128Int(out *forkableWriter, n int64) (err error) {
|
||||
if n == 0 {
|
||||
err = out.WriteByte(0)
|
||||
return
|
||||
}
|
||||
|
||||
l := 0
|
||||
for i := n; i > 0; i >>= 7 {
|
||||
l++
|
||||
}
|
||||
|
||||
for i := l - 1; i >= 0; i-- {
|
||||
o := byte(n >> uint(i*7))
|
||||
o &= 0x7f
|
||||
if i != 0 {
|
||||
o |= 0x80
|
||||
}
|
||||
err = out.WriteByte(o)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
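As a worked example of the base-128 encoding above: the value is emitted seven bits at a time, most significant group first, with the top bit set on every byte except the last, so 268 (= 2*128 + 12) is written as the two bytes 0x82 0x0C.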
|
||||
|
||||
func marshalInt64(out *forkableWriter, i int64) (err error) {
|
||||
n := int64Length(i)
|
||||
|
||||
for ; n > 0; n-- {
|
||||
err = out.WriteByte(byte(i >> uint((n-1)*8)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func int64Length(i int64) (numBytes int) {
|
||||
numBytes = 1
|
||||
|
||||
for i > 127 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
for i < -128 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
|
||||
if n.Sign() < 0 {
|
||||
// A negative number has to be converted to two's-complement
|
||||
// form. So we'll subtract 1 and invert. If the
|
||||
// most-significant-bit isn't set then we'll need to pad the
|
||||
// beginning with 0xff in order to keep the number negative.
|
||||
nMinus1 := new(big.Int).Neg(n)
|
||||
nMinus1.Sub(nMinus1, bigOne)
|
||||
bytes := nMinus1.Bytes()
|
||||
for i := range bytes {
|
||||
bytes[i] ^= 0xff
|
||||
}
|
||||
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
|
||||
err = out.WriteByte(0xff)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
} else if n.Sign() == 0 {
|
||||
// Zero is written as a single zero byte rather than no bytes.
|
||||
err = out.WriteByte(0x00)
|
||||
} else {
|
||||
bytes := n.Bytes()
|
||||
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
|
||||
// We'll have to pad this with 0x00 in order to stop it
|
||||
// looking like a negative number.
|
||||
err = out.WriteByte(0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func marshalLength(out *forkableWriter, i int) (err error) {
|
||||
n := lengthLength(i)
|
||||
|
||||
for ; n > 0; n-- {
|
||||
err = out.WriteByte(byte(i >> uint((n-1)*8)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lengthLength(i int) (numBytes int) {
|
||||
numBytes = 1
|
||||
for i > 255 {
|
||||
numBytes++
|
||||
i >>= 8
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
|
||||
b := uint8(t.class) << 6
|
||||
if t.isCompound {
|
||||
b |= 0x20
|
||||
}
|
||||
if t.tag >= 31 {
|
||||
b |= 0x1f
|
||||
err = out.WriteByte(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = marshalBase128Int(out, int64(t.tag))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
b |= uint8(t.tag)
|
||||
err = out.WriteByte(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if t.length >= 128 {
|
||||
l := lengthLength(t.length)
|
||||
err = out.WriteByte(0x80 | byte(l))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = marshalLength(out, t.length)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = out.WriteByte(byte(t.length))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
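As a concrete illustration of the long-form length branch above: a primitive UNIVERSAL tag 4 (OCTET STRING) of length 300 is encoded as 0x04 0x82 0x01 0x2C, since 300 does not fit in a single short-form length byte (0x82 announces two length bytes; 0x01 0x2C is 300 in big-endian).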
|
||||
|
||||
func marshalBitString(out *forkableWriter, b BitString) (err error) {
|
||||
paddingBits := byte((8 - b.BitLength%8) % 8)
|
||||
err = out.WriteByte(paddingBits)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = out.Write(b.Bytes)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
|
||||
if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
|
||||
return StructuralError{"invalid object identifier"}
|
||||
}
|
||||
|
||||
err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for i := 2; i < len(oid); i++ {
|
||||
err = marshalBase128Int(out, int64(oid[i]))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func marshalPrintableString(out *forkableWriter, s string) (err error) {
|
||||
b := []byte(s)
|
||||
for _, c := range b {
|
||||
if !isPrintable(c) {
|
||||
return StructuralError{"PrintableString contains invalid character"}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = out.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalIA5String(out *forkableWriter, s string) (err error) {
|
||||
b := []byte(s)
|
||||
for _, c := range b {
|
||||
if c > 127 {
|
||||
return StructuralError{"IA5String contains invalid character"}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = out.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
func marshalUTF8String(out *forkableWriter, s string) (err error) {
|
||||
_, err = out.Write([]byte(s))
|
||||
return
|
||||
}
|
||||
|
||||
func marshalTwoDigits(out *forkableWriter, v int) (err error) {
|
||||
err = out.WriteByte(byte('0' + (v/10)%10))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return out.WriteByte(byte('0' + v%10))
|
||||
}
|
||||
|
||||
func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
|
||||
year, month, day := t.Date()
|
||||
|
||||
switch {
|
||||
case 1950 <= year && year < 2000:
|
||||
err = marshalTwoDigits(out, int(year-1900))
|
||||
case 2000 <= year && year < 2050:
|
||||
err = marshalTwoDigits(out, int(year-2000))
|
||||
default:
|
||||
return StructuralError{"cannot represent time as UTCTime"}
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, int(month))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, day)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hour, min, sec := t.Clock()
|
||||
|
||||
err = marshalTwoDigits(out, hour)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, min)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, sec)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, offset := t.Zone()
|
||||
|
||||
switch {
|
||||
case offset/60 == 0:
|
||||
err = out.WriteByte('Z')
|
||||
return
|
||||
case offset > 0:
|
||||
err = out.WriteByte('+')
|
||||
case offset < 0:
|
||||
err = out.WriteByte('-')
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
offsetMinutes := offset / 60
|
||||
if offsetMinutes < 0 {
|
||||
offsetMinutes = -offsetMinutes
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, offsetMinutes/60)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = marshalTwoDigits(out, offsetMinutes%60)
|
||||
return
|
||||
}
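For example, a time of 2016-03-01 15:04:05 UTC is written by the function above as the ASCII string 160301150405Z, while the same instant carried in a +02:00 zone would be written as 160301170405+0200.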
|
||||
|
||||
func stripTagAndLength(in []byte) []byte {
|
||||
_, offset, err := parseTagAndLength(in, 0)
|
||||
if err != nil {
|
||||
return in
|
||||
}
|
||||
return in[offset:]
|
||||
}
|
||||
|
||||
func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
|
||||
switch value.Type() {
|
||||
case timeType:
|
||||
return marshalUTCTime(out, value.Interface().(time.Time))
|
||||
case bitStringType:
|
||||
return marshalBitString(out, value.Interface().(BitString))
|
||||
case objectIdentifierType:
|
||||
return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
|
||||
case bigIntType:
|
||||
return marshalBigInt(out, value.Interface().(*big.Int))
|
||||
}
|
||||
|
||||
switch v := value; v.Kind() {
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
return out.WriteByte(255)
|
||||
} else {
|
||||
return out.WriteByte(0)
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return marshalInt64(out, int64(v.Int()))
|
||||
case reflect.Struct:
|
||||
t := v.Type()
|
||||
|
||||
startingField := 0
|
||||
|
||||
// If the first element of the structure is a non-empty
|
||||
// RawContents, then we don't bother serializing the rest.
|
||||
if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
|
||||
s := v.Field(0)
|
||||
if s.Len() > 0 {
|
||||
bytes := make([]byte, s.Len())
|
||||
for i := 0; i < s.Len(); i++ {
|
||||
bytes[i] = uint8(s.Index(i).Uint())
|
||||
}
|
||||
/* The RawContents will contain the tag and
|
||||
* length fields but we'll also be writing
|
||||
* those ourselves, so we strip them out of
|
||||
* bytes */
|
||||
_, err = out.Write(stripTagAndLength(bytes))
|
||||
return
|
||||
} else {
|
||||
startingField = 1
|
||||
}
|
||||
}
|
||||
|
||||
for i := startingField; i < t.NumField(); i++ {
|
||||
var pre *forkableWriter
|
||||
pre, out = out.fork()
|
||||
err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
case reflect.Slice:
|
||||
sliceType := v.Type()
|
||||
if sliceType.Elem().Kind() == reflect.Uint8 {
|
||||
bytes := make([]byte, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
bytes[i] = uint8(v.Index(i).Uint())
|
||||
}
|
||||
_, err = out.Write(bytes)
|
||||
return
|
||||
}
|
||||
|
||||
var fp fieldParameters
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
var pre *forkableWriter
|
||||
pre, out = out.fork()
|
||||
err = marshalField(pre, v.Index(i), fp)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
case reflect.String:
|
||||
switch params.stringType {
|
||||
case tagIA5String:
|
||||
return marshalIA5String(out, v.String())
|
||||
case tagPrintableString:
|
||||
return marshalPrintableString(out, v.String())
|
||||
default:
|
||||
return marshalUTF8String(out, v.String())
|
||||
}
|
||||
}
|
||||
|
||||
return StructuralError{"unknown Go type"}
|
||||
}
|
||||
|
||||
func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
|
||||
// If the field is an interface{} then recurse into it.
|
||||
if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
|
||||
return marshalField(out, v.Elem(), params)
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
|
||||
return
|
||||
}
|
||||
|
||||
if params.optional && reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
|
||||
return
|
||||
}
|
||||
|
||||
if v.Type() == rawValueType {
|
||||
rv := v.Interface().(RawValue)
|
||||
if len(rv.FullBytes) != 0 {
|
||||
_, err = out.Write(rv.FullBytes)
|
||||
} else {
|
||||
err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = out.Write(rv.Bytes)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
tag, isCompound, ok := getUniversalType(v.Type())
|
||||
if !ok {
|
||||
err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
|
||||
return
|
||||
}
|
||||
class := classUniversal
|
||||
|
||||
if params.stringType != 0 && tag != tagPrintableString {
|
||||
return StructuralError{"explicit string type given to non-string member"}
|
||||
}
|
||||
|
||||
if tag == tagPrintableString {
|
||||
if params.stringType == 0 {
|
||||
// This is a string without an explicit string type. We'll use
|
||||
// a PrintableString if the character set in the string is
|
||||
// sufficiently limited, otherwise we'll use a UTF8String.
|
||||
for _, r := range v.String() {
|
||||
if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
|
||||
if !utf8.ValidString(v.String()) {
|
||||
return errors.New("asn1: string not valid UTF-8")
|
||||
}
|
||||
tag = tagUTF8String
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tag = params.stringType
|
||||
}
|
||||
}
|
||||
|
||||
if params.set {
|
||||
if tag != tagSequence {
|
||||
return StructuralError{"non sequence tagged as set"}
|
||||
}
|
||||
tag = tagSet
|
||||
}
|
||||
|
||||
tags, body := out.fork()
|
||||
|
||||
err = marshalBody(body, v, params)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bodyLen := body.Len()
|
||||
|
||||
var explicitTag *forkableWriter
|
||||
if params.explicit {
|
||||
explicitTag, tags = tags.fork()
|
||||
}
|
||||
|
||||
if !params.explicit && params.tag != nil {
|
||||
// implicit tag.
|
||||
tag = *params.tag
|
||||
class = classContextSpecific
|
||||
}
|
||||
|
||||
err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if params.explicit {
|
||||
err = marshalTagAndLength(explicitTag, tagAndLength{
|
||||
class: classContextSpecific,
|
||||
tag: *params.tag,
|
||||
length: bodyLen + tags.Len(),
|
||||
isCompound: true,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal returns the ASN.1 encoding of val.
|
||||
func Marshal(val interface{}) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
v := reflect.ValueOf(val)
|
||||
f := newForkableWriter()
|
||||
err := marshalField(f, v, fieldParameters{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = f.writeTo(&out)
|
||||
return out.Bytes(), nil
|
||||
}
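Illustrative only: a minimal sketch of calling Marshal on an anonymous struct; the field names and values are hypothetical.

func exampleMarshal() ([]byte, error) {
	msg := struct {
		Valid bool
		Count int
		Label string `asn1:"printable"`
	}{Valid: true, Count: 42, Label: "hello"}
	// Encodes as a DER SEQUENCE of a BOOLEAN, an INTEGER and a PrintableString.
	return Marshal(msg)
}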
|
358
vendor/github.com/google/certificate-transparency/go/client/logclient.go
generated
vendored
Normal file
358
vendor/github.com/google/certificate-transparency/go/client/logclient.go
generated
vendored
Normal file
@ -0,0 +1,358 @@
|
||||
// Package client is a CT log client implementation and contains types and code
|
||||
// for interacting with RFC6962-compliant CT Log instances.
|
||||
// See http://tools.ietf.org/html/rfc6962 for details
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/google/certificate-transparency/go"
|
||||
"github.com/mreiferson/go-httpclient"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// URI paths for CT Log endpoints
|
||||
const (
|
||||
AddChainPath = "/ct/v1/add-chain"
|
||||
AddPreChainPath = "/ct/v1/add-pre-chain"
|
||||
GetSTHPath = "/ct/v1/get-sth"
|
||||
GetEntriesPath = "/ct/v1/get-entries"
|
||||
)
|
||||
|
||||
// LogClient represents a client for a given CT Log instance
|
||||
type LogClient struct {
|
||||
uri string // the base URI of the log, e.g. http://ct.googleapis.com/pilot
|
||||
httpClient *http.Client // used to interact with the log via HTTP
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
// JSON structures follow.
|
||||
// These represent the structures returned by the CT Log server.
|
||||
//////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// addChainRequest represents the JSON request body sent to the add-chain CT
|
||||
// method.
|
||||
type addChainRequest struct {
|
||||
Chain []string `json:"chain"`
|
||||
}
|
||||
|
||||
// addChainResponse represents the JSON response to the add-chain CT method.
|
||||
// An SCT represents a Log's promise to integrate a [pre-]certificate into the
|
||||
// log within a defined period of time.
|
||||
type addChainResponse struct {
|
||||
SCTVersion ct.Version `json:"sct_version"` // SCT structure version
|
||||
ID string `json:"id"` // Log ID
|
||||
Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
|
||||
Extensions string `json:"extensions"` // Holder for any CT extensions
|
||||
Signature string `json:"signature"` // Log signature for this SCT
|
||||
}
|
||||
|
||||
// getSTHResponse represents the JSON response to the get-sth CT method
|
||||
type getSTHResponse struct {
|
||||
TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
|
||||
Timestamp uint64 `json:"timestamp"` // Time that the tree was created
|
||||
SHA256RootHash string `json:"sha256_root_hash"` // Root hash of the tree
|
||||
TreeHeadSignature string `json:"tree_head_signature"` // Log signature for this STH
|
||||
}
|
||||
|
||||
// base64LeafEntry represents a Base64-encoded leaf entry
|
||||
type base64LeafEntry struct {
|
||||
LeafInput string `json:"leaf_input"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
}
|
||||
|
||||
// getEntriesResponse represents the JSON response to the CT get-entries method
|
||||
type getEntriesResponse struct {
|
||||
Entries []base64LeafEntry `json:"entries"` // the list of returned entries
|
||||
}
|
||||
|
||||
// getConsistencyProofResponse represents the JSON response to the CT get-consistency-proof method
|
||||
type getConsistencyProofResponse struct {
|
||||
Consistency []string `json:"consistency"`
|
||||
}
|
||||
|
||||
// getAuditProofResponse represents the JSON response to the CT get-audit-proof method
|
||||
type getAuditProofResponse struct {
|
||||
Hash []string `json:"hash"` // the hashes which make up the proof
|
||||
TreeSize uint64 `json:"tree_size"` // the tree size against which this proof is constructed
|
||||
}
|
||||
|
||||
// getAcceptedRootsResponse represents the JSON response to the CT get-roots method.
|
||||
type getAcceptedRootsResponse struct {
|
||||
Certificates []string `json:"certificates"`
|
||||
}
|
||||
|
||||
// getEntryAndProofResponse represents the JSON response to the CT get-entry-and-proof method
|
||||
type getEntryAndProofResponse struct {
|
||||
LeafInput string `json:"leaf_input"` // the entry itself
|
||||
ExtraData string `json:"extra_data"` // any chain provided when the entry was added to the log
|
||||
AuditPath []string `json:"audit_path"` // the corresponding proof
|
||||
}
|
||||
|
||||
// New constructs a new LogClient instance.
|
||||
// |uri| is the base URI of the CT log instance to interact with, e.g.
|
||||
// http://ct.googleapis.com/pilot
|
||||
func New(uri string) *LogClient {
|
||||
var c LogClient
|
||||
c.uri = uri
|
||||
transport := &httpclient.Transport{
|
||||
ConnectTimeout: 10 * time.Second,
|
||||
RequestTimeout: 30 * time.Second,
|
||||
ResponseHeaderTimeout: 30 * time.Second,
|
||||
MaxIdleConnsPerHost: 10,
|
||||
DisableKeepAlives: false,
|
||||
}
|
||||
c.httpClient = &http.Client{Transport: transport}
|
||||
return &c
|
||||
}
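Illustrative only: a sketch of constructing a client against a hypothetical log URL and fetching its current signed tree head with the GetSTH method defined later in this file.

func exampleGetSTH() error {
	c := New("https://ct.example.com/log") // hypothetical base URI
	sth, err := c.GetSTH()
	if err != nil {
		return err
	}
	log.Printf("tree size %d at timestamp %d", sth.TreeSize, sth.Timestamp)
	return nil
}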
|
||||
|
||||
// Makes an HTTP call to |uri| and attempts to parse the response as a JSON
|
||||
// representation of the structure in |res|.
|
||||
// Returns a non-nil |error| if there was a problem.
|
||||
func (c *LogClient) fetchAndParse(uri string, res interface{}) error {
|
||||
req, err := http.NewRequest("GET", uri, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Keep-Alive", "timeout=15, max=100")
|
||||
resp, err := c.httpClient.Do(req)
|
||||
var body []byte
|
||||
if resp != nil {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = json.Unmarshal(body, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Makes an HTTP POST call to |uri| and attempts to parse the response as a JSON
|
||||
// representation of the structure in |res|.
|
||||
// Returns a non-nil |error| if there was a problem.
|
||||
func (c *LogClient) postAndParse(uri string, req interface{}, res interface{}) (*http.Response, string, error) {
|
||||
postBody, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
httpReq, err := http.NewRequest("POST", uri, bytes.NewReader(postBody))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
httpReq.Header.Set("Keep-Alive", "timeout=15, max=100")
|
||||
httpReq.Header.Set("Content-Type", "application/json")
|
||||
resp, err := c.httpClient.Do(httpReq)
|
||||
// Read all of the body, if there is one, so that the http.Client can do
|
||||
// Keep-Alive:
|
||||
var body []byte
|
||||
if resp != nil {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
if resp.StatusCode == 200 {
|
||||
if err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
if err = json.Unmarshal(body, &res); err != nil {
|
||||
return resp, string(body), err
|
||||
}
|
||||
}
|
||||
return resp, string(body), nil
|
||||
}
|
||||
|
||||
func backoffForRetry(ctx context.Context, d time.Duration) error {
|
||||
backoffTimer := time.NewTimer(d)
|
||||
if ctx != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-backoffTimer.C:
|
||||
}
|
||||
} else {
|
||||
<-backoffTimer.C
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attempts to add |chain| to the log, using the api end-point specified by
|
||||
// |path|. If the provided context expires before submission is complete, an
|
||||
// error will be returned.
|
||||
func (c *LogClient) addChainWithRetry(ctx context.Context, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
var resp addChainResponse
|
||||
var req addChainRequest
|
||||
for _, link := range chain {
|
||||
req.Chain = append(req.Chain, base64.StdEncoding.EncodeToString(link))
|
||||
}
|
||||
httpStatus := "Unknown"
|
||||
backoffSeconds := 0
|
||||
done := false
|
||||
for !done {
|
||||
if backoffSeconds > 0 {
|
||||
log.Printf("Got %s, backing-off %d seconds", httpStatus, backoffSeconds)
|
||||
}
|
||||
err := backoffForRetry(ctx, time.Second*time.Duration(backoffSeconds))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if backoffSeconds > 0 {
|
||||
backoffSeconds = 0
|
||||
}
|
||||
httpResp, errorBody, err := c.postAndParse(c.uri+path, &req, &resp)
|
||||
if err != nil {
|
||||
backoffSeconds = 10
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case httpResp.StatusCode == 200:
|
||||
done = true
|
||||
case httpResp.StatusCode == 408:
|
||||
// request timeout, retry immediately
|
||||
case httpResp.StatusCode == 503:
|
||||
// Retry
|
||||
backoffSeconds = 10
|
||||
if retryAfter := httpResp.Header.Get("Retry-After"); retryAfter != "" {
|
||||
if seconds, err := strconv.Atoi(retryAfter); err == nil {
|
||||
backoffSeconds = seconds
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("got HTTP Status %s: %s", httpResp.Status, errorBody)
|
||||
}
|
||||
httpStatus = httpResp.Status
|
||||
}
|
||||
|
||||
rawLogID, err := base64.StdEncoding.DecodeString(resp.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rawSignature, err := base64.StdEncoding.DecodeString(resp.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var logID ct.SHA256Hash
|
||||
copy(logID[:], rawLogID)
|
||||
return &ct.SignedCertificateTimestamp{
|
||||
SCTVersion: resp.SCTVersion,
|
||||
LogID: logID,
|
||||
Timestamp: resp.Timestamp,
|
||||
Extensions: ct.CTExtensions(resp.Extensions),
|
||||
Signature: *ds}, nil
|
||||
}
|
||||
|
||||
// AddChain adds the (DER represented) X509 |chain| to the log.
|
||||
func (c *LogClient) AddChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
return c.addChainWithRetry(nil, AddChainPath, chain)
|
||||
}
|
||||
|
||||
// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
|
||||
func (c *LogClient) AddPreChain(chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
return c.addChainWithRetry(nil, AddPreChainPath, chain)
|
||||
}
|
||||
|
||||
// AddChainWithContext adds the (DER represented) X509 |chain| to the log and
|
||||
// fails if the provided context expires before the chain is submitted.
|
||||
func (c *LogClient) AddChainWithContext(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
|
||||
return c.addChainWithRetry(ctx, AddChainPath, chain)
|
||||
}
|
||||
|
||||
// GetSTH retrieves the current STH from the log.
|
||||
// Returns a populated SignedTreeHead, or a non-nil error.
|
||||
func (c *LogClient) GetSTH() (sth *ct.SignedTreeHead, err error) {
|
||||
var resp getSTHResponse
|
||||
if err = c.fetchAndParse(c.uri+GetSTHPath, &resp); err != nil {
|
||||
return
|
||||
}
|
||||
sth = &ct.SignedTreeHead{
|
||||
TreeSize: resp.TreeSize,
|
||||
Timestamp: resp.Timestamp,
|
||||
}
|
||||
|
||||
rawRootHash, err := base64.StdEncoding.DecodeString(resp.SHA256RootHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid base64 encoding in sha256_root_hash: %v", err)
|
||||
}
|
||||
if len(rawRootHash) != sha256.Size {
|
||||
return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(rawRootHash))
|
||||
}
|
||||
copy(sth.SHA256RootHash[:], rawRootHash)
|
||||
|
||||
rawSignature, err := base64.StdEncoding.DecodeString(resp.TreeHeadSignature)
|
||||
if err != nil {
|
||||
return nil, errors.New("invalid base64 encoding in tree_head_signature")
|
||||
}
|
||||
ds, err := ct.UnmarshalDigitallySigned(bytes.NewReader(rawSignature))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(alcutter): Verify signature
|
||||
sth.TreeHeadSignature = *ds
|
||||
return
|
||||
}
|
||||
|
||||
// GetEntries attempts to retrieve the entries in the sequence [|start|, |end|] from the CT
|
||||
// log server. (see section 4.6.)
|
||||
// Returns a slice of LeafInputs or a non-nil error.
|
||||
func (c *LogClient) GetEntries(start, end int64) ([]ct.LogEntry, error) {
|
||||
if end < 0 {
|
||||
return nil, errors.New("end should be >= 0")
|
||||
}
|
||||
if end < start {
|
||||
return nil, errors.New("start should be <= end")
|
||||
}
|
||||
var resp getEntriesResponse
|
||||
err := c.fetchAndParse(fmt.Sprintf("%s%s?start=%d&end=%d", c.uri, GetEntriesPath, start, end), &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries := make([]ct.LogEntry, len(resp.Entries))
|
||||
for index, entry := range resp.Entries {
|
||||
leafBytes, err := base64.StdEncoding.DecodeString(entry.LeafInput)
|
||||
leaf, err := ct.ReadMerkleTreeLeaf(bytes.NewBuffer(leafBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries[index].Leaf = *leaf
|
||||
chainBytes, err := base64.StdEncoding.DecodeString(entry.ExtraData)
|
||||
|
||||
var chain []ct.ASN1Cert
|
||||
switch leaf.TimestampedEntry.EntryType {
|
||||
case ct.X509LogEntryType:
|
||||
chain, err = ct.UnmarshalX509ChainArray(chainBytes)
|
||||
|
||||
case ct.PrecertLogEntryType:
|
||||
chain, err = ct.UnmarshalPrecertChainArray(chainBytes)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("saw unknown entry type: %v", leaf.TimestampedEntry.EntryType)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries[index].Chain = chain
|
||||
entries[index].Index = start + int64(index)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
512
vendor/github.com/google/certificate-transparency/go/serialization.go
generated
vendored
Normal file
512
vendor/github.com/google/certificate-transparency/go/serialization.go
generated
vendored
Normal file
@ -0,0 +1,512 @@
|
||||
package ct
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"crypto"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Variable size structure prefix-header byte lengths
|
||||
const (
|
||||
CertificateLengthBytes = 3
|
||||
PreCertificateLengthBytes = 3
|
||||
ExtensionsLengthBytes = 2
|
||||
CertificateChainLengthBytes = 3
|
||||
SignatureLengthBytes = 2
|
||||
)
|
||||
|
||||
// Max lengths
|
||||
const (
|
||||
MaxCertificateLength = (1 << 24) - 1
|
||||
MaxExtensionsLength = (1 << 16) - 1
|
||||
)
|
||||
|
||||
func writeUint(w io.Writer, value uint64, numBytes int) error {
|
||||
buf := make([]uint8, numBytes)
|
||||
for i := 0; i < numBytes; i++ {
|
||||
buf[numBytes-i-1] = uint8(value & 0xff)
|
||||
value >>= 8
|
||||
}
|
||||
if value != 0 {
|
||||
return errors.New("numBytes was insufficiently large to represent value")
|
||||
}
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error {
|
||||
if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(value); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readUint(r io.Reader, numBytes int) (uint64, error) {
|
||||
var l uint64
|
||||
for i := 0; i < numBytes; i++ {
|
||||
l <<= 8
|
||||
var t uint8
|
||||
if err := binary.Read(r, binary.BigEndian, &t); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
l |= uint64(t)
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Reads a variable length array of bytes from |r|. |numLenBytes| specifies the
|
||||
// number of (BigEndian) prefix-bytes which contain the length of the actual
|
||||
// array data bytes that follow.
|
||||
// Allocates an array to hold the contents and returns a slice view into it if
|
||||
// the read was successful, or an error otherwise.
|
||||
func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) {
|
||||
switch {
|
||||
case numLenBytes > 8:
|
||||
return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes)
|
||||
case numLenBytes == 0:
|
||||
return nil, errors.New("numLenBytes should be > 0")
|
||||
}
|
||||
l, err := readUint(r, numLenBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := make([]byte, l)
|
||||
n, err := r.Read(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n != int(l) {
|
||||
return nil, fmt.Errorf("short read: expected %d but got %d", l, n)
|
||||
}
|
||||
return data, nil
|
||||
}
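For instance, writeVarBytes with a 2-byte length prefix turns the 3-byte payload AA BB CC into the 5 bytes 00 03 AA BB CC, and readVarBytes with numLenBytes = 2 reverses exactly that framing.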
|
||||
|
||||
// Reads a list of ASN1Cert types from |r|
|
||||
func readASN1CertList(r io.Reader, totalLenBytes int, elementLenBytes int) ([]ASN1Cert, error) {
|
||||
listBytes, err := readVarBytes(r, totalLenBytes)
|
||||
if err != nil {
|
||||
return []ASN1Cert{}, err
|
||||
}
|
||||
list := list.New()
|
||||
listReader := bytes.NewReader(listBytes)
|
||||
var entry []byte
|
||||
for err == nil {
|
||||
entry, err = readVarBytes(listReader, elementLenBytes)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return []ASN1Cert{}, err
|
||||
}
|
||||
} else {
|
||||
list.PushBack(entry)
|
||||
}
|
||||
}
|
||||
ret := make([]ASN1Cert, list.Len())
|
||||
i := 0
|
||||
for e := list.Front(); e != nil; e = e.Next() {
|
||||
ret[i] = e.Value.([]byte)
|
||||
i++
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// ReadTimestampedEntryInto parses the byte-stream representation of a
|
||||
// TimestampedEntry from |r| and populates the struct |t| with the data. See
|
||||
// RFC section 3.4 for details on the format.
|
||||
// Returns a non-nil error if there was a problem.
|
||||
func ReadTimestampedEntryInto(r io.Reader, t *TimestampedEntry) error {
|
||||
var err error
|
||||
if err = binary.Read(r, binary.BigEndian, &t.Timestamp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = binary.Read(r, binary.BigEndian, &t.EntryType); err != nil {
|
||||
return err
|
||||
}
|
||||
switch t.EntryType {
|
||||
case X509LogEntryType:
|
||||
if t.X509Entry, err = readVarBytes(r, CertificateLengthBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
case PrecertLogEntryType:
|
||||
if err := binary.Read(r, binary.BigEndian, &t.PrecertEntry.IssuerKeyHash); err != nil {
|
||||
return err
|
||||
}
|
||||
if t.PrecertEntry.TBSCertificate, err = readVarBytes(r, PreCertificateLengthBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unknown EntryType: %d", t.EntryType)
|
||||
}
|
||||
t.Extensions, err = readVarBytes(r, ExtensionsLengthBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadMerkleTreeLeaf parses the byte-stream representation of a MerkleTreeLeaf
|
||||
// and returns a pointer to a new MerkleTreeLeaf structure containing the
|
||||
// parsed data.
|
||||
// See RFC section 3.4 for details on the format.
|
||||
// Returns a pointer to a new MerkleTreeLeaf or non-nil error if there was a
|
||||
// problem
|
||||
func ReadMerkleTreeLeaf(r io.Reader) (*MerkleTreeLeaf, error) {
|
||||
var m MerkleTreeLeaf
|
||||
if err := binary.Read(r, binary.BigEndian, &m.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.Version != V1 {
|
||||
return nil, fmt.Errorf("unknown Version %d", m.Version)
|
||||
}
|
||||
if err := binary.Read(r, binary.BigEndian, &m.LeafType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.LeafType != TimestampedEntryLeafType {
|
||||
return nil, fmt.Errorf("unknown LeafType %d", m.LeafType)
|
||||
}
|
||||
if err := ReadTimestampedEntryInto(r, &m.TimestampedEntry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// UnmarshalX509ChainArray unmarshals the contents of the "chain:" entry in a
|
||||
// GetEntries response in the case where the entry refers to an X509 leaf.
|
||||
func UnmarshalX509ChainArray(b []byte) ([]ASN1Cert, error) {
|
||||
return readASN1CertList(bytes.NewReader(b), CertificateChainLengthBytes, CertificateLengthBytes)
|
||||
}
|
||||
|
||||
// UnmarshalPrecertChainArray unmarshals the contents of the "chain:" entry in
|
||||
// a GetEntries response in the case where the entry refers to a Precertificate
|
||||
// leaf.
|
||||
func UnmarshalPrecertChainArray(b []byte) ([]ASN1Cert, error) {
|
||||
var chain []ASN1Cert
|
||||
|
||||
reader := bytes.NewReader(b)
|
||||
// read the pre-cert entry:
|
||||
precert, err := readVarBytes(reader, CertificateLengthBytes)
|
||||
if err != nil {
|
||||
return chain, err
|
||||
}
|
||||
chain = append(chain, precert)
|
||||
// and then read and return the chain up to the root:
|
||||
remainingChain, err := readASN1CertList(reader, CertificateChainLengthBytes, CertificateLengthBytes)
|
||||
if err != nil {
|
||||
return chain, err
|
||||
}
|
||||
chain = append(chain, remainingChain...)
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a Reader
|
||||
func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) {
|
||||
var h byte
|
||||
if err := binary.Read(r, binary.BigEndian, &h); err != nil {
|
||||
return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err)
|
||||
}
|
||||
|
||||
var s byte
|
||||
if err := binary.Read(r, binary.BigEndian, &s); err != nil {
|
||||
return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err)
|
||||
}
|
||||
|
||||
sig, err := readVarBytes(r, SignatureLengthBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read Signature bytes: %v", err)
|
||||
}
|
||||
|
||||
return &DigitallySigned{
|
||||
HashAlgorithm: HashAlgorithm(h),
|
||||
SignatureAlgorithm: SignatureAlgorithm(s),
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func marshalDigitallySignedHere(ds DigitallySigned, here []byte) ([]byte, error) {
|
||||
sigLen := len(ds.Signature)
|
||||
dsOutLen := 2 + SignatureLengthBytes + sigLen
|
||||
if here == nil {
|
||||
here = make([]byte, dsOutLen)
|
||||
}
|
||||
if len(here) < dsOutLen {
|
||||
return nil, ErrNotEnoughBuffer
|
||||
}
|
||||
here = here[0:dsOutLen]
|
||||
|
||||
here[0] = byte(ds.HashAlgorithm)
|
||||
here[1] = byte(ds.SignatureAlgorithm)
|
||||
binary.BigEndian.PutUint16(here[2:4], uint16(sigLen))
|
||||
copy(here[4:], ds.Signature)
|
||||
|
||||
return here, nil
|
||||
}
|
||||
|
||||
// MarshalDigitallySigned marshals a DigitallySigned structure into a byte array
|
||||
func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) {
|
||||
return marshalDigitallySignedHere(ds, nil)
|
||||
}
|
||||
|
||||
func checkCertificateFormat(cert ASN1Cert) error {
|
||||
if len(cert) == 0 {
|
||||
return errors.New("certificate is zero length")
|
||||
}
|
||||
if len(cert) > MaxCertificateLength {
|
||||
return errors.New("certificate too large")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkExtensionsFormat(ext CTExtensions) error {
|
||||
if len(ext) > MaxExtensionsLength {
|
||||
return errors.New("extensions too large")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func serializeV1CertSCTSignatureInput(timestamp uint64, cert ASN1Cert, ext CTExtensions) ([]byte, error) {
|
||||
if err := checkCertificateFormat(cert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := checkExtensionsFormat(ext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, X509LogEntryType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, cert, CertificateLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func serializeV1PrecertSCTSignatureInput(timestamp uint64, issuerKeyHash [issuerKeyHashLength]byte, tbs []byte, ext CTExtensions) ([]byte, error) {
|
||||
if err := checkCertificateFormat(tbs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := checkExtensionsFormat(ext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, CertificateTimestampSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, PrecertLogEntryType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := buf.Write(issuerKeyHash[:]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, tbs, CertificateLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := writeVarBytes(&buf, ext, ExtensionsLengthBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func serializeV1SCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
|
||||
if sct.SCTVersion != V1 {
|
||||
return nil, fmt.Errorf("unsupported SCT version, expected V1, but got %s", sct.SCTVersion)
|
||||
}
|
||||
if entry.Leaf.LeafType != TimestampedEntryLeafType {
|
||||
return nil, fmt.Errorf("Unsupported leaf type %s", entry.Leaf.LeafType)
|
||||
}
|
||||
switch entry.Leaf.TimestampedEntry.EntryType {
|
||||
case X509LogEntryType:
|
||||
return serializeV1CertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.X509Entry, entry.Leaf.TimestampedEntry.Extensions)
|
||||
case PrecertLogEntryType:
|
||||
return serializeV1PrecertSCTSignatureInput(sct.Timestamp, entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
|
||||
entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
|
||||
entry.Leaf.TimestampedEntry.Extensions)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown TimestampedEntryLeafType %s", entry.Leaf.TimestampedEntry.EntryType)
|
||||
}
|
||||
}
|
||||
|
||||
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
|
||||
// the correct format for signing.
|
||||
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return serializeV1SCTSignatureInput(sct, entry)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// SerializedLength returns the space (in bytes) needed to serialize this SCT, or an error for an unrecognized version.
|
||||
func (sct SignedCertificateTimestamp) SerializedLength() (int, error) {
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
extLen := len(sct.Extensions)
|
||||
sigLen := len(sct.Signature.Signature)
|
||||
return 1 + 32 + 8 + 2 + extLen + 2 + 2 + sigLen, nil
|
||||
default:
|
||||
return 0, ErrInvalidVersion
|
||||
}
|
||||
}
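The V1 arithmetic above mirrors the wire layout written by serializeV1SCTHere below: 1 byte of version, 32 bytes of LogID, 8 bytes of timestamp, a 2-byte extensions length prefix plus the extensions, 2 bytes of hash and signature algorithm identifiers, and a 2-byte signature length prefix plus the signature itself.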
|
||||
|
||||
func serializeV1SCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
|
||||
if sct.SCTVersion != V1 {
|
||||
return nil, ErrInvalidVersion
|
||||
}
|
||||
sctLen, err := sct.SerializedLength()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if here == nil {
|
||||
here = make([]byte, sctLen)
|
||||
}
|
||||
if len(here) < sctLen {
|
||||
return nil, ErrNotEnoughBuffer
|
||||
}
|
||||
if err := checkExtensionsFormat(sct.Extensions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
here = here[0:sctLen]
|
||||
|
||||
// Write Version
|
||||
here[0] = byte(sct.SCTVersion)
|
||||
|
||||
// Write LogID
|
||||
copy(here[1:33], sct.LogID[:])
|
||||
|
||||
// Write Timestamp
|
||||
binary.BigEndian.PutUint64(here[33:41], sct.Timestamp)
|
||||
|
||||
// Write Extensions
|
||||
extLen := len(sct.Extensions)
|
||||
binary.BigEndian.PutUint16(here[41:43], uint16(extLen))
|
||||
n := 43 + extLen
|
||||
copy(here[43:n], sct.Extensions)
|
||||
|
||||
// Write Signature
|
||||
_, err = marshalDigitallySignedHere(sct.Signature, here[n:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return here, nil
|
||||
}
|
||||
|
||||
// SerializeSCTHere serializes the passed in sct into the format specified
|
||||
// by RFC6962 section 3.2.
|
||||
// If a byte slice here is provided then it will attempt to serialize into the
|
||||
// provided byte slice; ErrNotEnoughBuffer will be returned if the buffer is
|
||||
// too small.
|
||||
// If a nil byte slice is provided, a buffer will be allocated for you.
|
||||
// The returned slice will be sliced to the correct length.
|
||||
func SerializeSCTHere(sct SignedCertificateTimestamp, here []byte) ([]byte, error) {
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return serializeV1SCTHere(sct, here)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// SerializeSCT serializes the passed in sct into the format specified
|
||||
// by RFC6962 section 3.2
|
||||
// Equivalent to SerializeSCTHere(sct, nil)
|
||||
func SerializeSCT(sct SignedCertificateTimestamp) ([]byte, error) {
|
||||
return SerializeSCTHere(sct, nil)
|
||||
}
|
||||
|
||||
func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error {
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil {
|
||||
return err
|
||||
}
|
||||
ext, err := readVarBytes(r, ExtensionsLengthBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sct.Extensions = ext
|
||||
ds, err := UnmarshalDigitallySigned(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sct.Signature = *ds
|
||||
return nil
|
||||
}
|
||||
|
||||
func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) {
|
||||
var sct SignedCertificateTimestamp
|
||||
if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch sct.SCTVersion {
|
||||
case V1:
|
||||
return &sct, deserializeSCTV1(r, &sct)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func serializeV1STHSignatureInput(sth SignedTreeHead) ([]byte, error) {
|
||||
if sth.Version != V1 {
|
||||
return nil, fmt.Errorf("invalid STH version %d", sth.Version)
|
||||
}
|
||||
if sth.TreeSize < 0 {
|
||||
return nil, fmt.Errorf("invalid tree size %d", sth.TreeSize)
|
||||
}
|
||||
if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
|
||||
return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := binary.Write(&buf, binary.BigEndian, V1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, TreeHashSignatureType); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.Timestamp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.TreeSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := binary.Write(&buf, binary.BigEndian, sth.SHA256RootHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// SerializeSTHSignatureInput serializes the passed in sth into the correct
|
||||
// format for signing.
|
||||
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
|
||||
switch sth.Version {
|
||||
case V1:
|
||||
return serializeV1STHSignatureInput(sth)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
|
||||
}
|
||||
}
|
131
vendor/github.com/google/certificate-transparency/go/signatures.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
package ct
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var allowVerificationWithNonCompliantKeys = flag.Bool("allow_verification_with_non_compliant_keys", false,
|
||||
"Allow a SignatureVerifier to use keys which are technically non-compliant with RFC6962.")
|
||||
|
||||
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within, the SHA-256 hash of its DER encoding, and any remaining unread bytes, or an error.
|
||||
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
|
||||
p, rest := pem.Decode(b)
|
||||
if p == nil {
|
||||
return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
|
||||
}
|
||||
k, err := x509.ParsePKIXPublicKey(p.Bytes)
|
||||
return k, sha256.Sum256(p.Bytes), rest, err
|
||||
}
|
||||
|
||||
// SignatureVerifier can verify signatures on SCTs and STHs
|
||||
type SignatureVerifier struct {
|
||||
pubKey crypto.PublicKey
|
||||
}
|
||||
|
||||
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
|
||||
func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
|
||||
switch pkType := pk.(type) {
|
||||
case *rsa.PublicKey:
|
||||
if pkType.N.BitLen() < 2048 {
|
||||
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
|
||||
if !(*allowVerificationWithNonCompliantKeys) {
|
||||
return nil, e
|
||||
}
|
||||
log.Printf("WARNING: %v", e)
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
params := *(pkType.Params())
|
||||
if params != *elliptic.P256().Params() {
|
||||
e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
|
||||
if !(*allowVerificationWithNonCompliantKeys) {
|
||||
return nil, e
|
||||
}
|
||||
log.Printf("WARNING: %v", e)
|
||||
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported public key type %v", pkType)
|
||||
}
|
||||
|
||||
return &SignatureVerifier{
|
||||
pubKey: pk,
|
||||
}, nil
|
||||
}
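// Editorial sketch, not part of the vendored file: build a verifier from a
// log's PEM-encoded public key. Assumes:
//   import ct "github.com/google/certificate-transparency/go"
func verifierFromPEM(pemBytes []byte) (*ct.SignatureVerifier, ct.SHA256Hash, error) {
	pk, keyHash, _, err := ct.PublicKeyFromPEM(pemBytes)
	if err != nil {
		return nil, keyHash, err
	}
	// keyHash is the SHA-256 of the key's DER encoding, i.e. the log ID
	// that appears in SCTs and STHs issued by this log.
	v, err := ct.NewSignatureVerifier(pk)
	return v, keyHash, err
}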
|
||||
|
||||
// verifySignature verifies that the passed in signature over data was created by our PublicKey.
|
||||
// Currently, only SHA256 is supported as a HashAlgorithm, and only ECDSA and RSA signatures are supported.
|
||||
func (s SignatureVerifier) verifySignature(data []byte, sig DigitallySigned) error {
|
||||
if sig.HashAlgorithm != SHA256 {
|
||||
return fmt.Errorf("unsupported HashAlgorithm in signature: %v", sig.HashAlgorithm)
|
||||
}
|
||||
|
||||
hasherType := crypto.SHA256
|
||||
hasher := hasherType.New()
|
||||
if _, err := hasher.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write to hasher: %v", err)
|
||||
}
|
||||
hash := hasher.Sum([]byte{})
|
||||
|
||||
switch sig.SignatureAlgorithm {
|
||||
case RSA:
|
||||
rsaKey, ok := s.pubKey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot verify RSA signature with %T key", s.pubKey)
|
||||
}
|
||||
if err := rsa.VerifyPKCS1v15(rsaKey, hasherType, hash, sig.Signature); err != nil {
|
||||
return fmt.Errorf("failed to verify rsa signature: %v", err)
|
||||
}
|
||||
case ECDSA:
|
||||
ecdsaKey, ok := s.pubKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("cannot verify ECDSA signature with %T key", s.pubKey)
|
||||
}
|
||||
var ecdsaSig struct {
|
||||
R, S *big.Int
|
||||
}
|
||||
rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
|
||||
}
|
||||
if len(rest) != 0 {
|
||||
log.Printf("Garbage following signature %v", rest)
|
||||
}
|
||||
|
||||
if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
|
||||
return errors.New("failed to verify ecdsa signature")
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported signature type %v", sig.SignatureAlgorithm)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry
|
||||
func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
|
||||
sctData, err := SerializeSCTSignatureInput(sct, entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.verifySignature(sctData, sct.Signature)
|
||||
}
|
||||
|
||||
// VerifySTHSignature verifies that the STH's signature is valid.
|
||||
func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
|
||||
sthData, err := SerializeSTHSignatureInput(sth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.verifySignature(sthData, sth.TreeHeadSignature)
|
||||
}
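// Editorial sketch, not part of the vendored file: check an SCT and an STH
// with the same verifier. entry, sct and sth are assumed to come from a log
// client elsewhere. Assumes:
//   import ct "github.com/google/certificate-transparency/go"
func verifySCTAndSTH(v *ct.SignatureVerifier, entry ct.LogEntry, sct ct.SignedCertificateTimestamp, sth ct.SignedTreeHead) error {
	if err := v.VerifySCTSignature(sct, entry); err != nil {
		return err
	}
	return v.VerifySTHSignature(sth)
}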
|
363
vendor/github.com/google/certificate-transparency/go/types.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
||||
package ct
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/certificate-transparency/go/x509"
|
||||
)
|
||||
|
||||
const (
|
||||
issuerKeyHashLength = 32
|
||||
)
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// The following structures represent those outlined in the RFC6962 document:
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// LogEntryType represents the LogEntryType enum from section 3.1 of the RFC:
|
||||
// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
|
||||
type LogEntryType uint16
|
||||
|
||||
func (e LogEntryType) String() string {
|
||||
switch e {
|
||||
case X509LogEntryType:
|
||||
return "X509LogEntryType"
|
||||
case PrecertLogEntryType:
|
||||
return "PrecertLogEntryType"
|
||||
}
|
||||
panic(fmt.Sprintf("No string defined for LogEntryType constant value %d", e))
|
||||
}
|
||||
|
||||
// LogEntryType constants, see section 3.1 of RFC6962.
|
||||
const (
|
||||
X509LogEntryType LogEntryType = 0
|
||||
PrecertLogEntryType LogEntryType = 1
|
||||
)
|
||||
|
||||
// MerkleLeafType represents the MerkleLeafType enum from section 3.4 of the
|
||||
// RFC: enum { timestamped_entry(0), (255) } MerkleLeafType;
|
||||
type MerkleLeafType uint8
|
||||
|
||||
func (m MerkleLeafType) String() string {
|
||||
switch m {
|
||||
case TimestampedEntryLeafType:
|
||||
return "TimestampedEntryLeafType"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownLeafType(%d)", m)
|
||||
}
|
||||
}
|
||||
|
||||
// MerkleLeafType constants, see section 3.4 of the RFC.
|
||||
const (
|
||||
TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
|
||||
)
|
||||
|
||||
// Version represents the Version enum from section 3.2 of the RFC:
|
||||
// enum { v1(0), (255) } Version;
|
||||
type Version uint8
|
||||
|
||||
func (v Version) String() string {
|
||||
switch v {
|
||||
case V1:
|
||||
return "V1"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownVersion(%d)", v)
|
||||
}
|
||||
}
|
||||
|
||||
// CT Version constants, see section 3.2 of the RFC.
|
||||
const (
|
||||
V1 Version = 0
|
||||
)
|
||||
|
||||
// SignatureType differentiates STH signatures from SCT signatures, see RFC
|
||||
// section 3.2
|
||||
type SignatureType uint8
|
||||
|
||||
func (st SignatureType) String() string {
|
||||
switch st {
|
||||
case CertificateTimestampSignatureType:
|
||||
return "CertificateTimestamp"
|
||||
case TreeHashSignatureType:
|
||||
return "TreeHash"
|
||||
default:
|
||||
return fmt.Sprintf("UnknownSignatureType(%d)", st)
|
||||
}
|
||||
}
|
||||
|
||||
// SignatureType constants, see RFC section 3.2
|
||||
const (
|
||||
CertificateTimestampSignatureType SignatureType = 0
|
||||
TreeHashSignatureType SignatureType = 1
|
||||
)
|
||||
|
||||
// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
|
||||
// (section 3.1)
|
||||
type ASN1Cert []byte
|
||||
|
||||
// PreCert represents a Precertificate (section 3.2)
|
||||
type PreCert struct {
|
||||
IssuerKeyHash [issuerKeyHashLength]byte
|
||||
TBSCertificate []byte
|
||||
}
|
||||
|
||||
// CTExtensions is a representation of the raw bytes of any CtExtension
|
||||
// structure (see section 3.2)
|
||||
type CTExtensions []byte
|
||||
|
||||
// MerkleTreeNode represents an internal node in the CT tree
|
||||
type MerkleTreeNode []byte
|
||||
|
||||
// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
|
||||
// 4.4)
|
||||
type ConsistencyProof []MerkleTreeNode
|
||||
|
||||
// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5)
|
||||
type AuditPath []MerkleTreeNode
|
||||
|
||||
// LeafInput represents a serialized MerkleTreeLeaf structure
|
||||
type LeafInput []byte
|
||||
|
||||
// HashAlgorithm from the DigitallySigned struct
|
||||
type HashAlgorithm byte
|
||||
|
||||
// HashAlgorithm constants
|
||||
const (
|
||||
None HashAlgorithm = 0
|
||||
MD5 HashAlgorithm = 1
|
||||
SHA1 HashAlgorithm = 2
|
||||
SHA224 HashAlgorithm = 3
|
||||
SHA256 HashAlgorithm = 4
|
||||
SHA384 HashAlgorithm = 5
|
||||
SHA512 HashAlgorithm = 6
|
||||
)
|
||||
|
||||
func (h HashAlgorithm) String() string {
|
||||
switch h {
|
||||
case None:
|
||||
return "None"
|
||||
case MD5:
|
||||
return "MD5"
|
||||
case SHA1:
|
||||
return "SHA1"
|
||||
case SHA224:
|
||||
return "SHA224"
|
||||
case SHA256:
|
||||
return "SHA256"
|
||||
case SHA384:
|
||||
return "SHA384"
|
||||
case SHA512:
|
||||
return "SHA512"
|
||||
default:
|
||||
return fmt.Sprintf("UNKNOWN(%d)", h)
|
||||
}
|
||||
}
|
||||
|
||||
// SignatureAlgorithm from the DigitallySigned struct
|
||||
type SignatureAlgorithm byte
|
||||
|
||||
// SignatureAlgorithm constants
|
||||
const (
|
||||
Anonymous SignatureAlgorithm = 0
|
||||
RSA SignatureAlgorithm = 1
|
||||
DSA SignatureAlgorithm = 2
|
||||
ECDSA SignatureAlgorithm = 3
|
||||
)
|
||||
|
||||
func (s SignatureAlgorithm) String() string {
|
||||
switch s {
|
||||
case Anonymous:
|
||||
return "Anonymous"
|
||||
case RSA:
|
||||
return "RSA"
|
||||
case DSA:
|
||||
return "DSA"
|
||||
case ECDSA:
|
||||
return "ECDSA"
|
||||
default:
|
||||
return fmt.Sprintf("UNKNOWN(%d)", s)
|
||||
}
|
||||
}
|
||||
|
||||
// DigitallySigned represents an RFC5246 DigitallySigned structure
|
||||
type DigitallySigned struct {
|
||||
HashAlgorithm HashAlgorithm
|
||||
SignatureAlgorithm SignatureAlgorithm
|
||||
Signature []byte
|
||||
}
|
||||
|
||||
// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
|
||||
// Returns an error if the base64 data is invalid.
|
||||
func (d *DigitallySigned) FromBase64String(b64 string) error {
|
||||
raw, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
|
||||
}
|
||||
ds, err := UnmarshalDigitallySigned(bytes.NewReader(raw))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
|
||||
}
|
||||
*d = *ds
|
||||
return nil
|
||||
}
|
||||
|
||||
// Base64String returns the base64 representation of the DigitallySigned struct.
|
||||
func (d DigitallySigned) Base64String() (string, error) {
|
||||
b, err := MarshalDigitallySigned(d)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaller interface.
|
||||
func (d DigitallySigned) MarshalJSON() ([]byte, error) {
|
||||
b64, err := d.Base64String()
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
return []byte(`"` + b64 + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
|
||||
var content string
|
||||
if err := json.Unmarshal(b, &content); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
|
||||
}
|
||||
return d.FromBase64String(content)
|
||||
}
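// Editorial sketch, not part of the vendored file: DigitallySigned travels
// through JSON as a single base64 string, via the Marshal/Unmarshal methods
// above. Assumes:
//   import (
//       "encoding/json"
//       ct "github.com/google/certificate-transparency/go"
//   )
func digitallySignedJSONRoundTrip(ds ct.DigitallySigned) (ct.DigitallySigned, error) {
	b, err := json.Marshal(ds) // produces `"<base64 of the TLS encoding>"`
	if err != nil {
		return ct.DigitallySigned{}, err
	}
	var out ct.DigitallySigned
	err = json.Unmarshal(b, &out)
	return out, err
}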
|
||||
|
||||
// LogEntry represents the contents of an entry in a CT log, see section 3.1.
|
||||
type LogEntry struct {
|
||||
Index int64
|
||||
Leaf MerkleTreeLeaf
|
||||
X509Cert *x509.Certificate
|
||||
Precert *Precertificate
|
||||
Chain []ASN1Cert
|
||||
}
|
||||
|
||||
// SHA256Hash represents the output from the SHA256 hash function.
|
||||
type SHA256Hash [sha256.Size]byte
|
||||
|
||||
// FromBase64String populates the SHA256Hash struct with the contents of the base64 data passed in.
|
||||
func (s *SHA256Hash) FromBase64String(b64 string) error {
|
||||
bs, err := base64.StdEncoding.DecodeString(b64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unbase64 LogID: %v", err)
|
||||
}
|
||||
if len(bs) != sha256.Size {
|
||||
return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
|
||||
}
|
||||
copy(s[:], bs)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Base64String returns the base64 representation of this SHA256Hash.
|
||||
func (s SHA256Hash) Base64String() string {
|
||||
return base64.StdEncoding.EncodeToString(s[:])
|
||||
}
|
||||
|
||||
// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
|
||||
func (s SHA256Hash) MarshalJSON() ([]byte, error) {
|
||||
return []byte(`"` + s.Base64String() + `"`), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaller interface.
|
||||
func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
|
||||
var content string
|
||||
if err := json.Unmarshal(b, &content); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
|
||||
}
|
||||
return s.FromBase64String(content)
|
||||
}
|
||||
|
||||
// SignedTreeHead represents the structure returned by the get-sth CT method
|
||||
// after base64 decoding. See sections 3.5 and 4.3 of the RFC.
|
||||
type SignedTreeHead struct {
|
||||
Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
|
||||
TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
|
||||
Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
|
||||
SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
|
||||
TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // The Log's signature for this STH (see RFC section 3.5)
|
||||
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
|
||||
}
|
||||
|
||||
// SignedCertificateTimestamp represents the structure returned by the
|
||||
// add-chain and add-pre-chain methods after base64 decoding. (see RFC sections
|
||||
// 3.2, 4.1 and 4.2)
|
||||
type SignedCertificateTimestamp struct {
|
||||
SCTVersion Version // The version of the protocol to which the SCT conforms
|
||||
LogID SHA256Hash // the SHA-256 hash of the log's public key, calculated over
|
||||
// the DER encoding of the key represented as SubjectPublicKeyInfo.
|
||||
Timestamp uint64 // Timestamp (in ms since the UNIX epoch) at which the SCT was issued
|
||||
Extensions CTExtensions // For future extensions to the protocol
|
||||
Signature DigitallySigned // The Log's signature for this SCT
|
||||
}
|
||||
|
||||
func (s SignedCertificateTimestamp) String() string {
|
||||
return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
|
||||
base64.StdEncoding.EncodeToString(s.LogID[:]),
|
||||
s.Timestamp,
|
||||
s.Extensions,
|
||||
s.Signature)
|
||||
}
|
||||
|
||||
// TimestampedEntry is part of the MerkleTreeLeaf structure.
|
||||
// See RFC section 3.4
|
||||
type TimestampedEntry struct {
|
||||
Timestamp uint64
|
||||
EntryType LogEntryType
|
||||
X509Entry ASN1Cert
|
||||
PrecertEntry PreCert
|
||||
Extensions CTExtensions
|
||||
}
|
||||
|
||||
// MerkleTreeLeaf represents the deserialized structure of the hash input for the
|
||||
// leaves of a log's Merkle tree. See RFC section 3.4
|
||||
type MerkleTreeLeaf struct {
|
||||
Version Version // the version of the protocol to which the MerkleTreeLeaf corresponds
|
||||
LeafType MerkleLeafType // The type of the leaf input, currently only TimestampedEntry can exist
|
||||
TimestampedEntry TimestampedEntry // The entry data itself
|
||||
}
|
||||
|
||||
// Precertificate represents the parsed CT Precertificate structure.
|
||||
type Precertificate struct {
|
||||
// Raw DER bytes of the precert
|
||||
Raw []byte
|
||||
// SHA256 hash of the issuing key
|
||||
IssuerKeyHash [issuerKeyHashLength]byte
|
||||
// Parsed TBSCertificate structure (held in an x509.Certificate for ease of
|
||||
// access).
|
||||
TBSCertificate x509.Certificate
|
||||
}
|
||||
|
||||
// X509Certificate returns the X.509 Certificate contained within the
|
||||
// MerkleTreeLeaf.
|
||||
// Returns a pointer to an x509.Certificate or a non-nil error.
|
||||
func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
|
||||
return x509.ParseCertificate(m.TimestampedEntry.X509Entry)
|
||||
}
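// Editorial sketch, not part of the vendored file: pull the parsed leaf
// certificate out of an X.509 log entry. Assumes:
//   import (
//       "fmt"
//       ct "github.com/google/certificate-transparency/go"
//       "github.com/google/certificate-transparency/go/x509"
//   )
func leafCertificate(entry ct.LogEntry) (*x509.Certificate, error) {
	te := entry.Leaf.TimestampedEntry
	if te.EntryType != ct.X509LogEntryType {
		return nil, fmt.Errorf("not an X.509 entry: %v", te.EntryType)
	}
	// X509Certificate parses TimestampedEntry.X509Entry with the forked
	// x509 package vendored alongside this one.
	return entry.Leaf.X509Certificate()
}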
|
||||
|
||||
type sctError int
|
||||
|
||||
// Preallocate errors for performance
|
||||
var (
|
||||
ErrInvalidVersion error = sctError(1)
|
||||
ErrNotEnoughBuffer error = sctError(2)
|
||||
)
|
||||
|
||||
func (e sctError) Error() string {
|
||||
switch e {
|
||||
case ErrInvalidVersion:
|
||||
return "invalid SCT version detected"
|
||||
case ErrNotEnoughBuffer:
|
||||
return "provided buffer was too small"
|
||||
default:
|
||||
return "unknown error"
|
||||
}
|
||||
}
|
116
vendor/github.com/google/certificate-transparency/go/x509/cert_pool.go
generated
vendored
Executable file
@ -0,0 +1,116 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"encoding/pem"
|
||||
)
|
||||
|
||||
// CertPool is a set of certificates.
|
||||
type CertPool struct {
|
||||
bySubjectKeyId map[string][]int
|
||||
byName map[string][]int
|
||||
certs []*Certificate
|
||||
}
|
||||
|
||||
// NewCertPool returns a new, empty CertPool.
|
||||
func NewCertPool() *CertPool {
|
||||
return &CertPool{
|
||||
make(map[string][]int),
|
||||
make(map[string][]int),
|
||||
nil,
|
||||
}
|
||||
}
|
||||
|
||||
// findVerifiedParents attempts to find certificates in s which have signed the
|
||||
// given certificate. If any candidates were rejected then errCert will be set
|
||||
// to one of them, arbitrarily, and err will contain the reason that it was
|
||||
// rejected.
|
||||
func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
var candidates []int
|
||||
|
||||
if len(cert.AuthorityKeyId) > 0 {
|
||||
candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
|
||||
}
|
||||
if len(candidates) == 0 {
|
||||
candidates = s.byName[string(cert.RawIssuer)]
|
||||
}
|
||||
|
||||
for _, c := range candidates {
|
||||
if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
|
||||
parents = append(parents, c)
|
||||
} else {
|
||||
errCert = s.certs[c]
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// AddCert adds a certificate to a pool.
|
||||
func (s *CertPool) AddCert(cert *Certificate) {
|
||||
if cert == nil {
|
||||
panic("adding nil Certificate to CertPool")
|
||||
}
|
||||
|
||||
// Check that the certificate isn't being added twice.
|
||||
for _, c := range s.certs {
|
||||
if c.Equal(cert) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
n := len(s.certs)
|
||||
s.certs = append(s.certs, cert)
|
||||
|
||||
if len(cert.SubjectKeyId) > 0 {
|
||||
keyId := string(cert.SubjectKeyId)
|
||||
s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
|
||||
}
|
||||
name := string(cert.RawSubject)
|
||||
s.byName[name] = append(s.byName[name], n)
|
||||
}
|
||||
|
||||
// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
|
||||
// It appends any certificates found to s and returns true if any certificates
|
||||
// were successfully parsed.
|
||||
//
|
||||
// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
|
||||
// of root CAs in a format suitable for this function.
|
||||
func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
|
||||
for len(pemCerts) > 0 {
|
||||
var block *pem.Block
|
||||
block, pemCerts = pem.Decode(pemCerts)
|
||||
if block == nil {
|
||||
break
|
||||
}
|
||||
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
cert, err := ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
s.AddCert(cert)
|
||||
ok = true
|
||||
}
|
||||
|
||||
return
|
||||
}
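// Editorial sketch, not part of the vendored file: load a PEM bundle from
// disk into this package's CertPool. Assumes:
//   import (
//       "errors"
//       "io/ioutil"
//       "github.com/google/certificate-transparency/go/x509"
//   )
func poolFromFile(path string) (*x509.CertPool, error) {
	pemBytes, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	// AppendCertsFromPEM skips non-certificate and unparsable blocks and
	// reports whether at least one certificate was added.
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, errors.New("no certificates parsed from " + path)
	}
	return pool, nil
}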
|
||||
|
||||
// Subjects returns a list of the DER-encoded subjects of
|
||||
// all of the certificates in the pool.
|
||||
func (s *CertPool) Subjects() (res [][]byte) {
|
||||
res = make([][]byte, len(s.certs))
|
||||
for i, c := range s.certs {
|
||||
res[i] = c.RawSubject
|
||||
}
|
||||
return
|
||||
}
|
233
vendor/github.com/google/certificate-transparency/go/x509/pem_decrypt.go
generated
vendored
Executable file
@ -0,0 +1,233 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
|
||||
// generate a key from the password was derived by looking at the OpenSSL
|
||||
// implementation.
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type PEMCipher int
|
||||
|
||||
// Possible values for the EncryptPEMBlock encryption algorithm.
|
||||
const (
|
||||
_ PEMCipher = iota
|
||||
PEMCipherDES
|
||||
PEMCipher3DES
|
||||
PEMCipherAES128
|
||||
PEMCipherAES192
|
||||
PEMCipherAES256
|
||||
)
|
||||
|
||||
// rfc1423Algo holds a method for enciphering a PEM block.
|
||||
type rfc1423Algo struct {
|
||||
cipher PEMCipher
|
||||
name string
|
||||
cipherFunc func(key []byte) (cipher.Block, error)
|
||||
keySize int
|
||||
blockSize int
|
||||
}
|
||||
|
||||
// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
|
||||
// block. The ivSize numbers were taken from the OpenSSL source.
|
||||
var rfc1423Algos = []rfc1423Algo{{
|
||||
cipher: PEMCipherDES,
|
||||
name: "DES-CBC",
|
||||
cipherFunc: des.NewCipher,
|
||||
keySize: 8,
|
||||
blockSize: des.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipher3DES,
|
||||
name: "DES-EDE3-CBC",
|
||||
cipherFunc: des.NewTripleDESCipher,
|
||||
keySize: 24,
|
||||
blockSize: des.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES128,
|
||||
name: "AES-128-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 16,
|
||||
blockSize: aes.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES192,
|
||||
name: "AES-192-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 24,
|
||||
blockSize: aes.BlockSize,
|
||||
}, {
|
||||
cipher: PEMCipherAES256,
|
||||
name: "AES-256-CBC",
|
||||
cipherFunc: aes.NewCipher,
|
||||
keySize: 32,
|
||||
blockSize: aes.BlockSize,
|
||||
},
|
||||
}
|
||||
|
||||
// deriveKey uses a key derivation function to stretch the password into a key
|
||||
// with the number of bits our cipher requires. This algorithm was derived from
|
||||
// the OpenSSL source.
|
||||
func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
|
||||
hash := md5.New()
|
||||
out := make([]byte, c.keySize)
|
||||
var digest []byte
|
||||
|
||||
for i := 0; i < len(out); i += len(digest) {
|
||||
hash.Reset()
|
||||
hash.Write(digest)
|
||||
hash.Write(password)
|
||||
hash.Write(salt)
|
||||
digest = hash.Sum(digest[:0])
|
||||
copy(out[i:], digest)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// IsEncryptedPEMBlock reports whether the PEM block is password encrypted.
|
||||
func IsEncryptedPEMBlock(b *pem.Block) bool {
|
||||
_, ok := b.Headers["DEK-Info"]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IncorrectPasswordError is returned when an incorrect password is detected.
|
||||
var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
|
||||
|
||||
// DecryptPEMBlock takes a password encrypted PEM block and the password used to
|
||||
// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
|
||||
// the DEK-Info header to determine the algorithm used for decryption. If no
|
||||
// DEK-Info header is present, an error is returned. If an incorrect password
|
||||
// is detected an IncorrectPasswordError is returned.
|
||||
func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
|
||||
dek, ok := b.Headers["DEK-Info"]
|
||||
if !ok {
|
||||
return nil, errors.New("x509: no DEK-Info header in block")
|
||||
}
|
||||
|
||||
idx := strings.Index(dek, ",")
|
||||
if idx == -1 {
|
||||
return nil, errors.New("x509: malformed DEK-Info header")
|
||||
}
|
||||
|
||||
mode, hexIV := dek[:idx], dek[idx+1:]
|
||||
ciph := cipherByName(mode)
|
||||
if ciph == nil {
|
||||
return nil, errors.New("x509: unknown encryption mode")
|
||||
}
|
||||
iv, err := hex.DecodeString(hexIV)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(iv) != ciph.blockSize {
|
||||
return nil, errors.New("x509: incorrect IV size")
|
||||
}
|
||||
|
||||
// Based on the OpenSSL implementation. The salt is the first 8 bytes
|
||||
// of the initialization vector.
|
||||
key := ciph.deriveKey(password, iv[:8])
|
||||
block, err := ciph.cipherFunc(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data := make([]byte, len(b.Bytes))
|
||||
dec := cipher.NewCBCDecrypter(block, iv)
|
||||
dec.CryptBlocks(data, b.Bytes)
|
||||
|
||||
// Blocks are padded using a scheme where the last n bytes of padding are all
|
||||
// equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
|
||||
// For example:
|
||||
// [x y z 2 2]
|
||||
// [x y 7 7 7 7 7 7 7]
|
||||
// If we detect a bad padding, we assume it is an invalid password.
|
||||
dlen := len(data)
|
||||
if dlen == 0 || dlen%ciph.blockSize != 0 {
|
||||
return nil, errors.New("x509: invalid padding")
|
||||
}
|
||||
last := int(data[dlen-1])
|
||||
if dlen < last {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
if last == 0 || last > ciph.blockSize {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
for _, val := range data[dlen-last:] {
|
||||
if int(val) != last {
|
||||
return nil, IncorrectPasswordError
|
||||
}
|
||||
}
|
||||
return data[:dlen-last], nil
|
||||
}
|
||||
|
||||
// EncryptPEMBlock returns a PEM block of the specified type holding the
|
||||
// given DER-encoded data encrypted with the specified algorithm and
|
||||
// password.
|
||||
func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
|
||||
ciph := cipherByKey(alg)
|
||||
if ciph == nil {
|
||||
return nil, errors.New("x509: unknown encryption mode")
|
||||
}
|
||||
iv := make([]byte, ciph.blockSize)
|
||||
if _, err := io.ReadFull(rand, iv); err != nil {
|
||||
return nil, errors.New("x509: cannot generate IV: " + err.Error())
|
||||
}
|
||||
// The salt is the first 8 bytes of the initialization vector,
|
||||
// matching the key derivation in DecryptPEMBlock.
|
||||
key := ciph.deriveKey(password, iv[:8])
|
||||
block, err := ciph.cipherFunc(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
enc := cipher.NewCBCEncrypter(block, iv)
|
||||
pad := ciph.blockSize - len(data)%ciph.blockSize
|
||||
encrypted := make([]byte, len(data), len(data)+pad)
|
||||
// We could save this copy by encrypting all the whole blocks in
|
||||
// the data separately, but it doesn't seem worth the additional
|
||||
// code.
|
||||
copy(encrypted, data)
|
||||
// See RFC 1423, section 1.1
|
||||
for i := 0; i < pad; i++ {
|
||||
encrypted = append(encrypted, byte(pad))
|
||||
}
|
||||
enc.CryptBlocks(encrypted, encrypted)
|
||||
|
||||
return &pem.Block{
|
||||
Type: blockType,
|
||||
Headers: map[string]string{
|
||||
"Proc-Type": "4,ENCRYPTED",
|
||||
"DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
|
||||
},
|
||||
Bytes: encrypted,
|
||||
}, nil
|
||||
}
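// Editorial sketch, not part of the vendored file: encrypt DER bytes into a
// password-protected PEM block and decrypt them again. Assumes:
//   import (
//       "crypto/rand"
//       "errors"
//       "github.com/google/certificate-transparency/go/x509"
//   )
func pemEncryptRoundTrip(der, password []byte) ([]byte, error) {
	block, err := x509.EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", der, password, x509.PEMCipherAES256)
	if err != nil {
		return nil, err
	}
	// The DEK-Info header written above is exactly what IsEncryptedPEMBlock
	// and DecryptPEMBlock look for.
	if !x509.IsEncryptedPEMBlock(block) {
		return nil, errors.New("expected an encrypted PEM block")
	}
	return x509.DecryptPEMBlock(block, password)
}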
|
||||
|
||||
func cipherByName(name string) *rfc1423Algo {
|
||||
for i := range rfc1423Algos {
|
||||
alg := &rfc1423Algos[i]
|
||||
if alg.name == name {
|
||||
return alg
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cipherByKey(key PEMCipher) *rfc1423Algo {
|
||||
for i := range rfc1423Algos {
|
||||
alg := &rfc1423Algos[i]
|
||||
if alg.cipher == key {
|
||||
return alg
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
124
vendor/github.com/google/certificate-transparency/go/x509/pkcs1.go
generated
vendored
Executable file
@ -0,0 +1,124 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
// START CT CHANGES
|
||||
"github.com/google/certificate-transparency/go/asn1"
|
||||
// END CT CHANGES
|
||||
"errors"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
|
||||
type pkcs1PrivateKey struct {
|
||||
Version int
|
||||
N *big.Int
|
||||
E int
|
||||
D *big.Int
|
||||
P *big.Int
|
||||
Q *big.Int
|
||||
// We ignore these values, if present, because rsa will calculate them.
|
||||
Dp *big.Int `asn1:"optional"`
|
||||
Dq *big.Int `asn1:"optional"`
|
||||
Qinv *big.Int `asn1:"optional"`
|
||||
|
||||
AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
|
||||
}
|
||||
|
||||
type pkcs1AdditionalRSAPrime struct {
|
||||
Prime *big.Int
|
||||
|
||||
// We ignore these values because rsa will calculate them.
|
||||
Exp *big.Int
|
||||
Coeff *big.Int
|
||||
}
|
||||
|
||||
// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
|
||||
func ParsePKCS1PrivateKey(der []byte) (key *rsa.PrivateKey, err error) {
|
||||
var priv pkcs1PrivateKey
|
||||
rest, err := asn1.Unmarshal(der, &priv)
|
||||
if len(rest) > 0 {
|
||||
err = asn1.SyntaxError{Msg: "trailing data"}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if priv.Version > 1 {
|
||||
return nil, errors.New("x509: unsupported private key version")
|
||||
}
|
||||
|
||||
if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
|
||||
return nil, errors.New("x509: private key contains zero or negative value")
|
||||
}
|
||||
|
||||
key = new(rsa.PrivateKey)
|
||||
key.PublicKey = rsa.PublicKey{
|
||||
E: priv.E,
|
||||
N: priv.N,
|
||||
}
|
||||
|
||||
key.D = priv.D
|
||||
key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
|
||||
key.Primes[0] = priv.P
|
||||
key.Primes[1] = priv.Q
|
||||
for i, a := range priv.AdditionalPrimes {
|
||||
if a.Prime.Sign() <= 0 {
|
||||
return nil, errors.New("x509: private key contains zero or negative prime")
|
||||
}
|
||||
key.Primes[i+2] = a.Prime
|
||||
// We ignore the other two values because rsa will calculate
|
||||
// them as needed.
|
||||
}
|
||||
|
||||
err = key.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key.Precompute()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
|
||||
func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
|
||||
key.Precompute()
|
||||
|
||||
version := 0
|
||||
if len(key.Primes) > 2 {
|
||||
version = 1
|
||||
}
|
||||
|
||||
priv := pkcs1PrivateKey{
|
||||
Version: version,
|
||||
N: key.N,
|
||||
E: key.PublicKey.E,
|
||||
D: key.D,
|
||||
P: key.Primes[0],
|
||||
Q: key.Primes[1],
|
||||
Dp: key.Precomputed.Dp,
|
||||
Dq: key.Precomputed.Dq,
|
||||
Qinv: key.Precomputed.Qinv,
|
||||
}
|
||||
|
||||
priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
|
||||
for i, values := range key.Precomputed.CRTValues {
|
||||
priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
|
||||
priv.AdditionalPrimes[i].Exp = values.Exp
|
||||
priv.AdditionalPrimes[i].Coeff = values.Coeff
|
||||
}
|
||||
|
||||
b, _ := asn1.Marshal(priv)
|
||||
return b
|
||||
}
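// Editorial sketch, not part of the vendored file: round-trip an RSA key
// through the PKCS#1 helpers above. Assumes:
//   import (
//       "crypto/rand"
//       "crypto/rsa"
//       "github.com/google/certificate-transparency/go/x509"
//   )
func pkcs1RoundTrip() (*rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	der := x509.MarshalPKCS1PrivateKey(key)
	// ParsePKCS1PrivateKey validates the key and precomputes CRT values.
	return x509.ParsePKCS1PrivateKey(der)
}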
|
||||
|
||||
// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key.
|
||||
type rsaPublicKey struct {
|
||||
N *big.Int
|
||||
E int
|
||||
}
|
56
vendor/github.com/google/certificate-transparency/go/x509/pkcs8.go
generated
vendored
Executable file
@ -0,0 +1,56 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
// START CT CHANGES
|
||||
"github.com/google/certificate-transparency/go/asn1"
|
||||
"github.com/google/certificate-transparency/go/x509/pkix"
|
||||
// END CT CHANGES
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
|
||||
// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
|
||||
// and RFC5208.
|
||||
type pkcs8 struct {
|
||||
Version int
|
||||
Algo pkix.AlgorithmIdentifier
|
||||
PrivateKey []byte
|
||||
// optional attributes omitted.
|
||||
}
|
||||
|
||||
// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. See
|
||||
// http://www.rsa.com/rsalabs/node.asp?id=2130 and RFC5208.
|
||||
func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
|
||||
var privKey pkcs8
|
||||
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch {
|
||||
case privKey.Algo.Algorithm.Equal(oidPublicKeyRSA):
|
||||
key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
|
||||
}
|
||||
return key, nil
|
||||
|
||||
case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA):
|
||||
bytes := privKey.Algo.Parameters.FullBytes
|
||||
namedCurveOID := new(asn1.ObjectIdentifier)
|
||||
if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
|
||||
namedCurveOID = nil
|
||||
}
|
||||
key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
|
||||
if err != nil {
|
||||
return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
|
||||
}
|
||||
return key, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
|
||||
}
|
||||
}
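// Editorial sketch, not part of the vendored file: ParsePKCS8PrivateKey
// returns an interface{}, so callers type-switch on the concrete key type.
// Assumes:
//   import (
//       "crypto/ecdsa"
//       "crypto/rsa"
//       "fmt"
//       "github.com/google/certificate-transparency/go/x509"
//   )
func describePKCS8Key(der []byte) (string, error) {
	key, err := x509.ParsePKCS8PrivateKey(der)
	if err != nil {
		return "", err
	}
	switch k := key.(type) {
	case *rsa.PrivateKey:
		return fmt.Sprintf("RSA, %d bits", k.N.BitLen()), nil
	case *ecdsa.PrivateKey:
		return fmt.Sprintf("ECDSA on curve %s", k.Curve.Params().Name), nil
	default:
		return "", fmt.Errorf("unexpected key type %T", key)
	}
}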
|
173
vendor/github.com/google/certificate-transparency/go/x509/pkix/pkix.go
generated
vendored
Executable file
@ -0,0 +1,173 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pkix contains shared, low level structures used for ASN.1 parsing
|
||||
// and serialization of X.509 certificates, CRL and OCSP.
|
||||
package pkix
|
||||
|
||||
import (
|
||||
// START CT CHANGES
|
||||
"github.com/google/certificate-transparency/go/asn1"
|
||||
// END CT CHANGES
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 4.1.1.2.
|
||||
type AlgorithmIdentifier struct {
|
||||
Algorithm asn1.ObjectIdentifier
|
||||
Parameters asn1.RawValue `asn1:"optional"`
|
||||
}
|
||||
|
||||
type RDNSequence []RelativeDistinguishedNameSET
|
||||
|
||||
type RelativeDistinguishedNameSET []AttributeTypeAndValue
|
||||
|
||||
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
|
||||
// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
|
||||
type AttributeTypeAndValue struct {
|
||||
Type asn1.ObjectIdentifier
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// Extension represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 4.2.
|
||||
type Extension struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Critical bool `asn1:"optional"`
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Name represents an X.509 distinguished name. This only includes the common
|
||||
// elements of a DN. Additional elements in the name are ignored.
|
||||
type Name struct {
|
||||
Country, Organization, OrganizationalUnit []string
|
||||
Locality, Province []string
|
||||
StreetAddress, PostalCode []string
|
||||
SerialNumber, CommonName string
|
||||
|
||||
Names []AttributeTypeAndValue
|
||||
}
|
||||
|
||||
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
|
||||
for _, rdn := range *rdns {
|
||||
if len(rdn) == 0 {
|
||||
continue
|
||||
}
|
||||
atv := rdn[0]
|
||||
n.Names = append(n.Names, atv)
|
||||
value, ok := atv.Value.(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
t := atv.Type
|
||||
if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
|
||||
switch t[3] {
|
||||
case 3:
|
||||
n.CommonName = value
|
||||
case 5:
|
||||
n.SerialNumber = value
|
||||
case 6:
|
||||
n.Country = append(n.Country, value)
|
||||
case 7:
|
||||
n.Locality = append(n.Locality, value)
|
||||
case 8:
|
||||
n.Province = append(n.Province, value)
|
||||
case 9:
|
||||
n.StreetAddress = append(n.StreetAddress, value)
|
||||
case 10:
|
||||
n.Organization = append(n.Organization, value)
|
||||
case 11:
|
||||
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
|
||||
case 17:
|
||||
n.PostalCode = append(n.PostalCode, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
oidCountry = []int{2, 5, 4, 6}
|
||||
oidOrganization = []int{2, 5, 4, 10}
|
||||
oidOrganizationalUnit = []int{2, 5, 4, 11}
|
||||
oidCommonName = []int{2, 5, 4, 3}
|
||||
oidSerialNumber = []int{2, 5, 4, 5}
|
||||
oidLocality = []int{2, 5, 4, 7}
|
||||
oidProvince = []int{2, 5, 4, 8}
|
||||
oidStreetAddress = []int{2, 5, 4, 9}
|
||||
oidPostalCode = []int{2, 5, 4, 17}
|
||||
)
|
||||
|
||||
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
|
||||
// and returns the new value. The relativeDistinguishedNameSET contains an
|
||||
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
|
||||
// search for AttributeTypeAndValue.
|
||||
func appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
|
||||
if len(values) == 0 {
|
||||
return in
|
||||
}
|
||||
|
||||
s := make([]AttributeTypeAndValue, len(values))
|
||||
for i, value := range values {
|
||||
s[i].Type = oid
|
||||
s[i].Value = value
|
||||
}
|
||||
|
||||
return append(in, s)
|
||||
}
|
||||
|
||||
func (n Name) ToRDNSequence() (ret RDNSequence) {
|
||||
ret = appendRDNs(ret, n.Country, oidCountry)
|
||||
ret = appendRDNs(ret, n.Organization, oidOrganization)
|
||||
ret = appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
|
||||
ret = appendRDNs(ret, n.Locality, oidLocality)
|
||||
ret = appendRDNs(ret, n.Province, oidProvince)
|
||||
ret = appendRDNs(ret, n.StreetAddress, oidStreetAddress)
|
||||
ret = appendRDNs(ret, n.PostalCode, oidPostalCode)
|
||||
if len(n.CommonName) > 0 {
|
||||
ret = appendRDNs(ret, []string{n.CommonName}, oidCommonName)
|
||||
}
|
||||
if len(n.SerialNumber) > 0 {
|
||||
ret = appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
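// Editorial sketch, not part of the vendored file: ToRDNSequence emits one
// RDN per populated field, with CommonName and SerialNumber appended last.
// Assumes:
//   import "github.com/google/certificate-transparency/go/x509/pkix"
func exampleRDNSequence() pkix.RDNSequence {
	name := pkix.Name{
		Country:      []string{"US"},
		Organization: []string{"Example Org"},
		CommonName:   "example.com",
	}
	return name.ToRDNSequence()
}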
|
||||
|
||||
// CertificateList represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
|
||||
// signature.
|
||||
type CertificateList struct {
|
||||
TBSCertList TBSCertificateList
|
||||
SignatureAlgorithm AlgorithmIdentifier
|
||||
SignatureValue asn1.BitString
|
||||
}
|
||||
|
||||
// HasExpired reports whether now is past the expiry time of certList.
|
||||
func (certList *CertificateList) HasExpired(now time.Time) bool {
|
||||
return now.After(certList.TBSCertList.NextUpdate)
|
||||
}
|
||||
|
||||
// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1.
|
||||
type TBSCertificateList struct {
|
||||
Raw asn1.RawContent
|
||||
Version int `asn1:"optional,default:2"`
|
||||
Signature AlgorithmIdentifier
|
||||
Issuer RDNSequence
|
||||
ThisUpdate time.Time
|
||||
NextUpdate time.Time
|
||||
RevokedCertificates []RevokedCertificate `asn1:"optional"`
|
||||
Extensions []Extension `asn1:"tag:0,optional,explicit"`
|
||||
}
|
||||
|
||||
// RevokedCertificate represents the ASN.1 structure of the same name. See RFC
|
||||
// 5280, section 5.1.
|
||||
type RevokedCertificate struct {
|
||||
SerialNumber *big.Int
|
||||
RevocationTime time.Time
|
||||
Extensions []Extension `asn1:"optional"`
|
||||
}
|
17
vendor/github.com/google/certificate-transparency/go/x509/root.go
generated
vendored
Executable file
@ -0,0 +1,17 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import "sync"
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
systemRoots *CertPool
|
||||
)
|
||||
|
||||
func systemRootsPool() *CertPool {
|
||||
once.Do(initSystemRoots)
|
||||
return systemRoots
|
||||
}
|
83
vendor/github.com/google/certificate-transparency/go/x509/root_darwin.go
generated
vendored
Executable file
@ -0,0 +1,83 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin,cgo
|
||||
|
||||
package x509
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
|
||||
#cgo LDFLAGS: -framework CoreFoundation -framework Security
|
||||
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#include <Security/Security.h>
|
||||
|
||||
// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
|
||||
//
|
||||
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
|
||||
// certificates of the system. On failure, the function returns -1.
|
||||
//
|
||||
// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
|
||||
// we've consumed its content.
|
||||
int FetchPEMRootsCTX509(CFDataRef *pemRoots) {
|
||||
if (pemRoots == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFArrayRef certs = NULL;
|
||||
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
|
||||
if (err != noErr) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
|
||||
int i, ncerts = CFArrayGetCount(certs);
|
||||
for (i = 0; i < ncerts; i++) {
|
||||
CFDataRef data = NULL;
|
||||
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
|
||||
if (cert == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
|
||||
// Once we support weak imports via cgo we should prefer that, and fall back to this
|
||||
// for older systems.
|
||||
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
|
||||
if (err != noErr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (data != NULL) {
|
||||
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
|
||||
CFRelease(data);
|
||||
}
|
||||
}
|
||||
|
||||
CFRelease(certs);
|
||||
|
||||
*pemRoots = combinedData;
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
import "unsafe"
|
||||
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := NewCertPool()
|
||||
|
||||
var data C.CFDataRef = nil
|
||||
err := C.FetchPEMRootsCTX509(&data)
|
||||
if err == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
defer C.CFRelease(C.CFTypeRef(data))
|
||||
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
|
||||
roots.AppendCertsFromPEM(buf)
|
||||
systemRoots = roots
|
||||
}
|
33
vendor/github.com/google/certificate-transparency/go/x509/root_plan9.go
generated
vendored
Executable file
@ -0,0 +1,33 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build plan9
|
||||
|
||||
package x509
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/sys/lib/tls/ca.pem",
|
||||
}
|
||||
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := NewCertPool()
|
||||
for _, file := range certFiles {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err == nil {
|
||||
roots.AppendCertsFromPEM(data)
|
||||
systemRoots = roots
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// All of the files failed to load. systemRoots will be nil which will
|
||||
// trigger a specific error at verification time.
|
||||
}
|
14
vendor/github.com/google/certificate-transparency/go/x509/root_stub.go
generated
vendored
Executable file
@ -0,0 +1,14 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin,!cgo
|
||||
|
||||
package x509
|
||||
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
}
|
37
vendor/github.com/google/certificate-transparency/go/x509/root_unix.go
generated
vendored
Executable file
@ -0,0 +1,37 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build dragonfly freebsd linux openbsd netbsd
|
||||
|
||||
package x509
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/ssl/cert.pem", // OpenBSD
|
||||
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
|
||||
}
|
||||
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := NewCertPool()
|
||||
for _, file := range certFiles {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err == nil {
|
||||
roots.AppendCertsFromPEM(data)
|
||||
systemRoots = roots
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// All of the files failed to load. systemRoots will be nil which will
|
||||
// trigger a specific error at verification time.
|
||||
}
|
229
vendor/github.com/google/certificate-transparency/go/x509/root_windows.go
generated
vendored
Executable file
@ -0,0 +1,229 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory
|
||||
// certificate store containing itself and all of the intermediate certificates specified
|
||||
// in the opts.Intermediates CertPool.
|
||||
//
|
||||
// A pointer to the in-memory store is available in the returned CertContext's Store field.
|
||||
// The store is automatically freed when the CertContext is freed using
|
||||
// syscall.CertFreeCertificateContext.
|
||||
func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) {
|
||||
var storeCtx *syscall.CertContext
|
||||
|
||||
leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateContext(leafCtx)
|
||||
|
||||
handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertCloseStore(handle, 0)
|
||||
|
||||
err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if opts.Intermediates != nil {
|
||||
for _, intermediate := range opts.Intermediates.certs {
|
||||
ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw)))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil)
|
||||
syscall.CertFreeCertificateContext(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return storeCtx, nil
|
||||
}
|
||||
|
||||
// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
|
||||
func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) {
|
||||
if simpleChain == nil || count == 0 {
|
||||
return nil, errors.New("x509: invalid simple chain")
|
||||
}
|
||||
|
||||
simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
|
||||
lastChain := simpleChains[count-1]
|
||||
elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
|
||||
for i := 0; i < int(lastChain.NumElements); i++ {
|
||||
// Copy the buf, since ParseCertificate does not create its own copy.
|
||||
cert := elements[i].CertContext
|
||||
encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
|
||||
buf := make([]byte, cert.Length)
|
||||
copy(buf, encodedCert[:])
|
||||
parsedCert, err := ParseCertificate(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chain = append(chain, parsedCert)
|
||||
}
|
||||
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// checkChainTrustStatus checks the trust status of the certificate chain, translating
|
||||
// any errors it finds into Go errors in the process.
|
||||
func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error {
|
||||
if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR {
|
||||
status := chainCtx.TrustStatus.ErrorStatus
|
||||
switch status {
|
||||
case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
|
||||
return CertificateInvalidError{c, Expired}
|
||||
default:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
|
||||
// use as a certificate chain for a SSL/TLS server.
|
||||
func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
|
||||
servernamep, err := syscall.UTF16PtrFromString(opts.DNSName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sslPara := &syscall.SSLExtraCertChainPolicyPara{
|
||||
AuthType: syscall.AUTHTYPE_SERVER,
|
||||
ServerName: servernamep,
|
||||
}
|
||||
sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
|
||||
|
||||
para := &syscall.CertChainPolicyPara{
|
||||
ExtraPolicyPara: uintptr(unsafe.Pointer(sslPara)),
|
||||
}
|
||||
para.Size = uint32(unsafe.Sizeof(*para))
|
||||
|
||||
status := syscall.CertChainPolicyStatus{}
|
||||
err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(mkrautz): use the lChainIndex and lElementIndex fields
|
||||
// of the CertChainPolicyStatus to provide proper context, instead
|
||||
// of using c.
|
||||
if status.Error != 0 {
|
||||
switch status.Error {
|
||||
case syscall.CERT_E_EXPIRED:
|
||||
return CertificateInvalidError{c, Expired}
|
||||
case syscall.CERT_E_CN_NO_MATCH:
|
||||
return HostnameError{c, opts.DNSName}
|
||||
case syscall.CERT_E_UNTRUSTEDROOT:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
default:
|
||||
return UnknownAuthorityError{c, nil, nil}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// systemVerify is like Verify, except that it uses CryptoAPI calls
|
||||
// to build certificate chains and verify them.
|
||||
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
hasDNSName := opts != nil && len(opts.DNSName) > 0
|
||||
|
||||
storeCtx, err := createStoreContext(c, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateContext(storeCtx)
|
||||
|
||||
para := new(syscall.CertChainPara)
|
||||
para.Size = uint32(unsafe.Sizeof(*para))
|
||||
|
||||
// If there's a DNSName set in opts, assume we're verifying
|
||||
// a certificate from a TLS server.
|
||||
if hasDNSName {
|
||||
oids := []*byte{
|
||||
&syscall.OID_PKIX_KP_SERVER_AUTH[0],
|
||||
// Both IE and Chrome allow certificates with
|
||||
// Server Gated Crypto as well. Some certificates
|
||||
// in the wild require them.
|
||||
&syscall.OID_SERVER_GATED_CRYPTO[0],
|
||||
&syscall.OID_SGC_NETSCAPE[0],
|
||||
}
|
||||
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
|
||||
para.RequestedUsage.Usage.Length = uint32(len(oids))
|
||||
para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
|
||||
} else {
|
||||
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND
|
||||
para.RequestedUsage.Usage.Length = 0
|
||||
para.RequestedUsage.Usage.UsageIdentifiers = nil
|
||||
}
|
||||
|
||||
var verifyTime *syscall.Filetime
|
||||
if opts != nil && !opts.CurrentTime.IsZero() {
|
||||
ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano())
|
||||
verifyTime = &ft
|
||||
}
|
||||
|
||||
// CertGetCertificateChain will traverse Windows's root stores
|
||||
// in an attempt to build a verified certificate chain. Once
|
||||
// it has found a verified chain, it stops. MSDN docs on
|
||||
// CERT_CHAIN_CONTEXT:
|
||||
//
|
||||
// When a CERT_CHAIN_CONTEXT is built, the first simple chain
|
||||
// begins with an end certificate and ends with a self-signed
|
||||
// certificate. If that self-signed certificate is not a root
|
||||
// or otherwise trusted certificate, an attempt is made to
|
||||
// build a new chain. CTLs are used to create the new chain
|
||||
// beginning with the self-signed certificate from the original
|
||||
// chain as the end certificate of the new chain. This process
|
||||
// continues building additional simple chains until the first
|
||||
// self-signed certificate is a trusted certificate or until
|
||||
// an additional simple chain cannot be built.
|
||||
//
|
||||
// The result is that we'll only get a single trusted chain to
|
||||
// return to our caller.
|
||||
var chainCtx *syscall.CertChainContext
|
||||
err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer syscall.CertFreeCertificateChain(chainCtx)
|
||||
|
||||
err = checkChainTrustStatus(c, chainCtx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if hasDNSName {
|
||||
err = checkChainSSLServerPolicy(c, chainCtx, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
chains = append(chains, chain)
|
||||
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
}
|
85 vendor/github.com/google/certificate-transparency/go/x509/sec1.go (generated, vendored, executable file)
@ -0,0 +1,85 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
// START CT CHANGES
|
||||
"github.com/google/certificate-transparency/go/asn1"
|
||||
// END CT CHANGES
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
const ecPrivKeyVersion = 1
|
||||
|
||||
// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
|
||||
// References:
|
||||
// RFC5915
|
||||
// SEC1 - http://www.secg.org/download/aid-780/sec1-v2.pdf
|
||||
// Per RFC5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
|
||||
// most cases it is not.
|
||||
type ecPrivateKey struct {
|
||||
Version int
|
||||
PrivateKey []byte
|
||||
NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
|
||||
PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
|
||||
}
|
||||
|
||||
// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
|
||||
func ParseECPrivateKey(der []byte) (key *ecdsa.PrivateKey, err error) {
|
||||
return parseECPrivateKey(nil, der)
|
||||
}
|
||||
|
||||
// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
|
||||
func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
|
||||
oid, ok := oidFromNamedCurve(key.Curve)
|
||||
if !ok {
|
||||
return nil, errors.New("x509: unknown elliptic curve")
|
||||
}
|
||||
return asn1.Marshal(ecPrivateKey{
|
||||
Version: 1,
|
||||
PrivateKey: key.D.Bytes(),
|
||||
NamedCurveOID: oid,
|
||||
PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
|
||||
})
|
||||
}
|
||||
|
||||
// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
|
||||
// The OID for the named curve may be provided from another source (such as
|
||||
// the PKCS8 container) - if it is provided then use this instead of the OID
|
||||
// that may exist in the EC private key structure.
|
||||
func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
|
||||
var privKey ecPrivateKey
|
||||
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
|
||||
return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
|
||||
}
|
||||
if privKey.Version != ecPrivKeyVersion {
|
||||
return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
|
||||
}
|
||||
|
||||
var curve elliptic.Curve
|
||||
if namedCurveOID != nil {
|
||||
curve = namedCurveFromOID(*namedCurveOID)
|
||||
} else {
|
||||
curve = namedCurveFromOID(privKey.NamedCurveOID)
|
||||
}
|
||||
if curve == nil {
|
||||
return nil, errors.New("x509: unknown elliptic curve")
|
||||
}
|
||||
|
||||
k := new(big.Int).SetBytes(privKey.PrivateKey)
|
||||
if k.Cmp(curve.Params().N) >= 0 {
|
||||
return nil, errors.New("x509: invalid elliptic curve private key value")
|
||||
}
|
||||
priv := new(ecdsa.PrivateKey)
|
||||
priv.Curve = curve
|
||||
priv.D = k
|
||||
priv.X, priv.Y = curve.ScalarBaseMult(privKey.PrivateKey)
|
||||
|
||||
return priv, nil
|
||||
}
|
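For orientation only, a minimal round-trip sketch through the two exported helpers in sec1.go above, assuming the vendored import path from Godeps.json and using a freshly generated P-256 key (this snippet is illustrative and not part of the commit):

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	ctx509 "github.com/google/certificate-transparency/go/x509"
)

func main() {
	// Generate a P-256 key, marshal it to SEC1 DER, then parse it back.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	der, err := ctx509.MarshalECPrivateKey(priv)
	if err != nil {
		panic(err)
	}
	parsed, err := ctx509.ParseECPrivateKey(der)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", parsed.D.Cmp(priv.D) == 0)
}
```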
476 vendor/github.com/google/certificate-transparency/go/x509/verify.go (generated, vendored, executable file)
@ -0,0 +1,476 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package x509
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type InvalidReason int
|
||||
|
||||
const (
|
||||
// NotAuthorizedToSign results when a certificate is signed by another
|
||||
// which isn't marked as a CA certificate.
|
||||
NotAuthorizedToSign InvalidReason = iota
|
||||
// Expired results when a certificate has expired, based on the time
|
||||
// given in the VerifyOptions.
|
||||
Expired
|
||||
// CANotAuthorizedForThisName results when an intermediate or root
|
||||
// certificate has a name constraint which doesn't include the name
|
||||
// being checked.
|
||||
CANotAuthorizedForThisName
|
||||
// TooManyIntermediates results when a path length constraint is
|
||||
// violated.
|
||||
TooManyIntermediates
|
||||
// IncompatibleUsage results when the certificate's key usage indicates
|
||||
// that it may only be used for a different purpose.
|
||||
IncompatibleUsage
|
||||
)
|
||||
|
||||
// CertificateInvalidError results when an odd error occurs. Users of this
|
||||
// library probably want to handle all these errors uniformly.
|
||||
type CertificateInvalidError struct {
|
||||
Cert *Certificate
|
||||
Reason InvalidReason
|
||||
}
|
||||
|
||||
func (e CertificateInvalidError) Error() string {
|
||||
switch e.Reason {
|
||||
case NotAuthorizedToSign:
|
||||
return "x509: certificate is not authorized to sign other certificates"
|
||||
case Expired:
|
||||
return "x509: certificate has expired or is not yet valid"
|
||||
case CANotAuthorizedForThisName:
|
||||
return "x509: a root or intermediate certificate is not authorized to sign in this domain"
|
||||
case TooManyIntermediates:
|
||||
return "x509: too many intermediates for path length constraint"
|
||||
case IncompatibleUsage:
|
||||
return "x509: certificate specifies an incompatible key usage"
|
||||
}
|
||||
return "x509: unknown error"
|
||||
}
|
||||
|
||||
// HostnameError results when the set of authorized names doesn't match the
|
||||
// requested name.
|
||||
type HostnameError struct {
|
||||
Certificate *Certificate
|
||||
Host string
|
||||
}
|
||||
|
||||
func (h HostnameError) Error() string {
|
||||
c := h.Certificate
|
||||
|
||||
var valid string
|
||||
if ip := net.ParseIP(h.Host); ip != nil {
|
||||
// Trying to validate an IP
|
||||
if len(c.IPAddresses) == 0 {
|
||||
return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
|
||||
}
|
||||
for _, san := range c.IPAddresses {
|
||||
if len(valid) > 0 {
|
||||
valid += ", "
|
||||
}
|
||||
valid += san.String()
|
||||
}
|
||||
} else {
|
||||
if len(c.DNSNames) > 0 {
|
||||
valid = strings.Join(c.DNSNames, ", ")
|
||||
} else {
|
||||
valid = c.Subject.CommonName
|
||||
}
|
||||
}
|
||||
return "x509: certificate is valid for " + valid + ", not " + h.Host
|
||||
}
|
||||
|
||||
// UnknownAuthorityError results when the certificate issuer is unknown
|
||||
type UnknownAuthorityError struct {
|
||||
cert *Certificate
|
||||
// hintErr contains an error that may be helpful in determining why an
|
||||
// authority wasn't found.
|
||||
hintErr error
|
||||
// hintCert contains a possible authority certificate that was rejected
|
||||
// because of the error in hintErr.
|
||||
hintCert *Certificate
|
||||
}
|
||||
|
||||
func (e UnknownAuthorityError) Error() string {
|
||||
s := "x509: certificate signed by unknown authority"
|
||||
if e.hintErr != nil {
|
||||
certName := e.hintCert.Subject.CommonName
|
||||
if len(certName) == 0 {
|
||||
if len(e.hintCert.Subject.Organization) > 0 {
|
||||
certName = e.hintCert.Subject.Organization[0]
|
||||
} else {
certName = "serial:" + e.hintCert.SerialNumber.String()
}
|
||||
}
|
||||
s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SystemRootsError results when we fail to load the system root certificates.
|
||||
type SystemRootsError struct {
|
||||
}
|
||||
|
||||
func (e SystemRootsError) Error() string {
|
||||
return "x509: failed to load system roots and no roots provided"
|
||||
}
|
||||
|
||||
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
|
||||
// because other PKIX verification APIs have ended up needing many options.
|
||||
type VerifyOptions struct {
|
||||
DNSName string
|
||||
Intermediates *CertPool
|
||||
Roots *CertPool // if nil, the system roots are used
|
||||
CurrentTime time.Time // if zero, the current time is used
|
||||
DisableTimeChecks bool
|
||||
// KeyUsage specifies which Extended Key Usage values are acceptable.
|
||||
// An empty list means ExtKeyUsageServerAuth. Key usage is considered a
|
||||
// constraint down the chain which mirrors Windows CryptoAPI behaviour,
|
||||
// but not the spec. To accept any key usage, include ExtKeyUsageAny.
|
||||
KeyUsages []ExtKeyUsage
|
||||
}
|
||||
|
||||
const (
|
||||
leafCertificate = iota
|
||||
intermediateCertificate
|
||||
rootCertificate
|
||||
)
|
||||
|
||||
// isValid performs validity checks on the c.
|
||||
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
|
||||
if !opts.DisableTimeChecks {
|
||||
now := opts.CurrentTime
|
||||
if now.IsZero() {
|
||||
now = time.Now()
|
||||
}
|
||||
if now.Before(c.NotBefore) || now.After(c.NotAfter) {
|
||||
return CertificateInvalidError{c, Expired}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.PermittedDNSDomains) > 0 {
|
||||
ok := false
|
||||
for _, domain := range c.PermittedDNSDomains {
|
||||
if opts.DNSName == domain ||
|
||||
(strings.HasSuffix(opts.DNSName, domain) &&
|
||||
len(opts.DNSName) >= 1+len(domain) &&
|
||||
opts.DNSName[len(opts.DNSName)-len(domain)-1] == '.') {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return CertificateInvalidError{c, CANotAuthorizedForThisName}
|
||||
}
|
||||
}
|
||||
|
||||
// KeyUsage status flags are ignored. From Engineering Security, Peter
|
||||
// Gutmann: A European government CA marked its signing certificates as
|
||||
// being valid for encryption only, but no-one noticed. Another
|
||||
// European CA marked its signature keys as not being valid for
|
||||
// signatures. A different CA marked its own trusted root certificate
|
||||
// as being invalid for certificate signing. Another national CA
|
||||
// distributed a certificate to be used to encrypt data for the
|
||||
// country’s tax authority that was marked as only being usable for
|
||||
// digital signatures but not for encryption. Yet another CA reversed
|
||||
// the order of the bit flags in the keyUsage due to confusion over
|
||||
// encoding endianness, essentially setting a random keyUsage in
|
||||
// certificates that it issued. Another CA created a self-invalidating
|
||||
// certificate by adding a certificate policy statement stipulating
|
||||
// that the certificate had to be used strictly as specified in the
|
||||
// keyUsage, and a keyUsage containing a flag indicating that the RSA
|
||||
// encryption key could only be used for Diffie-Hellman key agreement.
|
||||
|
||||
if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
|
||||
return CertificateInvalidError{c, NotAuthorizedToSign}
|
||||
}
|
||||
|
||||
if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
|
||||
numIntermediates := len(currentChain) - 1
|
||||
if numIntermediates > c.MaxPathLen {
|
||||
return CertificateInvalidError{c, TooManyIntermediates}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify attempts to verify c by building one or more chains from c to a
|
||||
// certificate in opts.Roots, using certificates in opts.Intermediates if
|
||||
// needed. If successful, it returns one or more chains where the first
|
||||
// element of the chain is c and the last element is from opts.Roots.
|
||||
//
|
||||
// WARNING: this doesn't do any revocation checking.
|
||||
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
// Use Windows's own verification and chain building.
|
||||
if opts.Roots == nil && runtime.GOOS == "windows" {
|
||||
return c.systemVerify(&opts)
|
||||
}
|
||||
|
||||
if opts.Roots == nil {
|
||||
opts.Roots = systemRootsPool()
|
||||
if opts.Roots == nil {
|
||||
return nil, SystemRootsError{}
|
||||
}
|
||||
}
|
||||
|
||||
err = c.isValid(leafCertificate, nil, &opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(opts.DNSName) > 0 {
|
||||
err = c.VerifyHostname(opts.DNSName)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
candidateChains, err := c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
keyUsages := opts.KeyUsages
|
||||
if len(keyUsages) == 0 {
|
||||
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
|
||||
}
|
||||
|
||||
// If any key usage is acceptable then we're done.
|
||||
for _, usage := range keyUsages {
|
||||
if usage == ExtKeyUsageAny {
|
||||
chains = candidateChains
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, candidate := range candidateChains {
|
||||
if checkChainForKeyUsage(candidate, keyUsages) {
|
||||
chains = append(chains, candidate)
|
||||
}
|
||||
}
|
||||
|
||||
if len(chains) == 0 {
|
||||
err = CertificateInvalidError{c, IncompatibleUsage}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
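A hedged usage sketch of Verify and VerifyOptions as defined above, assuming the fork keeps crypto/x509's CertPool helpers and ParseCertificate (the PEM bundle, DER bytes and hostname are caller-supplied placeholders):

```go
package main

import (
	"fmt"

	x509 "github.com/google/certificate-transparency/go/x509"
)

// verifyServerCert builds a root pool from a PEM bundle and verifies leafDER
// for the given DNS name using the Verify method shown above.
func verifyServerCert(rootPEM, leafDER []byte, host string) error {
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(rootPEM) {
		return fmt.Errorf("no roots parsed from PEM bundle")
	}
	leaf, err := x509.ParseCertificate(leafDER)
	if err != nil {
		return err
	}
	// An empty KeyUsages list defaults to ExtKeyUsageServerAuth (see VerifyOptions above).
	_, err = leaf.Verify(x509.VerifyOptions{DNSName: host, Roots: roots})
	return err
}
```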
|
||||
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
|
||||
n := make([]*Certificate, len(chain)+1)
|
||||
copy(n, chain)
|
||||
n[len(chain)] = cert
|
||||
return n
|
||||
}
|
||||
|
||||
func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
|
||||
possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c)
|
||||
for _, rootNum := range possibleRoots {
|
||||
root := opts.Roots.certs[rootNum]
|
||||
err = root.isValid(rootCertificate, currentChain, opts)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
chains = append(chains, appendToFreshChain(currentChain, root))
|
||||
}
|
||||
|
||||
possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c)
|
||||
nextIntermediate:
|
||||
for _, intermediateNum := range possibleIntermediates {
|
||||
intermediate := opts.Intermediates.certs[intermediateNum]
|
||||
for _, cert := range currentChain {
|
||||
if cert == intermediate {
|
||||
continue nextIntermediate
|
||||
}
|
||||
}
|
||||
err = intermediate.isValid(intermediateCertificate, currentChain, opts)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var childChains [][]*Certificate
|
||||
childChains, ok := cache[intermediateNum]
|
||||
if !ok {
|
||||
childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts)
|
||||
cache[intermediateNum] = childChains
|
||||
}
|
||||
chains = append(chains, childChains...)
|
||||
}
|
||||
|
||||
if len(chains) > 0 {
|
||||
err = nil
|
||||
}
|
||||
|
||||
if len(chains) == 0 && err == nil {
|
||||
hintErr := rootErr
|
||||
hintCert := failedRoot
|
||||
if hintErr == nil {
|
||||
hintErr = intermediateErr
|
||||
hintCert = failedIntermediate
|
||||
}
|
||||
err = UnknownAuthorityError{c, hintErr, hintCert}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func matchHostnames(pattern, host string) bool {
|
||||
if len(pattern) == 0 || len(host) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
patternParts := strings.Split(pattern, ".")
|
||||
hostParts := strings.Split(host, ".")
|
||||
|
||||
if len(patternParts) != len(hostParts) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, patternPart := range patternParts {
|
||||
if patternPart == "*" {
|
||||
continue
|
||||
}
|
||||
if patternPart != hostParts[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
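A short hypothetical in-package sketch (not part of the vendored code) illustrating matchHostnames: a `*` pattern label matches exactly one DNS label, and label counts must agree.

```go
// Hypothetical file in package x509, for illustration only.
package x509

import "fmt"

func demoMatchHostnames() {
	fmt.Println(matchHostnames("*.example.com", "www.example.com")) // true: "*" matches one label
	fmt.Println(matchHostnames("*.example.com", "a.b.example.com")) // false: label counts differ
	fmt.Println(matchHostnames("example.com", "www.example.com"))   // false: no wildcard label
}
```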
|
||||
// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
|
||||
// an explicitly ASCII function to avoid any sharp corners resulting from
|
||||
// performing Unicode operations on DNS labels.
|
||||
func toLowerCaseASCII(in string) string {
|
||||
// If the string is already lower-case then there's nothing to do.
|
||||
isAlreadyLowerCase := true
|
||||
for _, c := range in {
|
||||
if c == utf8.RuneError {
|
||||
// If we get a UTF-8 error then there might be
|
||||
// upper-case ASCII bytes in the invalid sequence.
|
||||
isAlreadyLowerCase = false
|
||||
break
|
||||
}
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
isAlreadyLowerCase = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if isAlreadyLowerCase {
|
||||
return in
|
||||
}
|
||||
|
||||
out := []byte(in)
|
||||
for i, c := range out {
|
||||
if 'A' <= c && c <= 'Z' {
|
||||
out[i] += 'a' - 'A'
|
||||
}
|
||||
}
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// VerifyHostname returns nil if c is a valid certificate for the named host.
|
||||
// Otherwise it returns an error describing the mismatch.
|
||||
func (c *Certificate) VerifyHostname(h string) error {
|
||||
// IP addresses may be written in [ ].
|
||||
candidateIP := h
|
||||
if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
|
||||
candidateIP = h[1 : len(h)-1]
|
||||
}
|
||||
if ip := net.ParseIP(candidateIP); ip != nil {
|
||||
// We only match IP addresses against IP SANs.
|
||||
// https://tools.ietf.org/html/rfc6125#appendix-B.2
|
||||
for _, candidate := range c.IPAddresses {
|
||||
if ip.Equal(candidate) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return HostnameError{c, candidateIP}
|
||||
}
|
||||
|
||||
lowered := toLowerCaseASCII(h)
|
||||
|
||||
if len(c.DNSNames) > 0 {
|
||||
for _, match := range c.DNSNames {
|
||||
if matchHostnames(toLowerCaseASCII(match), lowered) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// If Subject Alt Name is given, we ignore the common name.
|
||||
} else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return HostnameError{c, h}
|
||||
}
|
||||
|
||||
func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
|
||||
usages := make([]ExtKeyUsage, len(keyUsages))
|
||||
copy(usages, keyUsages)
|
||||
|
||||
if len(chain) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
usagesRemaining := len(usages)
|
||||
|
||||
// We walk down the list and cross out any usages that aren't supported
|
||||
// by each certificate. If we cross out all the usages, then the chain
|
||||
// is unacceptable.
|
||||
|
||||
NextCert:
for i := len(chain) - 1; i >= 0; i-- {
|
||||
cert := chain[i]
|
||||
if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
|
||||
// The certificate doesn't have any extended key usage specified.
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if usage == ExtKeyUsageAny {
|
||||
// The certificate is explicitly good for any usage.
|
||||
continue NextCert
|
||||
}
|
||||
}
|
||||
|
||||
const invalidUsage ExtKeyUsage = -1
|
||||
|
||||
NextRequestedUsage:
|
||||
for i, requestedUsage := range usages {
|
||||
if requestedUsage == invalidUsage {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, usage := range cert.ExtKeyUsage {
|
||||
if requestedUsage == usage {
|
||||
continue NextRequestedUsage
|
||||
} else if requestedUsage == ExtKeyUsageServerAuth &&
|
||||
(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
|
||||
usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
|
||||
// In order to support COMODO
|
||||
// certificate chains, we have to
|
||||
// accept Netscape or Microsoft SGC
|
||||
// usages as equal to ServerAuth.
|
||||
continue NextRequestedUsage
|
||||
}
|
||||
}
|
||||
|
||||
usages[i] = invalidUsage
|
||||
usagesRemaining--
|
||||
if usagesRemaining == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
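A hypothetical in-package sketch of the cross-out behaviour described above: an intermediate restricted to ServerAuth removes the requested ClientAuth usage, but the chain still passes because ServerAuth survives. ExtKeyUsageClientAuth is assumed to be defined in the package's x509.go (diff suppressed below), as in crypto/x509.

```go
// Hypothetical snippet inside package x509, for illustration only.
leaf := &Certificate{ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}}
intermediate := &Certificate{ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}}
ok := checkChainForKeyUsage(
	[]*Certificate{leaf, intermediate},
	[]ExtKeyUsage{ExtKeyUsageServerAuth, ExtKeyUsageClientAuth},
)
_ = ok // true: ClientAuth is crossed out at the intermediate, ServerAuth remains
```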
1622 vendor/github.com/google/certificate-transparency/go/x509/x509.go (generated, vendored, executable file)
File diff suppressed because it is too large
1 vendor/github.com/mreiferson/go-httpclient/.gitignore (generated, vendored, normal file)
@ -0,0 +1 @@
|
||||
*.sw[op]
|
11 vendor/github.com/mreiferson/go-httpclient/.travis.yml (generated, vendored, normal file)
@ -0,0 +1,11 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.1
|
||||
install:
|
||||
- go get github.com/bmizerany/assert
|
||||
script:
|
||||
- pushd $TRAVIS_BUILD_DIR
|
||||
- go test
|
||||
- popd
|
||||
notifications:
|
||||
email: false
|
21 vendor/github.com/mreiferson/go-httpclient/LICENSE (generated, vendored, normal file)
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012 Matt Reiferson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
41 vendor/github.com/mreiferson/go-httpclient/README.md (generated, vendored, normal file)
@ -0,0 +1,41 @@
|
||||
## go-httpclient
|
||||
|
||||
**requires Go 1.1+** as of `v0.4.0` the API has been completely re-written for Go 1.1 (for a Go
|
||||
1.0.x compatible release see [1adef50](https://github.com/mreiferson/go-httpclient/tree/1adef50))
|
||||
|
||||
[Build Status](http://travis-ci.org/mreiferson/go-httpclient)
|
||||
|
||||
Provides an HTTP Transport that implements the `RoundTripper` interface and
|
||||
can be used as a built in replacement for the standard library's, providing:
|
||||
|
||||
* connection timeouts
|
||||
* request timeouts
|
||||
|
||||
This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
|
||||
Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.
|
||||
|
||||
### Example
|
||||
|
||||
```go
|
||||
transport := &httpclient.Transport{
|
||||
ConnectTimeout: 1*time.Second,
|
||||
RequestTimeout: 10*time.Second,
|
||||
ResponseHeaderTimeout: 5*time.Second,
|
||||
}
|
||||
defer transport.Close()
|
||||
|
||||
client := &http.Client{Transport: transport}
|
||||
req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
```
|
||||
|
||||
*Note:* you will want to re-use a single client object rather than creating one for each request, otherwise you will end up [leaking connections](https://code.google.com/p/go/issues/detail?id=4049#c3).
|
||||
|
||||
### Reference Docs
|
||||
|
||||
For API docs see [godoc](http://godoc.org/github.com/mreiferson/go-httpclient).
|
237 vendor/github.com/mreiferson/go-httpclient/httpclient.go (generated, vendored, normal file)
@ -0,0 +1,237 @@
|
||||
/*
|
||||
Provides an HTTP Transport that implements the `RoundTripper` interface and
|
||||
can be used as a built in replacement for the standard library's, providing:
|
||||
|
||||
* connection timeouts
|
||||
* request timeouts
|
||||
|
||||
This is a thin wrapper around `http.Transport` that sets dial timeouts and uses
|
||||
Go's internal timer scheduler to call the Go 1.1+ `CancelRequest()` API.
|
||||
*/
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// returns the current version of the package
|
||||
func Version() string {
|
||||
return "0.4.1"
|
||||
}
|
||||
|
||||
// Transport implements the RoundTripper interface and can be used as a replacement
|
||||
// for Go's built in http.Transport implementing end-to-end request timeouts.
|
||||
//
|
||||
// transport := &httpclient.Transport{
|
||||
// ConnectTimeout: 1*time.Second,
|
||||
// ResponseHeaderTimeout: 5*time.Second,
|
||||
// RequestTimeout: 10*time.Second,
|
||||
// }
|
||||
// defer transport.Close()
|
||||
//
|
||||
// client := &http.Client{Transport: transport}
|
||||
// req, _ := http.NewRequest("GET", "http://127.0.0.1/test", nil)
|
||||
// resp, err := client.Do(req)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer resp.Body.Close()
|
||||
//
|
||||
type Transport struct {
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// *http.Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *url.URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// Dial specifies the dial function for creating TCP
|
||||
// connections. This will override the Transport's ConnectTimeout and
|
||||
// ReadWriteTimeout settings.
|
||||
// If Dial is nil, a dialer is generated on demand matching the Transport's
|
||||
// options.
|
||||
Dial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with
|
||||
// tls.Client. If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// DisableKeepAlives, if true, prevents re-use of TCP connections
|
||||
// between different HTTP requests.
|
||||
DisableKeepAlives bool
|
||||
|
||||
// DisableCompression, if true, prevents the Transport from
|
||||
// requesting compression with an "Accept-Encoding: gzip"
|
||||
// request header when the Request contains no existing
|
||||
// Accept-Encoding value. If the Transport requests gzip on
|
||||
// its own and gets a gzipped response, it's transparently
|
||||
// decoded in the Response.Body. However, if the user
|
||||
// explicitly requested gzip it is not automatically
|
||||
// uncompressed.
|
||||
DisableCompression bool
|
||||
|
||||
// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
|
||||
// (keep-alive) to keep per-host. If zero,
|
||||
// http.DefaultMaxIdleConnsPerHost is used.
|
||||
MaxIdleConnsPerHost int
|
||||
|
||||
// ConnectTimeout, if non-zero, is the maximum amount of time a dial will wait for
|
||||
// a connect to complete.
|
||||
ConnectTimeout time.Duration
|
||||
|
||||
// ResponseHeaderTimeout, if non-zero, specifies the amount of
|
||||
// time to wait for a server's response headers after fully
|
||||
// writing the request (including its body, if any). This
|
||||
// time does not include the time to read the response body.
|
||||
ResponseHeaderTimeout time.Duration
|
||||
|
||||
// RequestTimeout, if non-zero, specifies the amount of time for the entire
|
||||
// request to complete (including all of the above timeouts + entire response body).
|
||||
// This should never be less than the sum total of the above two timeouts.
|
||||
RequestTimeout time.Duration
|
||||
|
||||
// ReadWriteTimeout, if non-zero, will set a deadline for every Read and
|
||||
// Write operation on the request connection.
|
||||
ReadWriteTimeout time.Duration
|
||||
|
||||
// TCPWriteBufferSize, the size of the operating system's write
|
||||
// buffer associated with the connection.
|
||||
TCPWriteBufferSize int
|
||||
|
||||
// TCPReadBufferSize, the size of the operating system's read
|
||||
// buffer associated with the connection.
|
||||
TCPReadBufferSize int
|
||||
|
||||
starter sync.Once
|
||||
transport *http.Transport
|
||||
}
|
||||
|
||||
// Close cleans up the Transport, currently a no-op
|
||||
func (t *Transport) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Transport) lazyStart() {
|
||||
if t.Dial == nil {
|
||||
t.Dial = func(netw, addr string) (net.Conn, error) {
|
||||
c, err := net.DialTimeout(netw, addr, t.ConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if t.TCPReadBufferSize != 0 || t.TCPWriteBufferSize != 0 {
|
||||
if tcpCon, ok := c.(*net.TCPConn); ok {
|
||||
if t.TCPWriteBufferSize != 0 {
|
||||
if err = tcpCon.SetWriteBuffer(t.TCPWriteBufferSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if t.TCPReadBufferSize != 0 {
|
||||
if err = tcpCon.SetReadBuffer(t.TCPReadBufferSize); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = errors.New("Not Tcp Connection")
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if t.ReadWriteTimeout > 0 {
|
||||
timeoutConn := &rwTimeoutConn{
|
||||
TCPConn: c.(*net.TCPConn),
|
||||
rwTimeout: t.ReadWriteTimeout,
|
||||
}
|
||||
return timeoutConn, nil
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
t.transport = &http.Transport{
|
||||
Dial: t.Dial,
|
||||
Proxy: t.Proxy,
|
||||
TLSClientConfig: t.TLSClientConfig,
|
||||
DisableKeepAlives: t.DisableKeepAlives,
|
||||
DisableCompression: t.DisableCompression,
|
||||
MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
|
||||
ResponseHeaderTimeout: t.ResponseHeaderTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transport) CancelRequest(req *http.Request) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.CancelRequest(req)
|
||||
}
|
||||
|
||||
func (t *Transport) CloseIdleConnections() {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.CloseIdleConnections()
|
||||
}
|
||||
|
||||
func (t *Transport) RegisterProtocol(scheme string, rt http.RoundTripper) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
t.transport.RegisterProtocol(scheme, rt)
|
||||
}
|
||||
|
||||
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
|
||||
t.starter.Do(t.lazyStart)
|
||||
|
||||
if t.RequestTimeout > 0 {
|
||||
timer := time.AfterFunc(t.RequestTimeout, func() {
|
||||
t.transport.CancelRequest(req)
|
||||
})
|
||||
|
||||
resp, err = t.transport.RoundTrip(req)
|
||||
if err != nil {
|
||||
timer.Stop()
|
||||
} else {
|
||||
resp.Body = &bodyCloseInterceptor{ReadCloser: resp.Body, timer: timer}
|
||||
}
|
||||
} else {
|
||||
resp, err = t.transport.RoundTrip(req)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type bodyCloseInterceptor struct {
|
||||
io.ReadCloser
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
func (bci *bodyCloseInterceptor) Close() error {
|
||||
bci.timer.Stop()
|
||||
return bci.ReadCloser.Close()
|
||||
}
|
||||
|
||||
// A net.Conn that sets a deadline for every Read or Write operation
|
||||
type rwTimeoutConn struct {
|
||||
*net.TCPConn
|
||||
rwTimeout time.Duration
|
||||
}
|
||||
|
||||
func (c *rwTimeoutConn) Read(b []byte) (int, error) {
|
||||
err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return c.TCPConn.Read(b)
|
||||
}
|
||||
|
||||
func (c *rwTimeoutConn) Write(b []byte) (int, error) {
|
||||
err := c.TCPConn.SetDeadline(time.Now().Add(c.rwTimeout))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return c.TCPConn.Write(b)
|
||||
}
|
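Building on the Transport fields defined above, a hedged configuration sketch that exercises the per-operation deadline (ReadWriteTimeout) and the TCP buffer knobs in addition to the timeouts shown in the README; the URL is the same placeholder the README uses:

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"
	"time"

	"github.com/mreiferson/go-httpclient"
)

func main() {
	transport := &httpclient.Transport{
		ConnectTimeout:        1 * time.Second,
		ResponseHeaderTimeout: 5 * time.Second,
		RequestTimeout:        10 * time.Second,
		ReadWriteTimeout:      2 * time.Second, // deadline applied to every Read/Write
		TCPReadBufferSize:     64 * 1024,
		TCPWriteBufferSize:    64 * 1024,
	}
	defer transport.Close()

	client := &http.Client{Transport: transport}
	resp, err := client.Get("http://127.0.0.1/test")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	log.Printf("read %d bytes", len(body))
}
```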
20 vendor/golang.org/x/crypto/curve25519/const_amd64.s (generated, vendored, normal file)
@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
|
||||
GLOBL ·REDMASK51(SB), 8, $8
|
||||
|
||||
DATA ·_121666_213(SB)/8, $996687872
|
||||
GLOBL ·_121666_213(SB), 8, $8
|
||||
|
||||
DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
|
||||
GLOBL ·_2P0(SB), 8, $8
|
||||
|
||||
DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
|
||||
GLOBL ·_2P1234(SB), 8, $8
|
88 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s (generated, vendored, normal file)
@ -0,0 +1,88 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func cswap(inout *[5]uint64, v uint64)
|
||||
TEXT ·cswap(SB),7,$0
|
||||
MOVQ inout+0(FP),DI
|
||||
MOVQ v+8(FP),SI
|
||||
|
||||
CMPQ SI,$1
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 80(DI),DX
|
||||
MOVQ 8(DI),CX
|
||||
MOVQ 88(DI),R8
|
||||
MOVQ SI,R9
|
||||
CMOVQEQ DX,SI
|
||||
CMOVQEQ R9,DX
|
||||
MOVQ CX,R9
|
||||
CMOVQEQ R8,CX
|
||||
CMOVQEQ R9,R8
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,80(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R8,88(DI)
|
||||
MOVQ 16(DI),SI
|
||||
MOVQ 96(DI),DX
|
||||
MOVQ 24(DI),CX
|
||||
MOVQ 104(DI),R8
|
||||
MOVQ SI,R9
|
||||
CMOVQEQ DX,SI
|
||||
CMOVQEQ R9,DX
|
||||
MOVQ CX,R9
|
||||
CMOVQEQ R8,CX
|
||||
CMOVQEQ R9,R8
|
||||
MOVQ SI,16(DI)
|
||||
MOVQ DX,96(DI)
|
||||
MOVQ CX,24(DI)
|
||||
MOVQ R8,104(DI)
|
||||
MOVQ 32(DI),SI
|
||||
MOVQ 112(DI),DX
|
||||
MOVQ 40(DI),CX
|
||||
MOVQ 120(DI),R8
|
||||
MOVQ SI,R9
|
||||
CMOVQEQ DX,SI
|
||||
CMOVQEQ R9,DX
|
||||
MOVQ CX,R9
|
||||
CMOVQEQ R8,CX
|
||||
CMOVQEQ R9,R8
|
||||
MOVQ SI,32(DI)
|
||||
MOVQ DX,112(DI)
|
||||
MOVQ CX,40(DI)
|
||||
MOVQ R8,120(DI)
|
||||
MOVQ 48(DI),SI
|
||||
MOVQ 128(DI),DX
|
||||
MOVQ 56(DI),CX
|
||||
MOVQ 136(DI),R8
|
||||
MOVQ SI,R9
|
||||
CMOVQEQ DX,SI
|
||||
CMOVQEQ R9,DX
|
||||
MOVQ CX,R9
|
||||
CMOVQEQ R8,CX
|
||||
CMOVQEQ R9,R8
|
||||
MOVQ SI,48(DI)
|
||||
MOVQ DX,128(DI)
|
||||
MOVQ CX,56(DI)
|
||||
MOVQ R8,136(DI)
|
||||
MOVQ 64(DI),SI
|
||||
MOVQ 144(DI),DX
|
||||
MOVQ 72(DI),CX
|
||||
MOVQ 152(DI),R8
|
||||
MOVQ SI,R9
|
||||
CMOVQEQ DX,SI
|
||||
CMOVQEQ R9,DX
|
||||
MOVQ CX,R9
|
||||
CMOVQEQ R8,CX
|
||||
CMOVQEQ R9,R8
|
||||
MOVQ SI,64(DI)
|
||||
MOVQ DX,144(DI)
|
||||
MOVQ CX,72(DI)
|
||||
MOVQ R8,152(DI)
|
||||
MOVQ DI,AX
|
||||
MOVQ SI,DX
|
||||
RET
|
841 vendor/golang.org/x/crypto/curve25519/curve25519.go (generated, vendored, normal file)
@ -0,0 +1,841 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// We have an implementation in amd64 assembly so this code is only run on
|
||||
// non-amd64 platforms. The amd64 assembly does not support gccgo.
|
||||
// +build !amd64 gccgo appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
// This code is a port of the public domain, "ref10" implementation of
|
||||
// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
|
||||
|
||||
// fieldElement represents an element of the field GF(2^255 - 19). An element
|
||||
// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
|
||||
// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
|
||||
// context.
|
||||
type fieldElement [10]int32
|
||||
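For reference, the limb representation described in the comment above amounts to the following, with exponents ceil(25.5*i); the exponents elided by the "..." in the comment are 2^128, 2^153, 2^179 and 2^204:

```latex
t \;=\; \sum_{i=0}^{9} t_i\, 2^{\lceil 25.5\, i \rceil}
  \;=\; t_0 + 2^{26} t_1 + 2^{51} t_2 + 2^{77} t_3 + 2^{102} t_4
      + 2^{128} t_5 + 2^{153} t_6 + 2^{179} t_7 + 2^{204} t_8 + 2^{230} t_9
```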
|
||||
func feZero(fe *fieldElement) {
|
||||
for i := range fe {
|
||||
fe[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func feOne(fe *fieldElement) {
|
||||
feZero(fe)
|
||||
fe[0] = 1
|
||||
}
|
||||
|
||||
func feAdd(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] + b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feSub(dst, a, b *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = a[i] - b[i]
|
||||
}
|
||||
}
|
||||
|
||||
func feCopy(dst, src *fieldElement) {
|
||||
for i := range dst {
|
||||
dst[i] = src[i]
|
||||
}
|
||||
}
|
||||
|
||||
// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
|
||||
//
|
||||
// Preconditions: b in {0,1}.
|
||||
func feCSwap(f, g *fieldElement, b int32) {
|
||||
var x fieldElement
|
||||
b = -b
|
||||
for i := range x {
|
||||
x[i] = b & (f[i] ^ g[i])
|
||||
}
|
||||
|
||||
for i := range f {
|
||||
f[i] ^= x[i]
|
||||
}
|
||||
for i := range g {
|
||||
g[i] ^= x[i]
|
||||
}
|
||||
}
|
||||
|
||||
// load3 reads a 24-bit, little-endian value from in.
|
||||
func load3(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
return r
|
||||
}
|
||||
|
||||
// load4 reads a 32-bit, little-endian value from in.
|
||||
func load4(in []byte) int64 {
|
||||
var r int64
|
||||
r = int64(in[0])
|
||||
r |= int64(in[1]) << 8
|
||||
r |= int64(in[2]) << 16
|
||||
r |= int64(in[3]) << 24
|
||||
return r
|
||||
}
|
||||
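A quick worked example of the two little-endian loaders above, with hypothetical input bytes (in-package sketch, illustration only):

```go
// What the loaders return for small inputs.
fmt.Println(load3([]byte{0x01, 0x02, 0x03}))       // 197121   (= 0x030201)
fmt.Println(load4([]byte{0x01, 0x02, 0x03, 0x04})) // 67305985 (= 0x04030201)
```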
|
||||
func feFromBytes(dst *fieldElement, src *[32]byte) {
|
||||
h0 := load4(src[:])
|
||||
h1 := load3(src[4:]) << 6
|
||||
h2 := load3(src[7:]) << 5
|
||||
h3 := load3(src[10:]) << 3
|
||||
h4 := load3(src[13:]) << 2
|
||||
h5 := load4(src[16:])
|
||||
h6 := load3(src[20:]) << 7
|
||||
h7 := load3(src[23:]) << 5
|
||||
h8 := load3(src[26:]) << 4
|
||||
h9 := load3(src[29:]) << 2
|
||||
|
||||
var carry [10]int64
|
||||
carry[9] = (h9 + 1<<24) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + 1<<24) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + 1<<24) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + 1<<24) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + 1<<24) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + 1<<25) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + 1<<25) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + 1<<25) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + 1<<25) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + 1<<25) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
dst[0] = int32(h0)
|
||||
dst[1] = int32(h1)
|
||||
dst[2] = int32(h2)
|
||||
dst[3] = int32(h3)
|
||||
dst[4] = int32(h4)
|
||||
dst[5] = int32(h5)
|
||||
dst[6] = int32(h6)
|
||||
dst[7] = int32(h7)
|
||||
dst[8] = int32(h8)
|
||||
dst[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feToBytes marshals h to s.
|
||||
// Preconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Write p=2^255-19; q=floor(h/p).
|
||||
// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
|
||||
//
|
||||
// Proof:
|
||||
// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
|
||||
// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
|
||||
//
|
||||
// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
|
||||
// Then 0<y<1.
|
||||
//
|
||||
// Write r=h-pq.
|
||||
// Have 0<=r<=p-1=2^255-20.
|
||||
// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
|
||||
//
|
||||
// Write x=r+19(2^-255)r+y.
|
||||
// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
|
||||
//
|
||||
// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
|
||||
// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
|
||||
func feToBytes(s *[32]byte, h *fieldElement) {
|
||||
var carry [10]int32
|
||||
|
||||
q := (19*h[9] + (1 << 24)) >> 25
|
||||
q = (h[0] + q) >> 26
|
||||
q = (h[1] + q) >> 25
|
||||
q = (h[2] + q) >> 26
|
||||
q = (h[3] + q) >> 25
|
||||
q = (h[4] + q) >> 26
|
||||
q = (h[5] + q) >> 25
|
||||
q = (h[6] + q) >> 26
|
||||
q = (h[7] + q) >> 25
|
||||
q = (h[8] + q) >> 26
|
||||
q = (h[9] + q) >> 25
|
||||
|
||||
// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
|
||||
h[0] += 19 * q
|
||||
// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
|
||||
|
||||
carry[0] = h[0] >> 26
|
||||
h[1] += carry[0]
|
||||
h[0] -= carry[0] << 26
|
||||
carry[1] = h[1] >> 25
|
||||
h[2] += carry[1]
|
||||
h[1] -= carry[1] << 25
|
||||
carry[2] = h[2] >> 26
|
||||
h[3] += carry[2]
|
||||
h[2] -= carry[2] << 26
|
||||
carry[3] = h[3] >> 25
|
||||
h[4] += carry[3]
|
||||
h[3] -= carry[3] << 25
|
||||
carry[4] = h[4] >> 26
|
||||
h[5] += carry[4]
|
||||
h[4] -= carry[4] << 26
|
||||
carry[5] = h[5] >> 25
|
||||
h[6] += carry[5]
|
||||
h[5] -= carry[5] << 25
|
||||
carry[6] = h[6] >> 26
|
||||
h[7] += carry[6]
|
||||
h[6] -= carry[6] << 26
|
||||
carry[7] = h[7] >> 25
|
||||
h[8] += carry[7]
|
||||
h[7] -= carry[7] << 25
|
||||
carry[8] = h[8] >> 26
|
||||
h[9] += carry[8]
|
||||
h[8] -= carry[8] << 26
|
||||
carry[9] = h[9] >> 25
|
||||
h[9] -= carry[9] << 25
|
||||
// h10 = carry9
|
||||
|
||||
// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
|
||||
// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
|
||||
// evidently 2^255 h10-2^255 q = 0.
|
||||
// Goal: Output h[0]+...+2^230 h[9].
|
||||
|
||||
s[0] = byte(h[0] >> 0)
|
||||
s[1] = byte(h[0] >> 8)
|
||||
s[2] = byte(h[0] >> 16)
|
||||
s[3] = byte((h[0] >> 24) | (h[1] << 2))
|
||||
s[4] = byte(h[1] >> 6)
|
||||
s[5] = byte(h[1] >> 14)
|
||||
s[6] = byte((h[1] >> 22) | (h[2] << 3))
|
||||
s[7] = byte(h[2] >> 5)
|
||||
s[8] = byte(h[2] >> 13)
|
||||
s[9] = byte((h[2] >> 21) | (h[3] << 5))
|
||||
s[10] = byte(h[3] >> 3)
|
||||
s[11] = byte(h[3] >> 11)
|
||||
s[12] = byte((h[3] >> 19) | (h[4] << 6))
|
||||
s[13] = byte(h[4] >> 2)
|
||||
s[14] = byte(h[4] >> 10)
|
||||
s[15] = byte(h[4] >> 18)
|
||||
s[16] = byte(h[5] >> 0)
|
||||
s[17] = byte(h[5] >> 8)
|
||||
s[18] = byte(h[5] >> 16)
|
||||
s[19] = byte((h[5] >> 24) | (h[6] << 1))
|
||||
s[20] = byte(h[6] >> 7)
|
||||
s[21] = byte(h[6] >> 15)
|
||||
s[22] = byte((h[6] >> 23) | (h[7] << 3))
|
||||
s[23] = byte(h[7] >> 5)
|
||||
s[24] = byte(h[7] >> 13)
|
||||
s[25] = byte((h[7] >> 21) | (h[8] << 4))
|
||||
s[26] = byte(h[8] >> 4)
|
||||
s[27] = byte(h[8] >> 12)
|
||||
s[28] = byte((h[8] >> 20) | (h[9] << 6))
|
||||
s[29] = byte(h[9] >> 2)
|
||||
s[30] = byte(h[9] >> 10)
|
||||
s[31] = byte(h[9] >> 18)
|
||||
}
|
||||
|
||||
// feMul calculates h = f * g
|
||||
// Can overlap h with f or g.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
//
|
||||
// Notes on implementation strategy:
|
||||
//
|
||||
// Using schoolbook multiplication.
|
||||
// Karatsuba would save a little in some cost models.
|
||||
//
|
||||
// Most multiplications by 2 and 19 are 32-bit precomputations;
|
||||
// cheaper than 64-bit postcomputations.
|
||||
//
|
||||
// There is one remaining multiplication by 19 in the carry chain;
|
||||
// one *19 precomputation can be merged into this,
|
||||
// but the resulting data flow is considerably less clean.
|
||||
//
|
||||
// There are 12 carries below.
|
||||
// 10 of them are 2-way parallelizable and vectorizable.
|
||||
// Can get away with 11 carries, but then data flow is much deeper.
|
||||
//
|
||||
// With tighter constraints on inputs can squeeze carries into int32.
|
||||
func feMul(h, f, g *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
g0 := g[0]
|
||||
g1 := g[1]
|
||||
g2 := g[2]
|
||||
g3 := g[3]
|
||||
g4 := g[4]
|
||||
g5 := g[5]
|
||||
g6 := g[6]
|
||||
g7 := g[7]
|
||||
g8 := g[8]
|
||||
g9 := g[9]
|
||||
g1_19 := 19 * g1 // 1.4*2^29
|
||||
g2_19 := 19 * g2 // 1.4*2^30; still ok
|
||||
g3_19 := 19 * g3
|
||||
g4_19 := 19 * g4
|
||||
g5_19 := 19 * g5
|
||||
g6_19 := 19 * g6
|
||||
g7_19 := 19 * g7
|
||||
g8_19 := 19 * g8
|
||||
g9_19 := 19 * g9
|
||||
f1_2 := 2 * f1
|
||||
f3_2 := 2 * f3
|
||||
f5_2 := 2 * f5
|
||||
f7_2 := 2 * f7
|
||||
f9_2 := 2 * f9
|
||||
f0g0 := int64(f0) * int64(g0)
|
||||
f0g1 := int64(f0) * int64(g1)
|
||||
f0g2 := int64(f0) * int64(g2)
|
||||
f0g3 := int64(f0) * int64(g3)
|
||||
f0g4 := int64(f0) * int64(g4)
|
||||
f0g5 := int64(f0) * int64(g5)
|
||||
f0g6 := int64(f0) * int64(g6)
|
||||
f0g7 := int64(f0) * int64(g7)
|
||||
f0g8 := int64(f0) * int64(g8)
|
||||
f0g9 := int64(f0) * int64(g9)
|
||||
f1g0 := int64(f1) * int64(g0)
|
||||
f1g1_2 := int64(f1_2) * int64(g1)
|
||||
f1g2 := int64(f1) * int64(g2)
|
||||
f1g3_2 := int64(f1_2) * int64(g3)
|
||||
f1g4 := int64(f1) * int64(g4)
|
||||
f1g5_2 := int64(f1_2) * int64(g5)
|
||||
f1g6 := int64(f1) * int64(g6)
|
||||
f1g7_2 := int64(f1_2) * int64(g7)
|
||||
f1g8 := int64(f1) * int64(g8)
|
||||
f1g9_38 := int64(f1_2) * int64(g9_19)
|
||||
f2g0 := int64(f2) * int64(g0)
|
||||
f2g1 := int64(f2) * int64(g1)
|
||||
f2g2 := int64(f2) * int64(g2)
|
||||
f2g3 := int64(f2) * int64(g3)
|
||||
f2g4 := int64(f2) * int64(g4)
|
||||
f2g5 := int64(f2) * int64(g5)
|
||||
f2g6 := int64(f2) * int64(g6)
|
||||
f2g7 := int64(f2) * int64(g7)
|
||||
f2g8_19 := int64(f2) * int64(g8_19)
|
||||
f2g9_19 := int64(f2) * int64(g9_19)
|
||||
f3g0 := int64(f3) * int64(g0)
|
||||
f3g1_2 := int64(f3_2) * int64(g1)
|
||||
f3g2 := int64(f3) * int64(g2)
|
||||
f3g3_2 := int64(f3_2) * int64(g3)
|
||||
f3g4 := int64(f3) * int64(g4)
|
||||
f3g5_2 := int64(f3_2) * int64(g5)
|
||||
f3g6 := int64(f3) * int64(g6)
|
||||
f3g7_38 := int64(f3_2) * int64(g7_19)
|
||||
f3g8_19 := int64(f3) * int64(g8_19)
|
||||
f3g9_38 := int64(f3_2) * int64(g9_19)
|
||||
f4g0 := int64(f4) * int64(g0)
|
||||
f4g1 := int64(f4) * int64(g1)
|
||||
f4g2 := int64(f4) * int64(g2)
|
||||
f4g3 := int64(f4) * int64(g3)
|
||||
f4g4 := int64(f4) * int64(g4)
|
||||
f4g5 := int64(f4) * int64(g5)
|
||||
f4g6_19 := int64(f4) * int64(g6_19)
|
||||
f4g7_19 := int64(f4) * int64(g7_19)
|
||||
f4g8_19 := int64(f4) * int64(g8_19)
|
||||
f4g9_19 := int64(f4) * int64(g9_19)
|
||||
f5g0 := int64(f5) * int64(g0)
|
||||
f5g1_2 := int64(f5_2) * int64(g1)
|
||||
f5g2 := int64(f5) * int64(g2)
|
||||
f5g3_2 := int64(f5_2) * int64(g3)
|
||||
f5g4 := int64(f5) * int64(g4)
|
||||
f5g5_38 := int64(f5_2) * int64(g5_19)
|
||||
f5g6_19 := int64(f5) * int64(g6_19)
|
||||
f5g7_38 := int64(f5_2) * int64(g7_19)
|
||||
f5g8_19 := int64(f5) * int64(g8_19)
|
||||
f5g9_38 := int64(f5_2) * int64(g9_19)
|
||||
f6g0 := int64(f6) * int64(g0)
|
||||
f6g1 := int64(f6) * int64(g1)
|
||||
f6g2 := int64(f6) * int64(g2)
|
||||
f6g3 := int64(f6) * int64(g3)
|
||||
f6g4_19 := int64(f6) * int64(g4_19)
|
||||
f6g5_19 := int64(f6) * int64(g5_19)
|
||||
f6g6_19 := int64(f6) * int64(g6_19)
|
||||
f6g7_19 := int64(f6) * int64(g7_19)
|
||||
f6g8_19 := int64(f6) * int64(g8_19)
|
||||
f6g9_19 := int64(f6) * int64(g9_19)
|
||||
f7g0 := int64(f7) * int64(g0)
|
||||
f7g1_2 := int64(f7_2) * int64(g1)
|
||||
f7g2 := int64(f7) * int64(g2)
|
||||
f7g3_38 := int64(f7_2) * int64(g3_19)
|
||||
f7g4_19 := int64(f7) * int64(g4_19)
|
||||
f7g5_38 := int64(f7_2) * int64(g5_19)
|
||||
f7g6_19 := int64(f7) * int64(g6_19)
|
||||
f7g7_38 := int64(f7_2) * int64(g7_19)
|
||||
f7g8_19 := int64(f7) * int64(g8_19)
|
||||
f7g9_38 := int64(f7_2) * int64(g9_19)
|
||||
f8g0 := int64(f8) * int64(g0)
|
||||
f8g1 := int64(f8) * int64(g1)
|
||||
f8g2_19 := int64(f8) * int64(g2_19)
|
||||
f8g3_19 := int64(f8) * int64(g3_19)
|
||||
f8g4_19 := int64(f8) * int64(g4_19)
|
||||
f8g5_19 := int64(f8) * int64(g5_19)
|
||||
f8g6_19 := int64(f8) * int64(g6_19)
|
||||
f8g7_19 := int64(f8) * int64(g7_19)
|
||||
f8g8_19 := int64(f8) * int64(g8_19)
|
||||
f8g9_19 := int64(f8) * int64(g9_19)
|
||||
f9g0 := int64(f9) * int64(g0)
|
||||
f9g1_38 := int64(f9_2) * int64(g1_19)
|
||||
f9g2_19 := int64(f9) * int64(g2_19)
|
||||
f9g3_38 := int64(f9_2) * int64(g3_19)
|
||||
f9g4_19 := int64(f9) * int64(g4_19)
|
||||
f9g5_38 := int64(f9_2) * int64(g5_19)
|
||||
f9g6_19 := int64(f9) * int64(g6_19)
|
||||
f9g7_38 := int64(f9_2) * int64(g7_19)
|
||||
f9g8_19 := int64(f9) * int64(g8_19)
|
||||
f9g9_38 := int64(f9_2) * int64(g9_19)
|
||||
h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
|
||||
h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
|
||||
h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
|
||||
h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
|
||||
h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
|
||||
h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
|
||||
h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
|
||||
h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
|
||||
h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
|
||||
h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
|
||||
var carry [10]int64
|
||||
|
||||
// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
|
||||
// i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
|
||||
// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
|
||||
// i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
// |h0| <= 2^25
|
||||
// |h4| <= 2^25
|
||||
// |h1| <= 1.51*2^58
|
||||
// |h5| <= 1.51*2^58
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
// |h1| <= 2^24; from now on fits into int32
|
||||
// |h5| <= 2^24; from now on fits into int32
|
||||
// |h2| <= 1.21*2^59
|
||||
// |h6| <= 1.21*2^59
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
// |h2| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h6| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h3| <= 1.51*2^58
|
||||
// |h7| <= 1.51*2^58
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
// |h3| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h7| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h4| <= 1.52*2^33
|
||||
// |h8| <= 1.52*2^33
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
// |h4| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h8| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h5| <= 1.01*2^24
|
||||
// |h9| <= 1.51*2^58
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
// |h9| <= 2^24; from now on fits into int32 unchanged
|
||||
// |h0| <= 1.8*2^37
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
// |h0| <= 2^25; from now on fits into int32 unchanged
|
||||
// |h1| <= 1.01*2^24
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feSquare calculates h = f*f. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feSquare(h, f *fieldElement) {
|
||||
f0 := f[0]
|
||||
f1 := f[1]
|
||||
f2 := f[2]
|
||||
f3 := f[3]
|
||||
f4 := f[4]
|
||||
f5 := f[5]
|
||||
f6 := f[6]
|
||||
f7 := f[7]
|
||||
f8 := f[8]
|
||||
f9 := f[9]
|
||||
f0_2 := 2 * f0
|
||||
f1_2 := 2 * f1
|
||||
f2_2 := 2 * f2
|
||||
f3_2 := 2 * f3
|
||||
f4_2 := 2 * f4
|
||||
f5_2 := 2 * f5
|
||||
f6_2 := 2 * f6
|
||||
f7_2 := 2 * f7
|
||||
f5_38 := 38 * f5 // 1.31*2^30
|
||||
f6_19 := 19 * f6 // 1.31*2^30
|
||||
f7_38 := 38 * f7 // 1.31*2^30
|
||||
f8_19 := 19 * f8 // 1.31*2^30
|
||||
f9_38 := 38 * f9 // 1.31*2^30
|
||||
f0f0 := int64(f0) * int64(f0)
|
||||
f0f1_2 := int64(f0_2) * int64(f1)
|
||||
f0f2_2 := int64(f0_2) * int64(f2)
|
||||
f0f3_2 := int64(f0_2) * int64(f3)
|
||||
f0f4_2 := int64(f0_2) * int64(f4)
|
||||
f0f5_2 := int64(f0_2) * int64(f5)
|
||||
f0f6_2 := int64(f0_2) * int64(f6)
|
||||
f0f7_2 := int64(f0_2) * int64(f7)
|
||||
f0f8_2 := int64(f0_2) * int64(f8)
|
||||
f0f9_2 := int64(f0_2) * int64(f9)
|
||||
f1f1_2 := int64(f1_2) * int64(f1)
|
||||
f1f2_2 := int64(f1_2) * int64(f2)
|
||||
f1f3_4 := int64(f1_2) * int64(f3_2)
|
||||
f1f4_2 := int64(f1_2) * int64(f4)
|
||||
f1f5_4 := int64(f1_2) * int64(f5_2)
|
||||
f1f6_2 := int64(f1_2) * int64(f6)
|
||||
f1f7_4 := int64(f1_2) * int64(f7_2)
|
||||
f1f8_2 := int64(f1_2) * int64(f8)
|
||||
f1f9_76 := int64(f1_2) * int64(f9_38)
|
||||
f2f2 := int64(f2) * int64(f2)
|
||||
f2f3_2 := int64(f2_2) * int64(f3)
|
||||
f2f4_2 := int64(f2_2) * int64(f4)
|
||||
f2f5_2 := int64(f2_2) * int64(f5)
|
||||
f2f6_2 := int64(f2_2) * int64(f6)
|
||||
f2f7_2 := int64(f2_2) * int64(f7)
|
||||
f2f8_38 := int64(f2_2) * int64(f8_19)
|
||||
f2f9_38 := int64(f2) * int64(f9_38)
|
||||
f3f3_2 := int64(f3_2) * int64(f3)
|
||||
f3f4_2 := int64(f3_2) * int64(f4)
|
||||
f3f5_4 := int64(f3_2) * int64(f5_2)
|
||||
f3f6_2 := int64(f3_2) * int64(f6)
|
||||
f3f7_76 := int64(f3_2) * int64(f7_38)
|
||||
f3f8_38 := int64(f3_2) * int64(f8_19)
|
||||
f3f9_76 := int64(f3_2) * int64(f9_38)
|
||||
f4f4 := int64(f4) * int64(f4)
|
||||
f4f5_2 := int64(f4_2) * int64(f5)
|
||||
f4f6_38 := int64(f4_2) * int64(f6_19)
|
||||
f4f7_38 := int64(f4) * int64(f7_38)
|
||||
f4f8_38 := int64(f4_2) * int64(f8_19)
|
||||
f4f9_38 := int64(f4) * int64(f9_38)
|
||||
f5f5_38 := int64(f5) * int64(f5_38)
|
||||
f5f6_38 := int64(f5_2) * int64(f6_19)
|
||||
f5f7_76 := int64(f5_2) * int64(f7_38)
|
||||
f5f8_38 := int64(f5_2) * int64(f8_19)
|
||||
f5f9_76 := int64(f5_2) * int64(f9_38)
|
||||
f6f6_19 := int64(f6) * int64(f6_19)
|
||||
f6f7_38 := int64(f6) * int64(f7_38)
|
||||
f6f8_38 := int64(f6_2) * int64(f8_19)
|
||||
f6f9_38 := int64(f6) * int64(f9_38)
|
||||
f7f7_38 := int64(f7) * int64(f7_38)
|
||||
f7f8_38 := int64(f7_2) * int64(f8_19)
|
||||
f7f9_76 := int64(f7_2) * int64(f9_38)
|
||||
f8f8_19 := int64(f8) * int64(f8_19)
|
||||
f8f9_38 := int64(f8) * int64(f9_38)
|
||||
f9f9_38 := int64(f9) * int64(f9_38)
|
||||
h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
|
||||
h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
|
||||
h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
|
||||
h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
|
||||
h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
|
||||
h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
|
||||
h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
|
||||
h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
|
||||
h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
|
||||
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
|
||||
var carry [10]int64
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feMul121666 calculates h = f * 121666. Can overlap h with f.
|
||||
//
|
||||
// Preconditions:
|
||||
// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
|
||||
//
|
||||
// Postconditions:
|
||||
// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
|
||||
func feMul121666(h, f *fieldElement) {
|
||||
h0 := int64(f[0]) * 121666
|
||||
h1 := int64(f[1]) * 121666
|
||||
h2 := int64(f[2]) * 121666
|
||||
h3 := int64(f[3]) * 121666
|
||||
h4 := int64(f[4]) * 121666
|
||||
h5 := int64(f[5]) * 121666
|
||||
h6 := int64(f[6]) * 121666
|
||||
h7 := int64(f[7]) * 121666
|
||||
h8 := int64(f[8]) * 121666
|
||||
h9 := int64(f[9]) * 121666
|
||||
var carry [10]int64
|
||||
|
||||
carry[9] = (h9 + (1 << 24)) >> 25
|
||||
h0 += carry[9] * 19
|
||||
h9 -= carry[9] << 25
|
||||
carry[1] = (h1 + (1 << 24)) >> 25
|
||||
h2 += carry[1]
|
||||
h1 -= carry[1] << 25
|
||||
carry[3] = (h3 + (1 << 24)) >> 25
|
||||
h4 += carry[3]
|
||||
h3 -= carry[3] << 25
|
||||
carry[5] = (h5 + (1 << 24)) >> 25
|
||||
h6 += carry[5]
|
||||
h5 -= carry[5] << 25
|
||||
carry[7] = (h7 + (1 << 24)) >> 25
|
||||
h8 += carry[7]
|
||||
h7 -= carry[7] << 25
|
||||
|
||||
carry[0] = (h0 + (1 << 25)) >> 26
|
||||
h1 += carry[0]
|
||||
h0 -= carry[0] << 26
|
||||
carry[2] = (h2 + (1 << 25)) >> 26
|
||||
h3 += carry[2]
|
||||
h2 -= carry[2] << 26
|
||||
carry[4] = (h4 + (1 << 25)) >> 26
|
||||
h5 += carry[4]
|
||||
h4 -= carry[4] << 26
|
||||
carry[6] = (h6 + (1 << 25)) >> 26
|
||||
h7 += carry[6]
|
||||
h6 -= carry[6] << 26
|
||||
carry[8] = (h8 + (1 << 25)) >> 26
|
||||
h9 += carry[8]
|
||||
h8 -= carry[8] << 26
|
||||
|
||||
h[0] = int32(h0)
|
||||
h[1] = int32(h1)
|
||||
h[2] = int32(h2)
|
||||
h[3] = int32(h3)
|
||||
h[4] = int32(h4)
|
||||
h[5] = int32(h5)
|
||||
h[6] = int32(h6)
|
||||
h[7] = int32(h7)
|
||||
h[8] = int32(h8)
|
||||
h[9] = int32(h9)
|
||||
}
|
||||
|
||||
// feInvert sets out = z^-1.
|
||||
func feInvert(out, z *fieldElement) {
|
||||
var t0, t1, t2, t3 fieldElement
|
||||
var i int
|
||||
|
||||
feSquare(&t0, z)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t0, &t0)
|
||||
}
|
||||
feSquare(&t1, &t0)
|
||||
for i = 1; i < 2; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(&t1, z, &t1)
|
||||
feMul(&t0, &t0, &t1)
|
||||
feSquare(&t2, &t0)
|
||||
for i = 1; i < 1; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t1, &t2)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 20; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 10; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t2, &t1)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t2, &t2, &t1)
|
||||
feSquare(&t3, &t2)
|
||||
for i = 1; i < 100; i++ {
|
||||
feSquare(&t3, &t3)
|
||||
}
|
||||
feMul(&t2, &t3, &t2)
|
||||
feSquare(&t2, &t2)
|
||||
for i = 1; i < 50; i++ {
|
||||
feSquare(&t2, &t2)
|
||||
}
|
||||
feMul(&t1, &t2, &t1)
|
||||
feSquare(&t1, &t1)
|
||||
for i = 1; i < 5; i++ {
|
||||
feSquare(&t1, &t1)
|
||||
}
|
||||
feMul(out, &t1, &t0)
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
|
||||
copy(e[:], in[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
|
||||
feFromBytes(&x1, base)
|
||||
feOne(&x2)
|
||||
feCopy(&x3, &x1)
|
||||
feOne(&z3)
|
||||
|
||||
swap := int32(0)
|
||||
for pos := 254; pos >= 0; pos-- {
|
||||
b := e[pos/8] >> uint(pos&7)
|
||||
b &= 1
|
||||
swap ^= int32(b)
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
swap = int32(b)
|
||||
|
||||
feSub(&tmp0, &x3, &z3)
|
||||
feSub(&tmp1, &x2, &z2)
|
||||
feAdd(&x2, &x2, &z2)
|
||||
feAdd(&z2, &x3, &z3)
|
||||
feMul(&z3, &tmp0, &x2)
|
||||
feMul(&z2, &z2, &tmp1)
|
||||
feSquare(&tmp0, &tmp1)
|
||||
feSquare(&tmp1, &x2)
|
||||
feAdd(&x3, &z3, &z2)
|
||||
feSub(&z2, &z3, &z2)
|
||||
feMul(&x2, &tmp1, &tmp0)
|
||||
feSub(&tmp1, &tmp1, &tmp0)
|
||||
feSquare(&z2, &z2)
|
||||
feMul121666(&z3, &tmp1)
|
||||
feSquare(&x3, &x3)
|
||||
feAdd(&tmp0, &tmp0, &z3)
|
||||
feMul(&z3, &x1, &z2)
|
||||
feMul(&z2, &tmp1, &tmp0)
|
||||
}
|
||||
|
||||
feCSwap(&x2, &x3, swap)
|
||||
feCSwap(&z2, &z3, swap)
|
||||
|
||||
feInvert(&z2, &z2)
|
||||
feMul(&x2, &x2, &z2)
|
||||
feToBytes(out, &x2)
|
||||
}
|
23 vendor/golang.org/x/crypto/curve25519/doc.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package curve25519 provides an implementation of scalar multiplication on
// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
package curve25519

// basePoint is the x coordinate of the generator of the curve.
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}

// ScalarMult sets dst to the product in*base where dst and base are the x
// coordinates of group points and all values are in little-endian form.
func ScalarMult(dst, in, base *[32]byte) {
	scalarMult(dst, in, base)
}

// ScalarBaseMult sets dst to the product in*base where dst and base are the x
// coordinates of group points, base is the standard generator and all values
// are in little-endian form.
func ScalarBaseMult(dst, in *[32]byte) {
	ScalarMult(dst, in, &basePoint)
}
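doc.go above is the whole public surface of the vendored curve25519 package. As a quick orientation, the sketch below (not part of the vendored code; it assumes the vendor import path added by this commit) shows the usual Diffie-Hellman pairing of the two functions:

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	// Each side picks a random 32-byte scalar as its private key.
	var alicePriv, bobPriv [32]byte
	rand.Read(alicePriv[:])
	rand.Read(bobPriv[:])

	// Public keys are the scalars multiplied by the standard generator.
	var alicePub, bobPub [32]byte
	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
	curve25519.ScalarBaseMult(&bobPub, &bobPriv)

	// Each side multiplies its own scalar by the peer's public point;
	// both arrive at the same shared x coordinate.
	var aliceShared, bobShared [32]byte
	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

	fmt.Println(aliceShared == bobShared) // true
}

Note that scalarMult clamps the scalar internally (e[0] &= 248, e[31] &= 127, e[31] |= 64), so raw random bytes are acceptable as the private key.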
94 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s generated vendored Normal file
@@ -0,0 +1,94 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func freeze(inout *[5]uint64)
|
||||
TEXT ·freeze(SB),7,$96-8
|
||||
MOVQ inout+0(FP), DI
|
||||
|
||||
MOVQ SP,R11
|
||||
MOVQ $31,CX
|
||||
NOTQ CX
|
||||
ANDQ CX,SP
|
||||
ADDQ $32,SP
|
||||
|
||||
MOVQ R11,0(SP)
|
||||
MOVQ R12,8(SP)
|
||||
MOVQ R13,16(SP)
|
||||
MOVQ R14,24(SP)
|
||||
MOVQ R15,32(SP)
|
||||
MOVQ BX,40(SP)
|
||||
MOVQ BP,48(SP)
|
||||
MOVQ 0(DI),SI
|
||||
MOVQ 8(DI),DX
|
||||
MOVQ 16(DI),CX
|
||||
MOVQ 24(DI),R8
|
||||
MOVQ 32(DI),R9
|
||||
MOVQ ·REDMASK51(SB),AX
|
||||
MOVQ AX,R10
|
||||
SUBQ $18,R10
|
||||
MOVQ $3,R11
|
||||
REDUCELOOP:
|
||||
MOVQ SI,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,SI
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,DX
|
||||
ADDQ R12,CX
|
||||
MOVQ CX,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,CX
|
||||
ADDQ R12,R8
|
||||
MOVQ R8,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R8
|
||||
ADDQ R12,R9
|
||||
MOVQ R9,R12
|
||||
SHRQ $51,R12
|
||||
ANDQ AX,R9
|
||||
IMUL3Q $19,R12,R12
|
||||
ADDQ R12,SI
|
||||
SUBQ $1,R11
|
||||
JA REDUCELOOP
|
||||
MOVQ $1,R12
|
||||
CMPQ R10,SI
|
||||
CMOVQLT R11,R12
|
||||
CMPQ AX,DX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,CX
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R8
|
||||
CMOVQNE R11,R12
|
||||
CMPQ AX,R9
|
||||
CMOVQNE R11,R12
|
||||
NEGQ R12
|
||||
ANDQ R12,AX
|
||||
ANDQ R12,R10
|
||||
SUBQ R10,SI
|
||||
SUBQ AX,DX
|
||||
SUBQ AX,CX
|
||||
SUBQ AX,R8
|
||||
SUBQ AX,R9
|
||||
MOVQ SI,0(DI)
|
||||
MOVQ DX,8(DI)
|
||||
MOVQ CX,16(DI)
|
||||
MOVQ R8,24(DI)
|
||||
MOVQ R9,32(DI)
|
||||
MOVQ 0(SP),R11
|
||||
MOVQ 8(SP),R12
|
||||
MOVQ 16(SP),R13
|
||||
MOVQ 24(SP),R14
|
||||
MOVQ 32(SP),R15
|
||||
MOVQ 40(SP),BX
|
||||
MOVQ 48(SP),BP
|
||||
MOVQ R11,SP
|
||||
MOVQ DI,AX
|
||||
MOVQ SI,DX
|
||||
RET
|
1398 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s generated vendored Normal file
File diff suppressed because it is too large
240 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go generated vendored Normal file
@@ -0,0 +1,240 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package curve25519
|
||||
|
||||
// These functions are implemented in the .s files. The names of the functions
|
||||
// in the rest of the file are also taken from the SUPERCOP sources to help
|
||||
// people following along.
|
||||
|
||||
//go:noescape
|
||||
|
||||
func cswap(inout *[5]uint64, v uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func ladderstep(inout *[5][5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func freeze(inout *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func mul(dest, a, b *[5]uint64)
|
||||
|
||||
//go:noescape
|
||||
|
||||
func square(out, in *[5]uint64)
|
||||
|
||||
// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
|
||||
func mladder(xr, zr *[5]uint64, s *[32]byte) {
|
||||
var work [5][5]uint64
|
||||
|
||||
work[0] = *xr
|
||||
setint(&work[1], 1)
|
||||
setint(&work[2], 0)
|
||||
work[3] = *xr
|
||||
setint(&work[4], 1)
|
||||
|
||||
j := uint(6)
|
||||
var prevbit byte
|
||||
|
||||
for i := 31; i >= 0; i-- {
|
||||
for j < 8 {
|
||||
bit := ((*s)[i] >> j) & 1
|
||||
swap := bit ^ prevbit
|
||||
prevbit = bit
|
||||
cswap(&work[1], uint64(swap))
|
||||
ladderstep(&work)
|
||||
j--
|
||||
}
|
||||
j = 7
|
||||
}
|
||||
|
||||
*xr = work[1]
|
||||
*zr = work[2]
|
||||
}
|
||||
|
||||
func scalarMult(out, in, base *[32]byte) {
|
||||
var e [32]byte
|
||||
copy(e[:], (*in)[:])
|
||||
e[0] &= 248
|
||||
e[31] &= 127
|
||||
e[31] |= 64
|
||||
|
||||
var t, z [5]uint64
|
||||
unpack(&t, base)
|
||||
mladder(&t, &z, &e)
|
||||
invert(&z, &z)
|
||||
mul(&t, &t, &z)
|
||||
pack(out, &t)
|
||||
}
|
||||
|
||||
func setint(r *[5]uint64, v uint64) {
|
||||
r[0] = v
|
||||
r[1] = 0
|
||||
r[2] = 0
|
||||
r[3] = 0
|
||||
r[4] = 0
|
||||
}
|
||||
|
||||
// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
|
||||
// order.
|
||||
func unpack(r *[5]uint64, x *[32]byte) {
|
||||
r[0] = uint64(x[0]) |
|
||||
uint64(x[1])<<8 |
|
||||
uint64(x[2])<<16 |
|
||||
uint64(x[3])<<24 |
|
||||
uint64(x[4])<<32 |
|
||||
uint64(x[5])<<40 |
|
||||
uint64(x[6]&7)<<48
|
||||
|
||||
r[1] = uint64(x[6])>>3 |
|
||||
uint64(x[7])<<5 |
|
||||
uint64(x[8])<<13 |
|
||||
uint64(x[9])<<21 |
|
||||
uint64(x[10])<<29 |
|
||||
uint64(x[11])<<37 |
|
||||
uint64(x[12]&63)<<45
|
||||
|
||||
r[2] = uint64(x[12])>>6 |
|
||||
uint64(x[13])<<2 |
|
||||
uint64(x[14])<<10 |
|
||||
uint64(x[15])<<18 |
|
||||
uint64(x[16])<<26 |
|
||||
uint64(x[17])<<34 |
|
||||
uint64(x[18])<<42 |
|
||||
uint64(x[19]&1)<<50
|
||||
|
||||
r[3] = uint64(x[19])>>1 |
|
||||
uint64(x[20])<<7 |
|
||||
uint64(x[21])<<15 |
|
||||
uint64(x[22])<<23 |
|
||||
uint64(x[23])<<31 |
|
||||
uint64(x[24])<<39 |
|
||||
uint64(x[25]&15)<<47
|
||||
|
||||
r[4] = uint64(x[25])>>4 |
|
||||
uint64(x[26])<<4 |
|
||||
uint64(x[27])<<12 |
|
||||
uint64(x[28])<<20 |
|
||||
uint64(x[29])<<28 |
|
||||
uint64(x[30])<<36 |
|
||||
uint64(x[31]&127)<<44
|
||||
}
|
||||
|
||||
// pack sets out = x where out is the usual, little-endian form of the 5,
|
||||
// 51-bit limbs in x.
|
||||
func pack(out *[32]byte, x *[5]uint64) {
|
||||
t := *x
|
||||
freeze(&t)
|
||||
|
||||
out[0] = byte(t[0])
|
||||
out[1] = byte(t[0] >> 8)
|
||||
out[2] = byte(t[0] >> 16)
|
||||
out[3] = byte(t[0] >> 24)
|
||||
out[4] = byte(t[0] >> 32)
|
||||
out[5] = byte(t[0] >> 40)
|
||||
out[6] = byte(t[0] >> 48)
|
||||
|
||||
out[6] ^= byte(t[1]<<3) & 0xf8
|
||||
out[7] = byte(t[1] >> 5)
|
||||
out[8] = byte(t[1] >> 13)
|
||||
out[9] = byte(t[1] >> 21)
|
||||
out[10] = byte(t[1] >> 29)
|
||||
out[11] = byte(t[1] >> 37)
|
||||
out[12] = byte(t[1] >> 45)
|
||||
|
||||
out[12] ^= byte(t[2]<<6) & 0xc0
|
||||
out[13] = byte(t[2] >> 2)
|
||||
out[14] = byte(t[2] >> 10)
|
||||
out[15] = byte(t[2] >> 18)
|
||||
out[16] = byte(t[2] >> 26)
|
||||
out[17] = byte(t[2] >> 34)
|
||||
out[18] = byte(t[2] >> 42)
|
||||
out[19] = byte(t[2] >> 50)
|
||||
|
||||
out[19] ^= byte(t[3]<<1) & 0xfe
|
||||
out[20] = byte(t[3] >> 7)
|
||||
out[21] = byte(t[3] >> 15)
|
||||
out[22] = byte(t[3] >> 23)
|
||||
out[23] = byte(t[3] >> 31)
|
||||
out[24] = byte(t[3] >> 39)
|
||||
out[25] = byte(t[3] >> 47)
|
||||
|
||||
out[25] ^= byte(t[4]<<4) & 0xf0
|
||||
out[26] = byte(t[4] >> 4)
|
||||
out[27] = byte(t[4] >> 12)
|
||||
out[28] = byte(t[4] >> 20)
|
||||
out[29] = byte(t[4] >> 28)
|
||||
out[30] = byte(t[4] >> 36)
|
||||
out[31] = byte(t[4] >> 44)
|
||||
}
|
||||
|
||||
// invert calculates r = x^-1 mod p using Fermat's little theorem.
|
||||
func invert(r *[5]uint64, x *[5]uint64) {
|
||||
var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
|
||||
|
||||
square(&z2, x) /* 2 */
|
||||
square(&t, &z2) /* 4 */
|
||||
square(&t, &t) /* 8 */
|
||||
mul(&z9, &t, x) /* 9 */
|
||||
mul(&z11, &z9, &z2) /* 11 */
|
||||
square(&t, &z11) /* 22 */
|
||||
mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
|
||||
|
||||
square(&t, &z2_5_0) /* 2^6 - 2^1 */
|
||||
for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
|
||||
|
||||
square(&t, &z2_10_0) /* 2^11 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
|
||||
|
||||
square(&t, &z2_20_0) /* 2^21 - 2^1 */
|
||||
for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^41 - 2^1 */
|
||||
for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
|
||||
|
||||
square(&t, &z2_50_0) /* 2^51 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
|
||||
|
||||
square(&t, &z2_100_0) /* 2^101 - 2^1 */
|
||||
for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^201 - 2^1 */
|
||||
for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
|
||||
square(&t, &t)
|
||||
}
|
||||
mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
|
||||
|
||||
square(&t, &t) /* 2^251 - 2^1 */
|
||||
square(&t, &t) /* 2^252 - 2^2 */
|
||||
square(&t, &t) /* 2^253 - 2^3 */
|
||||
|
||||
square(&t, &t) /* 2^254 - 2^4 */
|
||||
|
||||
square(&t, &t) /* 2^255 - 2^5 */
|
||||
mul(r, &t, &z11) /* 2^255 - 21 */
|
||||
}
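The unpack/pack pair above defines the radix-2^51 limb representation the amd64 path works in. Below is a test-style round-trip sketch; it is not part of the vendored file and assumes it would live alongside mont25519_amd64.go inside package curve25519, and only builds where unpack, pack and freeze exist (amd64, !gccgo, !appengine):

package curve25519

import (
	"bytes"
	"testing"
)

func TestPackUnpackRoundTrip(t *testing.T) {
	// Values below 2^255 - 19 survive the round trip; unpack drops bit 255
	// (x[31] & 127) and pack runs freeze, which reduces mod 2^255 - 19.
	in := [32]byte{1, 2, 3, 4, 5}

	var limbs [5]uint64
	unpack(&limbs, &in) // 5 little-endian limbs of 51 bits each

	var out [32]byte
	pack(&out, &limbs)

	if !bytes.Equal(in[:], out[:]) {
		t.Fatalf("round trip mismatch: %x != %x", in, out)
	}
}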
|
191 vendor/golang.org/x/crypto/curve25519/mul_amd64.s generated vendored Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func mul(dest, a, b *[5]uint64)
|
||||
TEXT ·mul(SB),0,$128-24
|
||||
MOVQ dest+0(FP), DI
|
||||
MOVQ a+8(FP), SI
|
||||
MOVQ b+16(FP), DX
|
||||
|
||||
MOVQ SP,R11
|
||||
MOVQ $31,CX
|
||||
NOTQ CX
|
||||
ANDQ CX,SP
|
||||
ADDQ $32,SP
|
||||
|
||||
MOVQ R11,0(SP)
|
||||
MOVQ R12,8(SP)
|
||||
MOVQ R13,16(SP)
|
||||
MOVQ R14,24(SP)
|
||||
MOVQ R15,32(SP)
|
||||
MOVQ BX,40(SP)
|
||||
MOVQ BP,48(SP)
|
||||
MOVQ DI,56(SP)
|
||||
MOVQ DX,CX
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,64(SP)
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R8
|
||||
MOVQ DX,R9
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MOVQ AX,72(SP)
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 8(CX)
|
||||
MOVQ AX,R10
|
||||
MOVQ DX,R11
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 16(CX)
|
||||
MOVQ AX,R12
|
||||
MOVQ DX,R13
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 24(CX)
|
||||
MOVQ AX,R14
|
||||
MOVQ DX,R15
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 32(CX)
|
||||
MOVQ AX,BX
|
||||
MOVQ DX,BP
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R8
|
||||
ADCQ DX,R9
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ 24(SI),AX
|
||||
MULQ 8(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 64(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 64(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 32(SI),AX
|
||||
MULQ 0(CX)
|
||||
ADDQ AX,BX
|
||||
ADCQ DX,BP
|
||||
MOVQ 72(SP),AX
|
||||
MULQ 16(CX)
|
||||
ADDQ AX,R10
|
||||
ADCQ DX,R11
|
||||
MOVQ 72(SP),AX
|
||||
MULQ 24(CX)
|
||||
ADDQ AX,R12
|
||||
ADCQ DX,R13
|
||||
MOVQ 72(SP),AX
|
||||
MULQ 32(CX)
|
||||
ADDQ AX,R14
|
||||
ADCQ DX,R15
|
||||
MOVQ ·REDMASK51(SB),SI
|
||||
SHLQ $13,R9:R8
|
||||
ANDQ SI,R8
|
||||
SHLQ $13,R11:R10
|
||||
ANDQ SI,R10
|
||||
ADDQ R9,R10
|
||||
SHLQ $13,R13:R12
|
||||
ANDQ SI,R12
|
||||
ADDQ R11,R12
|
||||
SHLQ $13,R15:R14
|
||||
ANDQ SI,R14
|
||||
ADDQ R13,R14
|
||||
SHLQ $13,BP:BX
|
||||
ANDQ SI,BX
|
||||
ADDQ R15,BX
|
||||
IMUL3Q $19,BP,DX
|
||||
ADDQ DX,R8
|
||||
MOVQ R8,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R10,DX
|
||||
MOVQ DX,CX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R8
|
||||
ADDQ R12,DX
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,CX
|
||||
ADDQ R14,DX
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,R9
|
||||
ADDQ BX,DX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
ANDQ SI,AX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,R8
|
||||
ANDQ SI,R10
|
||||
MOVQ R8,0(DI)
|
||||
MOVQ CX,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
MOVQ 0(SP),R11
|
||||
MOVQ 8(SP),R12
|
||||
MOVQ 16(SP),R13
|
||||
MOVQ 24(SP),R14
|
||||
MOVQ 32(SP),R15
|
||||
MOVQ 40(SP),BX
|
||||
MOVQ 48(SP),BP
|
||||
MOVQ R11,SP
|
||||
MOVQ DI,AX
|
||||
MOVQ SI,DX
|
||||
RET
|
153 vendor/golang.org/x/crypto/curve25519/square_amd64.s generated vendored Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This code was translated into a form compatible with 6a from the public
|
||||
// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
// func square(out, in *[5]uint64)
|
||||
TEXT ·square(SB),7,$96-16
|
||||
MOVQ out+0(FP), DI
|
||||
MOVQ in+8(FP), SI
|
||||
|
||||
MOVQ SP,R11
|
||||
MOVQ $31,CX
|
||||
NOTQ CX
|
||||
ANDQ CX,SP
|
||||
ADDQ $32, SP
|
||||
|
||||
MOVQ R11,0(SP)
|
||||
MOVQ R12,8(SP)
|
||||
MOVQ R13,16(SP)
|
||||
MOVQ R14,24(SP)
|
||||
MOVQ R15,32(SP)
|
||||
MOVQ BX,40(SP)
|
||||
MOVQ BP,48(SP)
|
||||
MOVQ 0(SI),AX
|
||||
MULQ 0(SI)
|
||||
MOVQ AX,CX
|
||||
MOVQ DX,R8
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 8(SI)
|
||||
MOVQ AX,R9
|
||||
MOVQ DX,R10
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
MOVQ AX,R11
|
||||
MOVQ DX,R12
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
MOVQ AX,R13
|
||||
MOVQ DX,R14
|
||||
MOVQ 0(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 32(SI)
|
||||
MOVQ AX,R15
|
||||
MOVQ DX,BX
|
||||
MOVQ 8(SI),AX
|
||||
MULQ 8(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ 8(SI),AX
|
||||
SHLQ $1,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 8(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),AX
|
||||
MULQ 16(SI)
|
||||
ADDQ AX,R15
|
||||
ADCQ DX,BX
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,CX
|
||||
ADCQ DX,R8
|
||||
MOVQ 16(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 24(SI)
|
||||
ADDQ AX,R9
|
||||
ADCQ DX,R10
|
||||
MOVQ 24(SI),DX
|
||||
IMUL3Q $38,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R11
|
||||
ADCQ DX,R12
|
||||
MOVQ 32(SI),DX
|
||||
IMUL3Q $19,DX,AX
|
||||
MULQ 32(SI)
|
||||
ADDQ AX,R13
|
||||
ADCQ DX,R14
|
||||
MOVQ ·REDMASK51(SB),SI
|
||||
SHLQ $13,R8:CX
|
||||
ANDQ SI,CX
|
||||
SHLQ $13,R10:R9
|
||||
ANDQ SI,R9
|
||||
ADDQ R8,R9
|
||||
SHLQ $13,R12:R11
|
||||
ANDQ SI,R11
|
||||
ADDQ R10,R11
|
||||
SHLQ $13,R14:R13
|
||||
ANDQ SI,R13
|
||||
ADDQ R12,R13
|
||||
SHLQ $13,BX:R15
|
||||
ANDQ SI,R15
|
||||
ADDQ R14,R15
|
||||
IMUL3Q $19,BX,DX
|
||||
ADDQ DX,CX
|
||||
MOVQ CX,DX
|
||||
SHRQ $51,DX
|
||||
ADDQ R9,DX
|
||||
ANDQ SI,CX
|
||||
MOVQ DX,R8
|
||||
SHRQ $51,DX
|
||||
ADDQ R11,DX
|
||||
ANDQ SI,R8
|
||||
MOVQ DX,R9
|
||||
SHRQ $51,DX
|
||||
ADDQ R13,DX
|
||||
ANDQ SI,R9
|
||||
MOVQ DX,AX
|
||||
SHRQ $51,DX
|
||||
ADDQ R15,DX
|
||||
ANDQ SI,AX
|
||||
MOVQ DX,R10
|
||||
SHRQ $51,DX
|
||||
IMUL3Q $19,DX,DX
|
||||
ADDQ DX,CX
|
||||
ANDQ SI,R10
|
||||
MOVQ CX,0(DI)
|
||||
MOVQ R8,8(DI)
|
||||
MOVQ R9,16(DI)
|
||||
MOVQ AX,24(DI)
|
||||
MOVQ R10,32(DI)
|
||||
MOVQ 0(SP),R11
|
||||
MOVQ 8(SP),R12
|
||||
MOVQ 16(SP),R13
|
||||
MOVQ 24(SP),R14
|
||||
MOVQ 32(SP),R15
|
||||
MOVQ 40(SP),BX
|
||||
MOVQ 48(SP),BP
|
||||
MOVQ R11,SP
|
||||
MOVQ DI,AX
|
||||
MOVQ SI,DX
|
||||
RET
|
50 vendor/golang.org/x/crypto/pkcs12/bmp-string.go generated vendored Normal file
@@ -0,0 +1,50 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import (
	"errors"
	"unicode/utf16"
)

// bmpString returns s encoded in UCS-2 with a zero terminator.
func bmpString(s string) ([]byte, error) {
	// References:
	// https://tools.ietf.org/html/rfc7292#appendix-B.1
	// http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
	//  - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
	//    EncodeRune returns 0xfffd if the rune does not need special encoding
	//  - the above RFC provides the info that BMPStrings are NULL terminated.

	ret := make([]byte, 0, 2*len(s)+2)

	for _, r := range s {
		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
		}
		ret = append(ret, byte(r/256), byte(r%256))
	}

	return append(ret, 0, 0), nil
}

func decodeBMPString(bmpString []byte) (string, error) {
	if len(bmpString)%2 != 0 {
		return "", errors.New("pkcs12: odd-length BMP string")
	}

	// strip terminator if present
	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
		bmpString = bmpString[:l-2]
	}

	s := make([]uint16, 0, len(bmpString)/2)
	for len(bmpString) > 0 {
		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
		bmpString = bmpString[2:]
	}

	return string(utf16.Decode(s)), nil
}
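bmpString and decodeBMPString are unexported, so the round-trip sketch below is written test-style as if it sat inside package pkcs12; it is an illustration only, not part of the vendored file:

package pkcs12

import "testing"

func TestBMPStringRoundTrip(t *testing.T) {
	// "ab" encodes as UCS-2 big-endian plus a two-byte terminator:
	// 0x00 'a' 0x00 'b' 0x00 0x00
	enc, err := bmpString("ab")
	if err != nil {
		t.Fatal(err)
	}
	if len(enc) != 6 {
		t.Fatalf("unexpected encoded length %d", len(enc))
	}

	dec, err := decodeBMPString(enc)
	if err != nil {
		t.Fatal(err)
	}
	if dec != "ab" {
		t.Fatalf("round trip mismatch: %q", dec)
	}
}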
131 vendor/golang.org/x/crypto/pkcs12/crypto.go generated vendored Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
|
||||
"golang.org/x/crypto/pkcs12/internal/rc2"
|
||||
)
|
||||
|
||||
var (
|
||||
oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
|
||||
oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
|
||||
)
|
||||
|
||||
// pbeCipher is an abstraction of a PKCS#12 cipher.
|
||||
type pbeCipher interface {
|
||||
// create returns a cipher.Block given a key.
|
||||
create(key []byte) (cipher.Block, error)
|
||||
// deriveKey returns a key derived from the given password and salt.
|
||||
deriveKey(salt, password []byte, iterations int) []byte
|
||||
// deriveKey returns an IV derived from the given password and salt.
|
||||
deriveIV(salt, password []byte, iterations int) []byte
|
||||
}
|
||||
|
||||
type shaWithTripleDESCBC struct{}
|
||||
|
||||
func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
|
||||
return des.NewTripleDESCipher(key)
|
||||
}
|
||||
|
||||
func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
|
||||
}
|
||||
|
||||
func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
|
||||
}
|
||||
|
||||
type shaWith40BitRC2CBC struct{}
|
||||
|
||||
func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
|
||||
return rc2.New(key, len(key)*8)
|
||||
}
|
||||
|
||||
func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
|
||||
}
|
||||
|
||||
func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
|
||||
return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
|
||||
}
|
||||
|
||||
type pbeParams struct {
|
||||
Salt []byte
|
||||
Iterations int
|
||||
}
|
||||
|
||||
func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
|
||||
var cipherType pbeCipher
|
||||
|
||||
switch {
|
||||
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
|
||||
cipherType = shaWithTripleDESCBC{}
|
||||
case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
|
||||
cipherType = shaWith40BitRC2CBC{}
|
||||
default:
|
||||
return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
|
||||
}
|
||||
|
||||
var params pbeParams
|
||||
if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
key := cipherType.deriveKey(params.Salt, password, params.Iterations)
|
||||
iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
|
||||
|
||||
block, err := cipherType.create(key)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
|
||||
}
|
||||
|
||||
func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
|
||||
cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encrypted := info.Data()
|
||||
if len(encrypted) == 0 {
|
||||
return nil, errors.New("pkcs12: empty encrypted data")
|
||||
}
|
||||
if len(encrypted)%blockSize != 0 {
|
||||
return nil, errors.New("pkcs12: input is not a multiple of the block size")
|
||||
}
|
||||
decrypted = make([]byte, len(encrypted))
|
||||
cbc.CryptBlocks(decrypted, encrypted)
|
||||
|
||||
psLen := int(decrypted[len(decrypted)-1])
|
||||
if psLen == 0 || psLen > blockSize {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
|
||||
if len(decrypted) < psLen {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
ps := decrypted[len(decrypted)-psLen:]
|
||||
decrypted = decrypted[:len(decrypted)-psLen]
|
||||
if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
|
||||
return nil, ErrDecryption
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// decryptable abstracts a object that contains ciphertext.
|
||||
type decryptable interface {
|
||||
Algorithm() pkix.AlgorithmIdentifier
|
||||
Data() []byte
|
||||
}
|
23 vendor/golang.org/x/crypto/pkcs12/errors.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import "errors"

var (
	// ErrDecryption represents a failure to decrypt the input.
	ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")

	// ErrIncorrectPassword is returned when an incorrect password is detected.
	// Usually, P12/PFX data is signed to be able to verify the password.
	ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
)

// NotImplementedError indicates that the input is not currently supported.
type NotImplementedError string

func (e NotImplementedError) Error() string {
	return "pkcs12: " + string(e)
}
274 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go generated vendored Normal file
@@ -0,0 +1,274 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package rc2 implements the RC2 cipher
|
||||
/*
|
||||
https://www.ietf.org/rfc/rfc2268.txt
|
||||
http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
|
||||
|
||||
This code is licensed under the MIT license.
|
||||
*/
|
||||
package rc2
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// The rc2 block size in bytes
|
||||
const BlockSize = 8
|
||||
|
||||
type rc2Cipher struct {
|
||||
k [64]uint16
|
||||
}
|
||||
|
||||
// New returns a new rc2 cipher with the given key and effective key length t1
|
||||
func New(key []byte, t1 int) (cipher.Block, error) {
|
||||
// TODO(dgryski): error checking for key length
|
||||
return &rc2Cipher{
|
||||
k: expandKey(key, t1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (*rc2Cipher) BlockSize() int { return BlockSize }
|
||||
|
||||
var piTable = [256]byte{
|
||||
0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
|
||||
0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
|
||||
0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
|
||||
0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
|
||||
0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
|
||||
0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
|
||||
0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
|
||||
0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
|
||||
0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
|
||||
0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
|
||||
0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
|
||||
0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
|
||||
0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
|
||||
0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
|
||||
0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
|
||||
0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
|
||||
}
|
||||
|
||||
func expandKey(key []byte, t1 int) [64]uint16 {
|
||||
|
||||
l := make([]byte, 128)
|
||||
copy(l, key)
|
||||
|
||||
var t = len(key)
|
||||
var t8 = (t1 + 7) / 8
|
||||
var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
|
||||
|
||||
for i := len(key); i < 128; i++ {
|
||||
l[i] = piTable[l[i-1]+l[uint8(i-t)]]
|
||||
}
|
||||
|
||||
l[128-t8] = piTable[l[128-t8]&tm]
|
||||
|
||||
for i := 127 - t8; i >= 0; i-- {
|
||||
l[i] = piTable[l[i+1]^l[i+t8]]
|
||||
}
|
||||
|
||||
var k [64]uint16
|
||||
|
||||
for i := range k {
|
||||
k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
|
||||
}
|
||||
|
||||
return k
|
||||
}
|
||||
|
||||
func rotl16(x uint16, b uint) uint16 {
|
||||
return (x >> (16 - b)) | (x << b)
|
||||
}
|
||||
|
||||
func (c *rc2Cipher) Encrypt(dst, src []byte) {
|
||||
|
||||
r0 := binary.LittleEndian.Uint16(src[0:])
|
||||
r1 := binary.LittleEndian.Uint16(src[2:])
|
||||
r2 := binary.LittleEndian.Uint16(src[4:])
|
||||
r3 := binary.LittleEndian.Uint16(src[6:])
|
||||
|
||||
var j int
|
||||
|
||||
for j <= 16 {
|
||||
// mix r0
|
||||
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
|
||||
r0 = rotl16(r0, 1)
|
||||
j++
|
||||
|
||||
// mix r1
|
||||
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
|
||||
r1 = rotl16(r1, 2)
|
||||
j++
|
||||
|
||||
// mix r2
|
||||
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
|
||||
r2 = rotl16(r2, 3)
|
||||
j++
|
||||
|
||||
// mix r3
|
||||
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
|
||||
r3 = rotl16(r3, 5)
|
||||
j++
|
||||
|
||||
}
|
||||
|
||||
r0 = r0 + c.k[r3&63]
|
||||
r1 = r1 + c.k[r0&63]
|
||||
r2 = r2 + c.k[r1&63]
|
||||
r3 = r3 + c.k[r2&63]
|
||||
|
||||
for j <= 40 {
|
||||
|
||||
// mix r0
|
||||
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
|
||||
r0 = rotl16(r0, 1)
|
||||
j++
|
||||
|
||||
// mix r1
|
||||
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
|
||||
r1 = rotl16(r1, 2)
|
||||
j++
|
||||
|
||||
// mix r2
|
||||
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
|
||||
r2 = rotl16(r2, 3)
|
||||
j++
|
||||
|
||||
// mix r3
|
||||
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
|
||||
r3 = rotl16(r3, 5)
|
||||
j++
|
||||
|
||||
}
|
||||
|
||||
r0 = r0 + c.k[r3&63]
|
||||
r1 = r1 + c.k[r0&63]
|
||||
r2 = r2 + c.k[r1&63]
|
||||
r3 = r3 + c.k[r2&63]
|
||||
|
||||
for j <= 60 {
|
||||
|
||||
// mix r0
|
||||
r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
|
||||
r0 = rotl16(r0, 1)
|
||||
j++
|
||||
|
||||
// mix r1
|
||||
r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
|
||||
r1 = rotl16(r1, 2)
|
||||
j++
|
||||
|
||||
// mix r2
|
||||
r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
|
||||
r2 = rotl16(r2, 3)
|
||||
j++
|
||||
|
||||
// mix r3
|
||||
r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
|
||||
r3 = rotl16(r3, 5)
|
||||
j++
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint16(dst[0:], r0)
|
||||
binary.LittleEndian.PutUint16(dst[2:], r1)
|
||||
binary.LittleEndian.PutUint16(dst[4:], r2)
|
||||
binary.LittleEndian.PutUint16(dst[6:], r3)
|
||||
}
|
||||
|
||||
func (c *rc2Cipher) Decrypt(dst, src []byte) {
|
||||
|
||||
r0 := binary.LittleEndian.Uint16(src[0:])
|
||||
r1 := binary.LittleEndian.Uint16(src[2:])
|
||||
r2 := binary.LittleEndian.Uint16(src[4:])
|
||||
r3 := binary.LittleEndian.Uint16(src[6:])
|
||||
|
||||
j := 63
|
||||
|
||||
for j >= 44 {
|
||||
// unmix r3
|
||||
r3 = rotl16(r3, 16-5)
|
||||
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
|
||||
j--
|
||||
|
||||
// unmix r2
|
||||
r2 = rotl16(r2, 16-3)
|
||||
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
|
||||
j--
|
||||
|
||||
// unmix r1
|
||||
r1 = rotl16(r1, 16-2)
|
||||
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
|
||||
j--
|
||||
|
||||
// unmix r0
|
||||
r0 = rotl16(r0, 16-1)
|
||||
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
|
||||
j--
|
||||
}
|
||||
|
||||
r3 = r3 - c.k[r2&63]
|
||||
r2 = r2 - c.k[r1&63]
|
||||
r1 = r1 - c.k[r0&63]
|
||||
r0 = r0 - c.k[r3&63]
|
||||
|
||||
for j >= 20 {
|
||||
// unmix r3
|
||||
r3 = rotl16(r3, 16-5)
|
||||
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
|
||||
j--
|
||||
|
||||
// unmix r2
|
||||
r2 = rotl16(r2, 16-3)
|
||||
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
|
||||
j--
|
||||
|
||||
// unmix r1
|
||||
r1 = rotl16(r1, 16-2)
|
||||
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
|
||||
j--
|
||||
|
||||
// unmix r0
|
||||
r0 = rotl16(r0, 16-1)
|
||||
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
|
||||
j--
|
||||
|
||||
}
|
||||
|
||||
r3 = r3 - c.k[r2&63]
|
||||
r2 = r2 - c.k[r1&63]
|
||||
r1 = r1 - c.k[r0&63]
|
||||
r0 = r0 - c.k[r3&63]
|
||||
|
||||
for j >= 0 {
|
||||
|
||||
// unmix r3
|
||||
r3 = rotl16(r3, 16-5)
|
||||
r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
|
||||
j--
|
||||
|
||||
// unmix r2
|
||||
r2 = rotl16(r2, 16-3)
|
||||
r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
|
||||
j--
|
||||
|
||||
// unmix r1
|
||||
r1 = rotl16(r1, 16-2)
|
||||
r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
|
||||
j--
|
||||
|
||||
// unmix r0
|
||||
r0 = rotl16(r0, 16-1)
|
||||
r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
|
||||
j--
|
||||
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint16(dst[0:], r0)
|
||||
binary.LittleEndian.PutUint16(dst[2:], r1)
|
||||
binary.LittleEndian.PutUint16(dst[4:], r2)
|
||||
binary.LittleEndian.PutUint16(dst[6:], r3)
|
||||
}
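rc2.New returns a plain cipher.Block, which is all the parent pkcs12 package needs to build a CBC decrypter. The sketch below is not part of the vendored code; it assumes the internal vendor path added by this commit (an internal package, so in practice it is only importable from within golang.org/x/crypto/pkcs12):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/pkcs12/internal/rc2"
)

func main() {
	// A 5-byte key with a 40-bit effective key length, mirroring how
	// shaWith40BitRC2CBC.create calls rc2.New(key, len(key)*8) in crypto.go.
	key := []byte{1, 2, 3, 4, 5}
	block, err := rc2.New(key, 40)
	if err != nil {
		panic(err)
	}

	src := []byte("8 bytes!") // exactly one BlockSize worth of data
	dst := make([]byte, rc2.BlockSize)
	block.Encrypt(dst, src)

	out := make([]byte, rc2.BlockSize)
	block.Decrypt(out, dst)
	fmt.Println(bytes.Equal(src, out)) // true
}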
|
45 vendor/golang.org/x/crypto/pkcs12/mac.go generated vendored Normal file
@@ -0,0 +1,45 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import (
	"crypto/hmac"
	"crypto/sha1"
	"crypto/x509/pkix"
	"encoding/asn1"
)

type macData struct {
	Mac        digestInfo
	MacSalt    []byte
	Iterations int `asn1:"optional,default:1"`
}

// from PKCS#7:
type digestInfo struct {
	Algorithm pkix.AlgorithmIdentifier
	Digest    []byte
}

var (
	oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
)

func verifyMac(macData *macData, message, password []byte) error {
	if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
		return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
	}

	key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)

	mac := hmac.New(sha1.New, key)
	mac.Write(message)
	expectedMAC := mac.Sum(nil)

	if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
		return ErrIncorrectPassword
	}
	return nil
}
170 vendor/golang.org/x/crypto/pkcs12/pbkdf.go generated vendored Normal file
@@ -0,0 +1,170 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var (
|
||||
one = big.NewInt(1)
|
||||
)
|
||||
|
||||
// sha1Sum returns the SHA-1 hash of in.
|
||||
func sha1Sum(in []byte) []byte {
|
||||
sum := sha1.Sum(in)
|
||||
return sum[:]
|
||||
}
|
||||
|
||||
// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
|
||||
// repeats of pattern.
|
||||
func fillWithRepeats(pattern []byte, v int) []byte {
|
||||
if len(pattern) == 0 {
|
||||
return nil
|
||||
}
|
||||
outputLen := v * ((len(pattern) + v - 1) / v)
|
||||
return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
|
||||
}
|
||||
|
||||
func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
|
||||
// implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
|
||||
|
||||
// Let H be a hash function built around a compression function f:
|
||||
|
||||
// Z_2^u x Z_2^v -> Z_2^u
|
||||
|
||||
// (that is, H has a chaining variable and output of length u bits, and
|
||||
// the message input to the compression function of H is v bits). The
|
||||
// values for u and v are as follows:
|
||||
|
||||
// HASH FUNCTION VALUE u VALUE v
|
||||
// MD2, MD5 128 512
|
||||
// SHA-1 160 512
|
||||
// SHA-224 224 512
|
||||
// SHA-256 256 512
|
||||
// SHA-384 384 1024
|
||||
// SHA-512 512 1024
|
||||
// SHA-512/224 224 1024
|
||||
// SHA-512/256 256 1024
|
||||
|
||||
// Furthermore, let r be the iteration count.
|
||||
|
||||
// We assume here that u and v are both multiples of 8, as are the
|
||||
// lengths of the password and salt strings (which we denote by p and s,
|
||||
// respectively) and the number n of pseudorandom bits required. In
|
||||
// addition, u and v are of course non-zero.
|
||||
|
||||
// For information on security considerations for MD5 [19], see [25] and
|
||||
// [1], and on those for MD2, see [18].
|
||||
|
||||
// The following procedure can be used to produce pseudorandom bits for
|
||||
// a particular "purpose" that is identified by a byte called "ID".
|
||||
// This standard specifies 3 different values for the ID byte:
|
||||
|
||||
// 1. If ID=1, then the pseudorandom bits being produced are to be used
|
||||
// as key material for performing encryption or decryption.
|
||||
|
||||
// 2. If ID=2, then the pseudorandom bits being produced are to be used
|
||||
// as an IV (Initial Value) for encryption or decryption.
|
||||
|
||||
// 3. If ID=3, then the pseudorandom bits being produced are to be used
|
||||
// as an integrity key for MACing.
|
||||
|
||||
// 1. Construct a string, D (the "diversifier"), by concatenating v/8
|
||||
// copies of ID.
|
||||
var D []byte
|
||||
for i := 0; i < v; i++ {
|
||||
D = append(D, ID)
|
||||
}
|
||||
|
||||
// 2. Concatenate copies of the salt together to create a string S of
|
||||
// length v(ceiling(s/v)) bits (the final copy of the salt may be
|
||||
// truncated to create S). Note that if the salt is the empty
|
||||
// string, then so is S.
|
||||
|
||||
S := fillWithRepeats(salt, v)
|
||||
|
||||
// 3. Concatenate copies of the password together to create a string P
|
||||
// of length v(ceiling(p/v)) bits (the final copy of the password
|
||||
// may be truncated to create P). Note that if the password is the
|
||||
// empty string, then so is P.
|
||||
|
||||
P := fillWithRepeats(password, v)
|
||||
|
||||
// 4. Set I=S||P to be the concatenation of S and P.
|
||||
I := append(S, P...)
|
||||
|
||||
// 5. Set c=ceiling(n/u).
|
||||
c := (size + u - 1) / u
|
||||
|
||||
// 6. For i=1, 2, ..., c, do the following:
|
||||
A := make([]byte, c*20)
|
||||
var IjBuf []byte
|
||||
for i := 0; i < c; i++ {
|
||||
// A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1,
|
||||
// H(H(H(... H(D||I))))
|
||||
Ai := hash(append(D, I...))
|
||||
for j := 1; j < r; j++ {
|
||||
Ai = hash(Ai)
|
||||
}
|
||||
copy(A[i*20:], Ai[:])
|
||||
|
||||
if i < c-1 { // skip on last iteration
|
||||
// B. Concatenate copies of Ai to create a string B of length v
|
||||
// bits (the final copy of Ai may be truncated to create B).
|
||||
var B []byte
|
||||
for len(B) < v {
|
||||
B = append(B, Ai[:]...)
|
||||
}
|
||||
B = B[:v]
|
||||
|
||||
// C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
|
||||
// blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
|
||||
// setting I_j=(I_j+B+1) mod 2^v for each j.
|
||||
{
|
||||
Bbi := new(big.Int).SetBytes(B)
|
||||
Ij := new(big.Int)
|
||||
|
||||
for j := 0; j < len(I)/v; j++ {
|
||||
Ij.SetBytes(I[j*v : (j+1)*v])
|
||||
Ij.Add(Ij, Bbi)
|
||||
Ij.Add(Ij, one)
|
||||
Ijb := Ij.Bytes()
|
||||
// We expect Ijb to be exactly v bytes,
|
||||
// if it is longer or shorter we must
|
||||
// adjust it accordingly.
|
||||
if len(Ijb) > v {
|
||||
Ijb = Ijb[len(Ijb)-v:]
|
||||
}
|
||||
if len(Ijb) < v {
|
||||
if IjBuf == nil {
|
||||
IjBuf = make([]byte, v)
|
||||
}
|
||||
bytesShort := v - len(Ijb)
|
||||
for i := 0; i < bytesShort; i++ {
|
||||
IjBuf[i] = 0
|
||||
}
|
||||
copy(IjBuf[bytesShort:], Ijb)
|
||||
Ijb = IjBuf
|
||||
}
|
||||
copy(I[j*v:(j+1)*v], Ijb)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
|
||||
// bit string, A.
|
||||
|
||||
// 8. Use the first n bits of A as the output of this entire process.
|
||||
return A[:size]
|
||||
|
||||
// If the above process is being used to generate a DES key, the process
|
||||
// should be used to create 64 random bits, and the key's parity bits
|
||||
// should be set after the 64 bits have been produced. Similar concerns
|
||||
// hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
|
||||
// similar keys with parity bits "built into them".
|
||||
}
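The three ID values listed in the comments above map directly onto how the rest of this package calls pbkdf. The sketch below is not part of the vendored file; it assumes it sits inside package pkcs12 (pbkdf, sha1Sum and bmpString are unexported) and uses illustrative salt and iteration values:

package pkcs12

import "fmt"

func pbkdfExample() {
	salt := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	password, _ := bmpString("secret") // passwords are BMP-encoded first
	iterations := 2048

	// Same parameters the SHA-1/3DES PBE scheme and the MAC check use:
	key := pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) // ID=1: 3DES key
	iv := pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)   // ID=2: CBC IV
	mac := pbkdf(sha1Sum, 20, 64, salt, password, iterations, 3, 20) // ID=3: HMAC-SHA1 key

	fmt.Println(len(key), len(iv), len(mac)) // 24 8 20
}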
|
342 vendor/golang.org/x/crypto/pkcs12/pkcs12.go generated vendored Normal file
@@ -0,0 +1,342 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package pkcs12 implements some of PKCS#12.
|
||||
//
|
||||
// This implementation is distilled from https://tools.ietf.org/html/rfc7292
|
||||
// and referenced documents. It is intended for decoding P12/PFX-stored
|
||||
// certificates and keys for use with the crypto/tls package.
|
||||
package pkcs12
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/hex"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
|
||||
oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
|
||||
|
||||
oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
|
||||
oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
|
||||
oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
|
||||
)
|
||||
|
||||
type pfxPdu struct {
|
||||
Version int
|
||||
AuthSafe contentInfo
|
||||
MacData macData `asn1:"optional"`
|
||||
}
|
||||
|
||||
type contentInfo struct {
|
||||
ContentType asn1.ObjectIdentifier
|
||||
Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
|
||||
}
|
||||
|
||||
type encryptedData struct {
|
||||
Version int
|
||||
EncryptedContentInfo encryptedContentInfo
|
||||
}
|
||||
|
||||
type encryptedContentInfo struct {
|
||||
ContentType asn1.ObjectIdentifier
|
||||
ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
|
||||
EncryptedContent []byte `asn1:"tag:0,optional"`
|
||||
}
|
||||
|
||||
func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
|
||||
return i.ContentEncryptionAlgorithm
|
||||
}
|
||||
|
||||
func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
|
||||
|
||||
type safeBag struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Value asn1.RawValue `asn1:"tag:0,explicit"`
|
||||
Attributes []pkcs12Attribute `asn1:"set,optional"`
|
||||
}
|
||||
|
||||
type pkcs12Attribute struct {
|
||||
Id asn1.ObjectIdentifier
|
||||
Value asn1.RawValue `asn1:"set"`
|
||||
}
|
||||
|
||||
type encryptedPrivateKeyInfo struct {
|
||||
AlgorithmIdentifier pkix.AlgorithmIdentifier
|
||||
EncryptedData []byte
|
||||
}
|
||||
|
||||
func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
|
||||
return i.AlgorithmIdentifier
|
||||
}
|
||||
|
||||
func (i encryptedPrivateKeyInfo) Data() []byte {
|
||||
return i.EncryptedData
|
||||
}
|
||||
|
||||
// PEM block types
|
||||
const (
|
||||
certificateType = "CERTIFICATE"
|
||||
privateKeyType = "PRIVATE KEY"
|
||||
)
|
||||
|
||||
// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
|
||||
// trailing data after unmarshaling.
|
||||
func unmarshal(in []byte, out interface{}) error {
|
||||
trailing, err := asn1.Unmarshal(in, out)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(trailing) != 0 {
|
||||
return errors.New("pkcs12: trailing data found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks.
|
||||
func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
|
||||
encodedPassword, err := bmpString(password)
|
||||
if err != nil {
|
||||
return nil, ErrIncorrectPassword
|
||||
}
|
||||
|
||||
bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
|
||||
|
||||
blocks := make([]*pem.Block, 0, len(bags))
|
||||
for _, bag := range bags {
|
||||
block, err := convertBag(&bag, encodedPassword)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
return blocks, nil
|
||||
}
|
||||
|
||||
func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
|
||||
block := &pem.Block{
|
||||
Headers: make(map[string]string),
|
||||
}
|
||||
|
||||
for _, attribute := range bag.Attributes {
|
||||
k, v, err := convertAttribute(&attribute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block.Headers[k] = v
|
||||
}
|
||||
|
||||
switch {
|
||||
case bag.Id.Equal(oidCertBag):
|
||||
block.Type = certificateType
|
||||
certsData, err := decodeCertBag(bag.Value.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
block.Bytes = certsData
|
||||
case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
|
||||
block.Type = privateKeyType
|
||||
|
||||
key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
block.Bytes = x509.MarshalPKCS1PrivateKey(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
block.Bytes, err = x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
|
||||
}
|
||||
return block, nil
|
||||
}
|
||||
|
||||
func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
|
||||
isString := false
|
||||
|
||||
switch {
|
||||
case attribute.Id.Equal(oidFriendlyName):
|
||||
key = "friendlyName"
|
||||
isString = true
|
||||
case attribute.Id.Equal(oidLocalKeyID):
|
||||
key = "localKeyId"
|
||||
case attribute.Id.Equal(oidMicrosoftCSPName):
|
||||
// This key is chosen to match OpenSSL.
|
||||
key = "Microsoft CSP Name"
|
||||
isString = true
|
||||
default:
|
||||
return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
|
||||
}
|
||||
|
||||
if isString {
|
||||
if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
} else {
|
||||
var id []byte
|
||||
if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
value = hex.EncodeToString(id)
|
||||
}
|
||||
|
||||
return key, value, nil
|
||||
}
// Decode extracts a certificate and private key from pfxData. This function
// assumes that there is only one certificate and only one private key in the
// pfxData.
func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
	encodedPassword, err := bmpString(password)
	if err != nil {
		return nil, nil, err
	}

	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
	if err != nil {
		return nil, nil, err
	}

	if len(bags) != 2 {
		err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
		return
	}

	for _, bag := range bags {
		switch {
		case bag.Id.Equal(oidCertBag):
			if certificate != nil {
				err = errors.New("pkcs12: expected exactly one certificate bag")
			}

			certsData, err := decodeCertBag(bag.Value.Bytes)
			if err != nil {
				return nil, nil, err
			}
			certs, err := x509.ParseCertificates(certsData)
			if err != nil {
				return nil, nil, err
			}
			if len(certs) != 1 {
				err = errors.New("pkcs12: expected exactly one certificate in the certBag")
				return nil, nil, err
			}
			certificate = certs[0]

		case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
			if privateKey != nil {
				err = errors.New("pkcs12: expected exactly one key bag")
			}

			if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
				return nil, nil, err
			}
		}
	}

	if certificate == nil {
		return nil, nil, errors.New("pkcs12: certificate missing")
	}
	if privateKey == nil {
		return nil, nil, errors.New("pkcs12: private key missing")
	}

	return
}
func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
	pfx := new(pfxPdu)
	if err := unmarshal(p12Data, pfx); err != nil {
		return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
	}

	if pfx.Version != 3 {
		return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
	}

	if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
		return nil, nil, NotImplementedError("only password-protected PFX is implemented")
	}

	// unmarshal the explicit bytes in the content for type 'data'
	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
		return nil, nil, err
	}

	if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
		return nil, nil, errors.New("pkcs12: no MAC in data")
	}

	if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
		if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
			// some implementations use an empty byte array
			// for the empty string password try one more
			// time with empty-empty password
			password = nil
			err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
		}
		if err != nil {
			return nil, nil, err
		}
	}

	var authenticatedSafe []contentInfo
	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
		return nil, nil, err
	}

	if len(authenticatedSafe) != 2 {
		return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
	}

	for _, ci := range authenticatedSafe {
		var data []byte

		switch {
		case ci.ContentType.Equal(oidDataContentType):
			if err := unmarshal(ci.Content.Bytes, &data); err != nil {
				return nil, nil, err
			}
		case ci.ContentType.Equal(oidEncryptedDataContentType):
			var encryptedData encryptedData
			if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
				return nil, nil, err
			}
			if encryptedData.Version != 0 {
				return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
			}
			if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
				return nil, nil, err
			}
		default:
			return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
		}

		var safeContents []safeBag
		if err := unmarshal(data, &safeContents); err != nil {
			return nil, nil, err
		}
		bags = append(bags, safeContents...)
	}

	return bags, password, nil
}
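For context, a hedged sketch of how a consumer of this vendored package might call the exported Decode function above to build a tls.Certificate; the file name, password, and log-based error handling are illustrative, not taken from this repository.

// Illustrative usage of golang.org/x/crypto/pkcs12.Decode; not part of the diff.
package main

import (
	"crypto/tls"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12"
)

func main() {
	// Hypothetical PKCS#12 bundle holding exactly one certificate and one key,
	// which is what Decode requires.
	pfxData, err := ioutil.ReadFile("client.p12")
	if err != nil {
		log.Fatal(err)
	}

	key, cert, err := pkcs12.Decode(pfxData, "changeit")
	if err != nil {
		log.Fatal(err)
	}

	// tls.Certificate.PrivateKey is an interface, so the *rsa.PrivateKey or
	// *ecdsa.PrivateKey returned by Decode can be assigned directly.
	tlsCert := tls.Certificate{
		Certificate: [][]byte{cert.Raw},
		PrivateKey:  key,
		Leaf:        cert,
	}
	_ = tlsCert // e.g. tls.Config{Certificates: []tls.Certificate{tlsCert}}
}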
57
vendor/golang.org/x/crypto/pkcs12/safebags.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pkcs12

import (
	"crypto/x509"
	"encoding/asn1"
	"errors"
)

var (
	// see https://tools.ietf.org/html/rfc7292#appendix-D
	oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
	oidPKCS8ShroundedKeyBag    = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
	oidCertBag                 = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
)

type certBag struct {
	Id   asn1.ObjectIdentifier
	Data []byte `asn1:"tag:0,explicit"`
}

func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
	pkinfo := new(encryptedPrivateKeyInfo)
	if err = unmarshal(asn1Data, pkinfo); err != nil {
		return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
	}

	pkData, err := pbDecrypt(pkinfo, password)
	if err != nil {
		return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
	}

	ret := new(asn1.RawValue)
	if err = unmarshal(pkData, ret); err != nil {
		return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
	}

	if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
		return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
	}

	return privateKey, nil
}

func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
	bag := new(certBag)
	if err := unmarshal(asn1Data, bag); err != nil {
		return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
	}
	if !bag.Id.Equal(oidCertTypeX509Certificate) {
		return nil, NotImplementedError("only X509 certificates are supported")
	}
	return bag.Data, nil
}