remove dependency on cfssl

This commit is contained in:
Mike Danese
2019-11-03 15:51:53 -08:00
parent 6a19261e96
commit 380f42727d
107 changed files with 0 additions and 23962 deletions


@@ -1,29 +0,0 @@
*.iml
*.swo
*.swp
*.tfstate
*.tfstate.backup
*~
/.idea
/certcheck
/chainfix
/coverage.txt
/createtree
/crlcheck
/ctclient
/ct_server
/ct_hammer
/data
/dumpscts
/etcdiscover
/findlog
/goshawk
/gosmin
/gossip_server
/preloader
/scanlog
/sctcheck
/sctscan
/trillian_log_server
/trillian_log_signer
/trillian.json


@@ -1,87 +0,0 @@
sudo: true # required for CI push into Kubernetes.
language: go
os: linux
go: "1.10"
go_import_path: github.com/google/certificate-transparency-go
env:
- GCE_CI=${ENABLE_GCE_CI} GOFLAGS=
- GOFLAGS=-race
- GOFLAGS= WITH_ETCD=true WITH_COVERAGE=true
- GOFLAGS=-race WITH_ETCD=true
matrix:
fast_finish: true
addons:
apt:
sources:
- mysql-5.7-trusty
packages:
- mysql-server
- mysql-client
services:
- docker
before_install:
- sudo mysql -e "use mysql; update user set authentication_string=PASSWORD('') where User='root'; update user set plugin='mysql_native_password';FLUSH PRIVILEGES;"
- sudo mysql_upgrade
- sudo service mysql restart
install:
- mkdir ../protoc
- |
(
cd ../protoc
wget https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
unzip protoc-3.5.1-${TRAVIS_OS_NAME}-x86_64.zip
)
- export PATH=$(pwd)/../protoc/bin:$PATH
- go get -d -t ./...
- go get github.com/alecthomas/gometalinter
- gometalinter --install
- go get -u github.com/golang/protobuf/proto
- go get -u github.com/golang/protobuf/protoc-gen-go
- go install github.com/golang/mock/mockgen
# install vendored etcd binary
- go install ./vendor/github.com/coreos/etcd/cmd/etcd
- go install ./vendor/github.com/coreos/etcd/cmd/etcdctl
- pushd ${GOPATH}/src/github.com/google/trillian
- go get -d -t ./...
- popd
script:
- set -e
- cd $HOME/gopath/src/github.com/google/certificate-transparency-go
- ./scripts/presubmit.sh ${PRESUBMIT_OPTS} ${WITH_COVERAGE:+--coverage}
- |
# Check re-generation didn't change anything
status=$(git status --porcelain | grep -v coverage) || :
if [[ -n ${status} ]]; then
echo "Regenerated files differ from checked-in versions: ${status}"
git status
git diff
exit 1
fi
- |
if [[ "${WITH_ETCD}" == "true" ]]; then
export ETCD_DIR="${GOPATH}/bin"
fi
- ./trillian/integration/integration_test.sh
- HAMMER_OPTS="--operations=1500" ./trillian/integration/ct_hammer_test.sh
- set +e
after_success:
- cp /tmp/coverage.txt .
- bash <(curl -s https://codecov.io/bash)
- |
# Push up to GCE CI instance if we're running after a merge to master
if [[ "${GCE_CI}" == "true" ]] && [[ $TRAVIS_PULL_REQUEST == "false" ]] && [[ $TRAVIS_BRANCH == "master" ]]; then
. scripts/install_cloud.sh
echo ${GCLOUD_SERVICE_KEY_CI} | base64 --decode -i > ${HOME}/gcloud-service-key.json
gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json
rm ${HOME}/gcloud-service-key.json
. scripts/deploy_gce_ci.sh
fi


@@ -1,27 +0,0 @@
# This is the official list of authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Comodo CA Limited
Ed Maste <emaste@freebsd.org>
Fiaz Hossain <fiaz.hossain@salesforce.com>
Google Inc.
Internet Security Research Group
Jeff Trawick <trawick@gmail.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Laël Cellier <lael.cellier@gmail.com>
Mark Schloesser <ms@mwcollect.org>
NORDUnet A/S
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
PrimeKey Solutions AB
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Venafi, Inc.
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>


@@ -1,38 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"serialization.go",
"signatures.go",
"types.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go",
importpath = "github.com/google/certificate-transparency-go",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/asn1:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/client:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/jsonclient:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/tls:all-srcs",
"//vendor/github.com/google/certificate-transparency-go/x509:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,208 +0,0 @@
# CERTIFICATE-TRANSPARENCY-GO Changelog
## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements
Published 2018-07-05 09:21:34 +0000 UTC
Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`.
The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`.
An implementation of Gossip has been added. See the `gossip/minimal` package for more information.
An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10.
Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20)
## v1.0.19 - CTFE User Quota
Published 2018-06-01 13:51:52 +0000 UTC
CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains.
Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19)
## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config
Published 2018-06-01 14:28:20 +0000 UTC
Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs.
The `RequestLog` API allows for logging of SCTs when they are issued by CTFE.
The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package.
Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
Published 2018-06-01 14:25:16 +0000 UTC
Now uses Merkle Tree verification from Trillian.
The CT server now supports CORS.
Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
## v1.0.16 - Lifecycle test / Go 1.10.1
Published 2018-06-01 14:22:23 +0000 UTC
An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
Changes to `x509` were merged from Go 1.10.1.
Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
## v1.0.15 - More control of verification, grpclb, stackdriver metrics
Published 2018-06-01 14:20:32 +0000 UTC
Facilities were added to the `x509` package to control whether verification checks are applied.
Log server requests are now balanced using `gRPClb`.
For Kubernetes, metrics can be published to Stackdriver monitoring.
Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
## v1.0.14 - SQLite Removed, LeafHashForLeaf
Published 2018-06-01 14:15:37 +0000 UTC
Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests.
Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
Published 2018-06-01 14:15:21 +0000 UTC
Some of our custom crypto packages that wrapped calls to the standard package have been removed, and the base features are now used directly.
Updates were made to GCE ingress and health checks.
The log list utility can verify signatures.
Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
## v1.0.12 - Client / util updates & CTFE fixes
Published 2018-06-01 14:13:42 +0000 UTC
The CT client can now use a JSON loglist to find logs.
CTFE had a fix applied for preissued precerts.
A DNS client was added and CT client was extended to support DNS retrieval.
Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
## v1.0.11 - Kubernetes CI / Integration fixes
Published 2018-06-01 14:12:18 +0000 UTC
Updates to Kubernetes configs, mostly related to running a CI instance.
Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
Published 2018-06-01 14:09:47 +0000 UTC
The CT client was using the wrong protobuf library package. To guard against this in future, a check has been added to our lint config.
The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
## v1.0.9 - Scanner, x509, utility and client fixes
Published 2018-06-01 14:11:13 +0000 UTC
The `scanner` utility now displays throughput stats.
Build instructions and README files were updated.
The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
## v1.0.8 - Client fixes, align with trillian repo
Published 2018-06-01 14:06:44 +0000 UTC
Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
## v1.0.7 - CTFE fixes
Published 2018-06-01 14:06:13 +0000 UTC
An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
## v1.0.6 - crlcheck improvements / other fixes
Published 2018-06-01 14:04:22 +0000 UTC
The `crlcheck` utility has had several fixes and enhancements. Additionally the `hammer` now supports temporal logs.
Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
## v1.0.5 - X509 and asn1 fixes
Published 2018-06-01 14:02:58 +0000 UTC
This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated.
Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
## v1.0.4 - Multi log backend configs
Published 2018-06-01 14:02:07 +0000 UTC
Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. This allows for, e.g., regional backend deployment with common frontend servers.
Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
## v1.0.3 - Hammer updates, use standard context
Published 2018-06-01 14:01:11 +0000 UTC
After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only context package that should be used from now on.
Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
## v1.0.2 - Go 1.9
Published 2018-06-01 14:00:00 +0000 UTC
Go 1.9 is now required to build the code.
Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
## v1.0.1 - Hammer and client improvements
Published 2018-06-01 13:59:29 +0000 UTC
Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
## v1.0 - First Trillian CT Release
Published 2018-06-01 13:59:00 +0000 UTC
This is the point that corresponds to the 1.0 release in the trillian repo.
Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)


@@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**; it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and set up a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages can be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request


@@ -1,57 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Adam Eijdenberg <eijdenberg@google.com> <adam.eijdenberg@gmail.com>
Al Cutter <al@google.com>
Ben Laurie <benl@google.com> <ben@links.org>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
David Drysdale <drysdale@google.com>
Deyan Bektchiev <deyan.bektchiev@venafi.com> <deyan@bektchiev.net>
Ed Maste <emaste@freebsd.org>
Emilia Kasper <ekasper@google.com>
Eran Messeri <eranm@google.com> <eran.mes@gmail.com>
Fiaz Hossain <fiaz.hossain@salesforce.com>
Gary Belvin <gbelvin@google.com> <gdbelvin@gmail.com>
Jeff Trawick <trawick@gmail.com>
Joe Tsai <joetsai@digital-static.net>
Kat Joyce <katjoyce@google.com>
Katriel Cohn-Gordon <katriel.cohn-gordon@cybersecurity.ox.ac.uk>
Kiril Nikolov <kiril.nikolov@venafi.com>
Konrad Kraszewski <kraszewski@google.com> <laiquendir@gmail.com>
Laël Cellier <lael.cellier@gmail.com>
Linus Nordberg <linus@nordu.net>
Mark Schloesser <ms@mwcollect.org>
Nicholas Galbreath <nickg@client9.com>
Oliver Weidner <Oliver.Weidner@gmail.com>
Pascal Leroy <phl@google.com>
Paul Hadfield <hadfieldp@google.com> <paul@phad.org.uk>
Paul Lietar <lietar@google.com>
Pierre Phaneuf <pphaneuf@google.com>
Rob Percival <robpercival@google.com>
Rob Stradling <rob@comodo.com>
Roland Shoemaker <roland@letsencrypt.org>
Ruslan Kovalov <ruslan.kovalyov@gmail.com>
Samuel Lidén Borell <samuel@kodafritt.se>
Vladimir Rutsky <vladimir@rutsky.org>
Ximin Luo <infinity0@gmx.com>


@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,144 +0,0 @@
# Certificate Transparency: Go Code
[![Build Status](https://travis-ci.org/google/certificate-transparency-go.svg?branch=master)](https://travis-ci.org/google/certificate-transparency-go)
[![Go Report Card](https://goreportcard.com/badge/github.com/google/certificate-transparency-go)](https://goreportcard.com/report/github.com/google/certificate-transparency-go)
[![GoDoc](https://godoc.org/github.com/google/certificate-transparency-go?status.svg)](https://godoc.org/github.com/google/certificate-transparency-go)
This repository holds Go code related to
[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The
repository requires Go version 1.9.
- [Repository Structure](#repository-structure)
- [Trillian CT Personality](#trillian-ct-personality)
- [Working on the Code](#working-on-the-code)
- [Rebuilding Generated Code](#rebuilding-generated-code)
- [Updating Vendor Code](#updating-vendor-code)
- [Running Codebase Checks](#running-codebase-checks)
## Repository Structure
The main parts of the repository are:
- Encoding libraries:
- `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and
`crypto/x509` libraries. We maintain separate forks of these packages
because CT is intended to act as an observatory of certificates across the
ecosystem; as such, we need to be able to process somewhat-malformed
certificates that the stricter upstream code would (correctly) reject.
Our `x509` fork also includes code for working with the
[pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1).
- `tls` holds a library for processing TLS-encoded data as described in
[RFC 5246](https://tools.ietf.org/html/rfc5246).
- `x509util` provides additional utilities for dealing with
`x509.Certificate`s.
- CT client libraries:
- The top-level `ct` package (in `.`) holds types and utilities for working
with CT data structures defined in
[RFC 6962](https://tools.ietf.org/html/rfc6962).
- `client/` and `jsonclient/` hold libraries that allow access to CT Logs
via entrypoints described in
[section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4); a brief usage sketch follows this list.
- `scanner/` holds a library for scanning the entire contents of an existing
CT Log.
- Command line tools:
- `./client/ctclient` allows interaction with a CT Log
- `./scanner/scanlog` allows an existing CT Log to be scanned for certificates
of interest; please be polite when running this tool against a Log.
- `./x509util/certcheck` allows display and verification of certificates
- `./x509util/crlcheck` allows display and verification of certificate
revocation lists (CRLs).
- CT Personality for [Trillian](https://github.com/google/trillian):
- `trillian/` holds code that allows a Certificate Transparency Log to be
run using a Trillian Log as its back-end -- see
[below](#trillian-ct-personality).
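As a minimal sketch of using the client libraries above, the example below fetches a log's signed tree head via `client.New` and `GetSTH` (the RFC 6962 `get-sth` entrypoint); the log URL is a placeholder and the jsonclient options are left at their defaults.
```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// Placeholder URL; substitute the base URL of a real RFC 6962 log.
	lc, err := client.New("https://ct.example.com/log", &http.Client{}, jsonclient.Options{})
	if err != nil {
		log.Fatalf("failed to create log client: %v", err)
	}
	// Fetch the log's latest signed tree head.
	sth, err := lc.GetSTH(context.Background())
	if err != nil {
		log.Fatalf("get-sth failed: %v", err)
	}
	fmt.Printf("tree size %d, timestamp %d\n", sth.TreeSize, sth.Timestamp)
}
```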
## Trillian CT Personality
The `trillian/` subdirectory holds code and scripts for running a CT Log based
on the [Trillian](https://github.com/google/trillian) general transparency Log.
The main code for the CT personality is held in `trillian/ctfe`; this code
responds to HTTP requests on the
[CT API paths](https://tools.ietf.org/html/rfc6962#section-4) and translates
them to the equivalent gRPC API requests to the Trillian Log.
This obviously relies on the gRPC API definitions at
`github.com/google/trillian`; the code also uses common libraries from the
Trillian project for:
- exposing monitoring and statistics via an `interface` and corresponding
Prometheus implementation (`github.com/google/trillian/monitoring/...`)
- dealing with cryptographic keys (`github.com/google/trillian/crypto/...`).
The `trillian/integration/` directory holds scripts and tests for running the whole
system locally. In particular:
- `trillian/integration/ct_integration_test.sh` brings up local processes
running a Trillian Log server, signer and a CT personality, and exercises the
complete set of RFC 6962 API entrypoints.
- `trillian/integration/ct_hammer_test.sh` brings up a complete system and runs
a continuous randomized test of the CT entrypoints.
These scripts require a local database instance to be configured as described
in the [Trillian instructions](https://github.com/google/trillian#mysql-setup).
## Working on the Code
Developers who want to make changes to the codebase need some additional
dependencies and tools, described in the following sections. The
[Travis configuration](.travis.yml) for the codebase is also a useful reference
for the required tools and scripts, as it may be more up-to-date than this
document.
### Rebuilding Generated Code
Some of the CT Go code is autogenerated from other files:
- [Protocol buffer](https://developers.google.com/protocol-buffers/) message
definitions are converted to `.pb.go` implementations.
- A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
created with [GoMock](https://github.com/golang/mock).
Re-generating mock or protobuf files is only needed if you're changing
the original files; if you do, you'll need to install the prerequisites:
- `mockgen` tool from https://github.com/golang/mock
- `protoc`, [Go support for protoc](https://github.com/golang/protobuf) (see
documentation linked from the
[protobuf site](https://github.com/google/protobuf))
and run the following:
```bash
go generate -x ./... # hunts for //go:generate comments and runs them
```
### Updating Vendor Code
The codebase includes a couple of external projects under the `vendor/`
subdirectory, to ensure that builds use a fixed version (typically because the
upstream repository does not guarantee back-compatibility between the tip
`master` branch and the current stable release). See
[instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
for how to update vendored subtrees.
### Running Codebase Checks
The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
and tests over the codebase.
```bash
# Install gometalinter and all linters
go get -u github.com/alecthomas/gometalinter
gometalinter --install
# Run code generation, build, test and linters
./scripts/presubmit.sh
# Run build, test and linters but skip code generation
./scripts/presubmit.sh --no-generate
# Or just run the linters alone:
gometalinter --config=gometalinter.json ./...
```


@@ -1,27 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"asn1.go",
"common.go",
"marshal.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/asn1",
importpath = "github.com/google/certificate-transparency-go/asn1",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

File diff suppressed because it is too large


@@ -1,177 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"reflect"
"strconv"
"strings"
)
// ASN.1 objects have metadata preceding them:
// the tag: the type of the object
// a flag denoting if this object is compound or not
// the class type: the namespace of the tag
// the length of the object, in bytes
// Here are some standard tags and classes
// ASN.1 tags represent the type of the following object.
const (
TagBoolean = 1
TagInteger = 2
TagBitString = 3
TagOctetString = 4
TagNull = 5
TagOID = 6
TagEnum = 10
TagUTF8String = 12
TagSequence = 16
TagSet = 17
TagNumericString = 18
TagPrintableString = 19
TagT61String = 20
TagIA5String = 22
TagUTCTime = 23
TagGeneralizedTime = 24
TagGeneralString = 27
)
// ASN.1 class types represent the namespace of the tag.
const (
ClassUniversal = 0
ClassApplication = 1
ClassContextSpecific = 2
ClassPrivate = 3
)
type tagAndLength struct {
class, tag, length int
isCompound bool
}
// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
// of" and "in addition to". When not specified, every primitive type has a
// default tag in the UNIVERSAL class.
//
// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
//
// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
// /additional/ tag would wrap the default tag. This explicit tag will have the
// compound flag set.
//
// (This is used in order to remove ambiguity with optional elements.)
//
// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
// tagging with tag strings on the fields of a structure.
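// For example, a struct field tagged `asn1:"explicit,tag:0"` is wrapped in an
// additional [0] EXPLICIT tag, whereas `asn1:"tag:0"` alone replaces the default
// UNIVERSAL tag with an IMPLICIT [0] context-specific tag.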
// fieldParameters is the parsed representation of tag string from a structure field.
type fieldParameters struct {
optional bool // true iff the field is OPTIONAL
explicit bool // true iff an EXPLICIT tag is in use.
application bool // true iff an APPLICATION tag is in use.
defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
stringType int // the string tag to use when marshaling.
timeType int // the time tag to use when marshaling.
set bool // true iff this should be encoded as a SET
omitEmpty bool // true iff this should be omitted if empty when marshaling.
name string // name of field for better diagnostics
// Invariants:
// if explicit is set, tag is non-nil.
}
// Given a tag string with the format specified in the package comment,
// parseFieldParameters will parse it into a fieldParameters structure,
// ignoring unknown parts of the string.
func parseFieldParameters(str string) (ret fieldParameters) {
for _, part := range strings.Split(str, ",") {
switch {
case part == "optional":
ret.optional = true
case part == "explicit":
ret.explicit = true
if ret.tag == nil {
ret.tag = new(int)
}
case part == "generalized":
ret.timeType = TagGeneralizedTime
case part == "utc":
ret.timeType = TagUTCTime
case part == "ia5":
ret.stringType = TagIA5String
case part == "printable":
ret.stringType = TagPrintableString
case part == "numeric":
ret.stringType = TagNumericString
case part == "utf8":
ret.stringType = TagUTF8String
case strings.HasPrefix(part, "default:"):
i, err := strconv.ParseInt(part[8:], 10, 64)
if err == nil {
ret.defaultValue = new(int64)
*ret.defaultValue = i
}
case strings.HasPrefix(part, "tag:"):
i, err := strconv.Atoi(part[4:])
if err == nil {
ret.tag = new(int)
*ret.tag = i
}
case part == "set":
ret.set = true
case part == "application":
ret.application = true
if ret.tag == nil {
ret.tag = new(int)
}
case part == "omitempty":
ret.omitEmpty = true
}
}
return
}
// Given a reflected Go type, getUniversalType returns the default tag number
// and expected compound flag.
func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) {
switch t {
case rawValueType:
return true, -1, false, true
case objectIdentifierType:
return false, TagOID, false, true
case bitStringType:
return false, TagBitString, false, true
case timeType:
return false, TagUTCTime, false, true
case enumeratedType:
return false, TagEnum, false, true
case bigIntType:
return false, TagInteger, false, true
}
switch t.Kind() {
case reflect.Bool:
return false, TagBoolean, false, true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return false, TagInteger, false, true
case reflect.Struct:
return false, TagSequence, true, true
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 {
return false, TagOctetString, false, true
}
if strings.HasSuffix(t.Name(), "SET") {
return false, TagSet, true, true
}
return false, TagSequence, true, true
case reflect.String:
return false, TagPrintableString, false, true
}
return false, 0, false, false
}


@@ -1,689 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package asn1
import (
"errors"
"fmt"
"math/big"
"reflect"
"time"
"unicode/utf8"
)
var (
byte00Encoder encoder = byteEncoder(0x00)
byteFFEncoder encoder = byteEncoder(0xff)
)
// encoder represents an ASN.1 element that is waiting to be marshaled.
type encoder interface {
// Len returns the number of bytes needed to marshal this element.
Len() int
// Encode encodes this element by writing Len() bytes to dst.
Encode(dst []byte)
}
type byteEncoder byte
func (c byteEncoder) Len() int {
return 1
}
func (c byteEncoder) Encode(dst []byte) {
dst[0] = byte(c)
}
type bytesEncoder []byte
func (b bytesEncoder) Len() int {
return len(b)
}
func (b bytesEncoder) Encode(dst []byte) {
if copy(dst, b) != len(b) {
panic("internal error")
}
}
type stringEncoder string
func (s stringEncoder) Len() int {
return len(s)
}
func (s stringEncoder) Encode(dst []byte) {
if copy(dst, s) != len(s) {
panic("internal error")
}
}
type multiEncoder []encoder
func (m multiEncoder) Len() int {
var size int
for _, e := range m {
size += e.Len()
}
return size
}
func (m multiEncoder) Encode(dst []byte) {
var off int
for _, e := range m {
e.Encode(dst[off:])
off += e.Len()
}
}
type taggedEncoder struct {
// scratch contains temporary space for encoding the tag and length of
// an element in order to avoid extra allocations.
scratch [8]byte
tag encoder
body encoder
}
func (t *taggedEncoder) Len() int {
return t.tag.Len() + t.body.Len()
}
func (t *taggedEncoder) Encode(dst []byte) {
t.tag.Encode(dst)
t.body.Encode(dst[t.tag.Len():])
}
type int64Encoder int64
func (i int64Encoder) Len() int {
n := 1
for i > 127 {
n++
i >>= 8
}
for i < -128 {
n++
i >>= 8
}
return n
}
func (i int64Encoder) Encode(dst []byte) {
n := i.Len()
for j := 0; j < n; j++ {
dst[j] = byte(i >> uint((n-1-j)*8))
}
}
func base128IntLength(n int64) int {
if n == 0 {
return 1
}
l := 0
for i := n; i > 0; i >>= 7 {
l++
}
return l
}
func appendBase128Int(dst []byte, n int64) []byte {
l := base128IntLength(n)
for i := l - 1; i >= 0; i-- {
o := byte(n >> uint(i*7))
o &= 0x7f
if i != 0 {
o |= 0x80
}
dst = append(dst, o)
}
return dst
}
func makeBigInt(n *big.Int, fieldName string) (encoder, error) {
if n == nil {
return nil, StructuralError{"empty integer", fieldName}
}
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement
// form. So we'll invert and subtract 1. If the
// most-significant-bit isn't set then we'll need to pad the
// beginning with 0xff in order to keep the number negative.
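// For example, -1 is marshaled as the single byte 0xff, and -256 as 0xff 0x00.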
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
}
return bytesEncoder(bytes), nil
} else if n.Sign() == 0 {
// Zero is written as a single zero byte rather than no bytes.
return byte00Encoder, nil
} else {
bytes := n.Bytes()
if len(bytes) > 0 && bytes[0]&0x80 != 0 {
// We'll have to pad this with 0x00 in order to stop it
// looking like a negative number.
return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
}
return bytesEncoder(bytes), nil
}
}
func appendLength(dst []byte, i int) []byte {
n := lengthLength(i)
for ; n > 0; n-- {
dst = append(dst, byte(i>>uint((n-1)*8)))
}
return dst
}
func lengthLength(i int) (numBytes int) {
numBytes = 1
for i > 255 {
numBytes++
i >>= 8
}
return
}
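// appendTagAndLength appends the DER identifier and length octets for t to dst.
// For example, a 300-byte universal SEQUENCE yields the header bytes 0x30 0x82 0x01 0x2c.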
func appendTagAndLength(dst []byte, t tagAndLength) []byte {
b := uint8(t.class) << 6
if t.isCompound {
b |= 0x20
}
if t.tag >= 31 {
b |= 0x1f
dst = append(dst, b)
dst = appendBase128Int(dst, int64(t.tag))
} else {
b |= uint8(t.tag)
dst = append(dst, b)
}
if t.length >= 128 {
l := lengthLength(t.length)
dst = append(dst, 0x80|byte(l))
dst = appendLength(dst, t.length)
} else {
dst = append(dst, byte(t.length))
}
return dst
}
type bitStringEncoder BitString
func (b bitStringEncoder) Len() int {
return len(b.Bytes) + 1
}
func (b bitStringEncoder) Encode(dst []byte) {
dst[0] = byte((8 - b.BitLength%8) % 8)
if copy(dst[1:], b.Bytes) != len(b.Bytes) {
panic("internal error")
}
}
type oidEncoder []int
func (oid oidEncoder) Len() int {
l := base128IntLength(int64(oid[0]*40 + oid[1]))
for i := 2; i < len(oid); i++ {
l += base128IntLength(int64(oid[i]))
}
return l
}
func (oid oidEncoder) Encode(dst []byte) {
dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
for i := 2; i < len(oid); i++ {
dst = appendBase128Int(dst, int64(oid[i]))
}
}
func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) {
if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
return nil, StructuralError{"invalid object identifier", fieldName}
}
return oidEncoder(oid), nil
}
func makePrintableString(s, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
// The asterisk is often used in PrintableString, even though
// it is invalid. If a PrintableString was specifically
// requested then the asterisk is permitted by this code.
// Ampersand is allowed in parsing due to a handful of CA
// certificates; however, when making new certificates
// it is rejected.
if !isPrintable(s[i], allowAsterisk, rejectAmpersand) {
return nil, StructuralError{"PrintableString contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeIA5String(s, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
if s[i] > 127 {
return nil, StructuralError{"IA5String contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeNumericString(s string, fieldName string) (e encoder, err error) {
for i := 0; i < len(s); i++ {
if !isNumeric(s[i]) {
return nil, StructuralError{"NumericString contains invalid character", fieldName}
}
}
return stringEncoder(s), nil
}
func makeUTF8String(s string) encoder {
return stringEncoder(s)
}
func appendTwoDigits(dst []byte, v int) []byte {
return append(dst, byte('0'+(v/10)%10), byte('0'+v%10))
}
func appendFourDigits(dst []byte, v int) []byte {
var bytes [4]byte
for i := range bytes {
bytes[3-i] = '0' + byte(v%10)
v /= 10
}
return append(dst, bytes[:]...)
}
func outsideUTCRange(t time.Time) bool {
year := t.Year()
return year < 1950 || year >= 2050
}
func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) {
dst := make([]byte, 0, 18)
dst, err = appendUTCTime(dst, t, fieldName)
if err != nil {
return nil, err
}
return bytesEncoder(dst), nil
}
func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) {
dst := make([]byte, 0, 20)
dst, err = appendGeneralizedTime(dst, t, fieldName)
if err != nil {
return nil, err
}
return bytesEncoder(dst), nil
}
func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
year := t.Year()
switch {
case 1950 <= year && year < 2000:
dst = appendTwoDigits(dst, year-1900)
case 2000 <= year && year < 2050:
dst = appendTwoDigits(dst, year-2000)
default:
return nil, StructuralError{"cannot represent time as UTCTime", fieldName}
}
return appendTimeCommon(dst, t), nil
}
func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) {
year := t.Year()
if year < 0 || year > 9999 {
return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName}
}
dst = appendFourDigits(dst, year)
return appendTimeCommon(dst, t), nil
}
func appendTimeCommon(dst []byte, t time.Time) []byte {
_, month, day := t.Date()
dst = appendTwoDigits(dst, int(month))
dst = appendTwoDigits(dst, day)
hour, min, sec := t.Clock()
dst = appendTwoDigits(dst, hour)
dst = appendTwoDigits(dst, min)
dst = appendTwoDigits(dst, sec)
_, offset := t.Zone()
switch {
case offset/60 == 0:
return append(dst, 'Z')
case offset > 0:
dst = append(dst, '+')
case offset < 0:
dst = append(dst, '-')
}
offsetMinutes := offset / 60
if offsetMinutes < 0 {
offsetMinutes = -offsetMinutes
}
dst = appendTwoDigits(dst, offsetMinutes/60)
dst = appendTwoDigits(dst, offsetMinutes%60)
return dst
}
func stripTagAndLength(in []byte) []byte {
_, offset, err := parseTagAndLength(in, 0, "")
if err != nil {
return in
}
return in[offset:]
}
func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) {
switch value.Type() {
case flagType:
return bytesEncoder(nil), nil
case timeType:
t := value.Interface().(time.Time)
if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
return makeGeneralizedTime(t, params.name)
}
return makeUTCTime(t, params.name)
case bitStringType:
return bitStringEncoder(value.Interface().(BitString)), nil
case objectIdentifierType:
return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name)
case bigIntType:
return makeBigInt(value.Interface().(*big.Int), params.name)
}
switch v := value; v.Kind() {
case reflect.Bool:
if v.Bool() {
return byteFFEncoder, nil
}
return byte00Encoder, nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return int64Encoder(v.Int()), nil
case reflect.Struct:
t := v.Type()
for i := 0; i < t.NumField(); i++ {
if t.Field(i).PkgPath != "" {
return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name}
}
}
startingField := 0
n := t.NumField()
if n == 0 {
return bytesEncoder(nil), nil
}
// If the first element of the structure is a non-empty
// RawContents, then we don't bother serializing the rest.
if t.Field(0).Type == rawContentsType {
s := v.Field(0)
if s.Len() > 0 {
bytes := s.Bytes()
/* The RawContents will contain the tag and
* length fields but we'll also be writing
* those ourselves, so we strip them out of
* bytes */
return bytesEncoder(stripTagAndLength(bytes)), nil
}
startingField = 1
}
switch n1 := n - startingField; n1 {
case 0:
return bytesEncoder(nil), nil
case 1:
return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1")))
default:
m := make([]encoder, n1)
for i := 0; i < n1; i++ {
m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1")))
if err != nil {
return nil, err
}
}
return multiEncoder(m), nil
}
case reflect.Slice:
sliceType := v.Type()
if sliceType.Elem().Kind() == reflect.Uint8 {
return bytesEncoder(v.Bytes()), nil
}
var fp fieldParameters
switch l := v.Len(); l {
case 0:
return bytesEncoder(nil), nil
case 1:
return makeField(v.Index(0), fp)
default:
m := make([]encoder, l)
for i := 0; i < l; i++ {
m[i], err = makeField(v.Index(i), fp)
if err != nil {
return nil, err
}
}
return multiEncoder(m), nil
}
case reflect.String:
switch params.stringType {
case TagIA5String:
return makeIA5String(v.String(), params.name)
case TagPrintableString:
return makePrintableString(v.String(), params.name)
case TagNumericString:
return makeNumericString(v.String(), params.name)
default:
return makeUTF8String(v.String()), nil
}
}
return nil, StructuralError{"unknown Go type", params.name}
}
func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) {
if !v.IsValid() {
return nil, fmt.Errorf("asn1: cannot marshal nil value")
}
// If the field is an interface{} then recurse into it.
if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
return makeField(v.Elem(), params)
}
if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
return bytesEncoder(nil), nil
}
if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
defaultValue := reflect.New(v.Type()).Elem()
defaultValue.SetInt(*params.defaultValue)
if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
return bytesEncoder(nil), nil
}
}
// If no default value is given then the zero value for the type is
// assumed to be the default value. This isn't obviously the correct
// behavior, but it's what Go has traditionally done.
if params.optional && params.defaultValue == nil {
if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
return bytesEncoder(nil), nil
}
}
if v.Type() == rawValueType {
rv := v.Interface().(RawValue)
if len(rv.FullBytes) != 0 {
return bytesEncoder(rv.FullBytes), nil
}
t := new(taggedEncoder)
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}))
t.body = bytesEncoder(rv.Bytes)
return t, nil
}
matchAny, tag, isCompound, ok := getUniversalType(v.Type())
if !ok || matchAny {
return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name}
}
if params.timeType != 0 && tag != TagUTCTime {
return nil, StructuralError{"explicit time type given to non-time member", params.name}
}
if params.stringType != 0 && tag != TagPrintableString {
return nil, StructuralError{"explicit string type given to non-string member", params.name}
}
switch tag {
case TagPrintableString:
if params.stringType == 0 {
// This is a string without an explicit string type. We'll use
// a PrintableString if the character set in the string is
// sufficiently limited, otherwise we'll use a UTF8String.
for _, r := range v.String() {
if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) {
if !utf8.ValidString(v.String()) {
return nil, errors.New("asn1: string not valid UTF-8")
}
tag = TagUTF8String
break
}
}
} else {
tag = params.stringType
}
case TagUTCTime:
if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
tag = TagGeneralizedTime
}
}
if params.set {
if tag != TagSequence {
return nil, StructuralError{"non sequence tagged as set", params.name}
}
tag = TagSet
}
t := new(taggedEncoder)
t.body, err = makeBody(v, params)
if err != nil {
return nil, err
}
bodyLen := t.body.Len()
class := ClassUniversal
if params.tag != nil {
if params.application {
class = ClassApplication
} else {
class = ClassContextSpecific
}
if params.explicit {
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound}))
tt := new(taggedEncoder)
tt.body = t
tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{
class: class,
tag: *params.tag,
length: bodyLen + t.tag.Len(),
isCompound: true,
}))
return tt, nil
}
// implicit tag.
tag = *params.tag
}
t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound}))
return t, nil
}
// Marshal returns the ASN.1 encoding of val.
//
// In addition to the struct tags recognised by Unmarshal, the following can be
// used:
//
// ia5: causes strings to be marshaled as ASN.1, IA5String values
// omitempty: causes empty slices to be skipped
// printable: causes strings to be marshaled as ASN.1, PrintableString values
// utf8: causes strings to be marshaled as ASN.1, UTF8String values
// utc: causes time.Time to be marshaled as ASN.1, UTCTime values
// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values
func Marshal(val interface{}) ([]byte, error) {
return MarshalWithParams(val, "")
}
// MarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func MarshalWithParams(val interface{}, params string) ([]byte, error) {
e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params))
if err != nil {
return nil, err
}
b := make([]byte, e.Len())
e.Encode(b)
return b, nil
}

View File

@@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"getentries.go",
"logclient.go",
"multilog.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/client",
importpath = "github.com/google/certificate-transparency-go/client",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes:go_default_library",
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/client/configpb:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/jsonclient:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/client/configpb:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"gen.go",
"multilog.pb.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/client/configpb",
importpath = "github.com/google/certificate-transparency-go/client/configpb",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/protobuf/proto:go_default_library",
"//vendor/github.com/golang/protobuf/ptypes/timestamp:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,17 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configpb
//go:generate protoc -I=. -I=$GOPATH/src --go_out=:. multilog.proto

View File

@@ -1,158 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: multilog.proto
package configpb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// TemporalLogConfig is a set of LogShardConfig messages, whose
// time limits should be contiguous.
type TemporalLogConfig struct {
Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} }
func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) }
func (*TemporalLogConfig) ProtoMessage() {}
func (*TemporalLogConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_multilog_3c9b797b88da6f07, []int{0}
}
func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b)
}
func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic)
}
func (dst *TemporalLogConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_TemporalLogConfig.Merge(dst, src)
}
func (m *TemporalLogConfig) XXX_Size() int {
return xxx_messageInfo_TemporalLogConfig.Size(m)
}
func (m *TemporalLogConfig) XXX_DiscardUnknown() {
xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m)
}
var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo
func (m *TemporalLogConfig) GetShard() []*LogShardConfig {
if m != nil {
return m.Shard
}
return nil
}
// LogShardConfig describes the acceptable date range for a single shard of a temporal
// log.
type LogShardConfig struct {
Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
// The log's public key in DER-encoded PKIX form.
PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"`
// not_after_start defines the start of the range of acceptable NotAfter
// values, inclusive.
// Leaving this unset implies no lower bound to the range.
NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"`
// not_after_limit defines the end of the range of acceptable NotAfter values,
// exclusive.
// Leaving this unset implies no upper bound to the range.
NotAfterLimit *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LogShardConfig) Reset() { *m = LogShardConfig{} }
func (m *LogShardConfig) String() string { return proto.CompactTextString(m) }
func (*LogShardConfig) ProtoMessage() {}
func (*LogShardConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_multilog_3c9b797b88da6f07, []int{1}
}
func (m *LogShardConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LogShardConfig.Unmarshal(m, b)
}
func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic)
}
func (dst *LogShardConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_LogShardConfig.Merge(dst, src)
}
func (m *LogShardConfig) XXX_Size() int {
return xxx_messageInfo_LogShardConfig.Size(m)
}
func (m *LogShardConfig) XXX_DiscardUnknown() {
xxx_messageInfo_LogShardConfig.DiscardUnknown(m)
}
var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo
func (m *LogShardConfig) GetUri() string {
if m != nil {
return m.Uri
}
return ""
}
func (m *LogShardConfig) GetPublicKeyDer() []byte {
if m != nil {
return m.PublicKeyDer
}
return nil
}
func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp {
if m != nil {
return m.NotAfterStart
}
return nil
}
func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp {
if m != nil {
return m.NotAfterLimit
}
return nil
}
func init() {
proto.RegisterType((*TemporalLogConfig)(nil), "configpb.TemporalLogConfig")
proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig")
}
func init() { proto.RegisterFile("multilog.proto", fileDescriptor_multilog_3c9b797b88da6f07) }
var fileDescriptor_multilog_3c9b797b88da6f07 = []byte{
// 241 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a,
0xe5, 0x0b, 0xa0, 0x6c, 0x64, 0x4a, 0xbb, 0x47, 0x4e, 0xeb, 0x18, 0x0b, 0x3b, 0xcf, 0x72, 0x5e,
0x86, 0xfe, 0x25, 0x9f, 0x84, 0x1c, 0x2b, 0x43, 0x37, 0xb6, 0xa7, 0x77, 0xcf, 0xb9, 0xd2, 0xa5,
0xb9, 0x1b, 0x2d, 0x1a, 0x0b, 0x5a, 0xf8, 0x00, 0x08, 0xec, 0xee, 0x08, 0x7d, 0x67, 0xb4, 0x6f,
0x57, 0x2f, 0x1a, 0x40, 0x5b, 0xb5, 0x99, 0xfe, 0xed, 0xd8, 0x6d, 0xd0, 0x38, 0x35, 0xa0, 0x74,
0x3e, 0xa1, 0xeb, 0x1d, 0x7d, 0x3e, 0x28, 0xe7, 0x21, 0x48, 0x5b, 0x81, 0xde, 0x4d, 0x1e, 0x13,
0xf4, 0x66, 0xf8, 0x96, 0xe1, 0xc4, 0x49, 0x91, 0x95, 0x8b, 0x2d, 0x17, 0x73, 0x9f, 0xa8, 0x40,
0xef, 0x63, 0x92, 0xc0, 0x3a, 0x61, 0xeb, 0x5f, 0x42, 0xf3, 0xcb, 0x84, 0x3d, 0xd1, 0x6c, 0x0c,
0x86, 0x93, 0x82, 0x94, 0xf7, 0x75, 0x3c, 0xd9, 0x2b, 0xcd, 0xfd, 0xd8, 0x5a, 0x73, 0x6c, 0x7e,
0xd4, 0xb9, 0x39, 0xa9, 0xc0, 0xaf, 0x0a, 0x52, 0x2e, 0xeb, 0x65, 0xfa, 0x7e, 0xa9, 0xf3, 0xa7,
0x0a, 0xec, 0x83, 0x3e, 0xf6, 0x80, 0x8d, 0xec, 0x50, 0x85, 0x66, 0x40, 0x19, 0x90, 0x67, 0x05,
0x29, 0x17, 0xdb, 0x95, 0x48, 0x53, 0xc4, 0x3c, 0x45, 0x1c, 0xe6, 0x29, 0xf5, 0x43, 0x0f, 0xf8,
0x1e, 0x8d, 0x7d, 0x14, 0x2e, 0x3b, 0xac, 0x71, 0x06, 0xf9, 0xf5, 0xff, 0x3b, 0xaa, 0x28, 0xb4,
0xb7, 0x13, 0xf2, 0xf6, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd9, 0x50, 0x5b, 0x5b, 0x01, 0x00,
0x00,
}

View File

@@ -1,43 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package configpb;
import "google/protobuf/timestamp.proto";
// TemporalLogConfig is a set of LogShardConfig messages, whose
// time limits should be contiguous.
message TemporalLogConfig {
repeated LogShardConfig shard = 1;
}
// LogShardConfig describes the acceptable date range for a single shard of a temporal
// log.
message LogShardConfig {
string uri = 1;
// The log's public key in DER-encoded PKIX form.
bytes public_key_der = 2;
// not_after_start defines the start of the range of acceptable NotAfter
// values, inclusive.
// Leaving this unset implies no lower bound to the range.
google.protobuf.Timestamp not_after_start = 3;
// not_after_limit defines the end of the range of acceptable NotAfter values,
// exclusive.
// Leaving this unset implies no upper bound to the range.
google.protobuf.Timestamp not_after_limit = 4;
}

View File

@@ -1,75 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"errors"
"strconv"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/x509"
)
// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done.
func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) {
if end < 0 {
return nil, errors.New("end should be >= 0")
}
if end < start {
return nil, errors.New("start should be <= end")
}
params := map[string]string{
"start": strconv.FormatInt(start, 10),
"end": strconv.FormatInt(end, 10),
}
if ctx == nil {
ctx = context.TODO()
}
var resp ct.GetEntriesResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}
// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server
// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures.
// However, this does mean that any certificate parsing failures will cause a failure of the whole
// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke
// ct.LogEntryFromLeaf() on each individual entry.
func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) {
resp, err := c.GetRawEntries(ctx, start, end)
if err != nil {
return nil, err
}
entries := make([]ct.LogEntry, len(resp.Entries))
for i, entry := range resp.Entries {
index := start + int64(i)
logEntry, err := ct.LogEntryFromLeaf(index, &entry)
if x509.IsFatal(err) {
return nil, err
}
entries[i] = *logEntry
}
return entries, nil
}
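
A sketch of the more robust pattern recommended in the GetEntries comment above: fetch raw entries and parse each one individually with ct.LogEntryFromLeaf, tolerating non-fatal x509 errors. The log URI is a placeholder and no public key is supplied, so responses are not signature-verified.

package main

import (
	"context"
	"log"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	// Placeholder URI; empty Options means no signature verification.
	c, err := client.New("https://ct.example.com/log", &http.Client{}, jsonclient.Options{})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := c.GetRawEntries(context.Background(), 0, 31)
	if err != nil {
		log.Fatal(err)
	}
	for i, leaf := range resp.Entries {
		// Parse each entry on its own so one bad certificate does not abort
		// the whole batch, unlike GetEntries above.
		entry, err := ct.LogEntryFromLeaf(int64(i), &leaf)
		if x509.IsFatal(err) {
			log.Printf("entry %d: unparseable: %v", i, err)
			continue
		}
		log.Printf("entry %d: type %v", i, entry.Leaf.TimestampedEntry.EntryType)
	}
}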

View File

@@ -1,289 +0,0 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a CT log client implementation and contains types and code
// for interacting with RFC6962-compliant CT Log instances.
// See http://tools.ietf.org/html/rfc6962 for details
package client
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"strconv"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/jsonclient"
"github.com/google/certificate-transparency-go/tls"
)
// LogClient represents a client for a given CT Log instance
type LogClient struct {
jsonclient.JSONClient
}
// CheckLogClient is an interface that allows (just) checking of various log contents.
type CheckLogClient interface {
BaseURI() string
GetSTH(context.Context) (*ct.SignedTreeHead, error)
GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error)
GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error)
}
// New constructs a new LogClient instance.
// |uri| is the base URI of the CT log instance to interact with, e.g.
// https://ct.googleapis.com/pilot
// |hc| is the underlying client to be used for HTTP requests to the CT log.
// |opts| can be used to provide a custom logger interface and a public key
// for signature verification.
func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
logClient, err := jsonclient.New(uri, hc, opts)
if err != nil {
return nil, err
}
return &LogClient{*logClient}, err
}
// RspError represents an error that occurred when processing a response from a server,
// and also includes key details from the http.Response that triggered the error.
type RspError struct {
Err error
StatusCode int
Body []byte
}
// Error formats the RspError instance, focusing on the error.
func (e RspError) Error() string {
return e.Err.Error()
}
// Attempts to add |chain| to the log, using the api end-point specified by
// |path|. If provided context expires before submission is complete an
// error will be returned.
func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
var resp ct.AddChainResponse
var req ct.AddChainRequest
for _, link := range chain {
req.Chain = append(req.Chain, link.Data)
}
httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var ds ct.DigitallySigned
if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} else if len(rest) > 0 {
return nil, RspError{
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
exts, err := base64.StdEncoding.DecodeString(resp.Extensions)
if err != nil {
return nil, RspError{
Err: fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
var logID ct.LogID
copy(logID.KeyID[:], resp.ID)
sct := &ct.SignedCertificateTimestamp{
SCTVersion: resp.SCTVersion,
LogID: logID,
Timestamp: resp.Timestamp,
Extensions: ct.CTExtensions(exts),
Signature: ds,
}
if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return sct, nil
}
// AddChain adds the (DER represented) X509 |chain| to the log.
func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
}
// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
}
// AddJSON submits arbitrary data to the XJSON server.
func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCertificateTimestamp, error) {
req := ct.AddJSONRequest{Data: data}
var resp ct.AddChainResponse
httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var ds ct.DigitallySigned
if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
} else if len(rest) > 0 {
return nil, RspError{
Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
StatusCode: httpRsp.StatusCode,
Body: body,
}
}
var logID ct.LogID
copy(logID.KeyID[:], resp.ID)
return &ct.SignedCertificateTimestamp{
SCTVersion: resp.SCTVersion,
LogID: logID,
Timestamp: resp.Timestamp,
Extensions: ct.CTExtensions(resp.Extensions),
Signature: ds,
}, nil
}
// GetSTH retrieves the current STH from the log.
// Returns a populated SignedTreeHead, or a non-nil error (which may be of type
// RspError if a raw http.Response is available).
func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) {
var resp ct.GetSTHResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
sth, err := resp.ToSignedTreeHead()
if err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
if err := c.VerifySTHSignature(*sth); err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return sth, nil
}
// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is
// successful.
func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error {
if c.Verifier == nil {
// Can't verify signatures without a verifier
return nil
}
return c.Verifier.VerifySTHSignature(sth)
}
// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain.
func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error {
if c.Verifier == nil {
// Can't verify signatures without a verifier
return nil
}
leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp)
if err != nil {
return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err)
}
entry := ct.LogEntry{Leaf: *leaf}
return c.Verifier.VerifySCTSignature(sct, entry)
}
// GetSTHConsistency retrieves the consistency proof between two snapshots.
func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) {
base10 := 10
params := map[string]string{
"first": strconv.FormatUint(first, base10),
"second": strconv.FormatUint(second, base10),
}
var resp ct.GetSTHConsistencyResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return resp.Consistency, nil
}
// GetProofByHash returns an audit path for the hash of an SCT.
func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) {
b64Hash := base64.StdEncoding.EncodeToString(hash)
base10 := 10
params := map[string]string{
"tree_size": strconv.FormatUint(treeSize, base10),
"hash": b64Hash,
}
var resp ct.GetProofByHashResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}
// GetAcceptedRoots retrieves the set of acceptable root certificates for a log.
func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
var resp ct.GetRootsResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
var roots []ct.ASN1Cert
for _, cert64 := range resp.Certificates {
cert, err := base64.StdEncoding.DecodeString(cert64)
if err != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
roots = append(roots, ct.ASN1Cert{Data: cert})
}
return roots, nil
}
// GetEntryAndProof returns a log entry and audit path for the index of a leaf.
func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) {
base10 := 10
params := map[string]string{
"leaf_index": strconv.FormatUint(index, base10),
"tree_size": strconv.FormatUint(treeSize, base10),
}
var resp ct.GetEntryAndProofResponse
httpRsp, body, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp)
if err != nil {
if httpRsp != nil {
return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
}
return nil, err
}
return &resp, nil
}
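
A brief usage sketch for LogClient; the URI is a placeholder, and supplying the log's public key via jsonclient.Options (PublicKey or PublicKeyDER) would additionally enable the automatic STH/SCT signature checks shown above.

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	lc, err := client.New("https://ct.example.com/log", &http.Client{}, jsonclient.Options{})
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	sth, err := lc.GetSTH(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("tree size %d, timestamp %d\n", sth.TreeSize, sth.Timestamp)

	// Consistency proof between an older tree size and the current head.
	if sth.TreeSize > 1 {
		proof, err := lc.GetSTHConsistency(ctx, 1, sth.TreeSize)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("consistency proof with %d nodes\n", len(proof))
	}
}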

View File

@@ -1,221 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"crypto/sha256"
"errors"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/client/configpb"
"github.com/google/certificate-transparency-go/jsonclient"
"github.com/google/certificate-transparency-go/x509"
)
type interval struct {
lower *time.Time // nil => no lower bound
upper *time.Time // nil => no upper bound
}
// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given
// filename, which should contain text-protobuf encoded configuration data.
func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) {
if len(filename) == 0 {
return nil, errors.New("log config filename empty")
}
cfgText, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed to read log config: %v", err)
}
var cfg configpb.TemporalLogConfig
if err := proto.UnmarshalText(string(cfgText), &cfg); err != nil {
return nil, fmt.Errorf("failed to parse log config: %v", err)
}
if len(cfg.Shard) == 0 {
return nil, errors.New("empty log config found")
}
return &cfg, nil
}
// AddLogClient is an interface that allows adding certificates and pre-certificates to a log.
// Both LogClient and TemporalLogClient implement this interface, which allows users to
// commonize code for adding certs to normal/temporal logs.
type AddLogClient interface {
AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error)
GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error)
}
// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log.
type TemporalLogClient struct {
Clients []*LogClient
intervals []interval
}
// NewTemporalLogClient builds a new client for interacting with a temporal log.
// The provided config should be contiguous and chronological.
func NewTemporalLogClient(cfg configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) {
if len(cfg.Shard) == 0 {
return nil, errors.New("empty config")
}
overall, err := shardInterval(cfg.Shard[0])
if err != nil {
return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err)
}
intervals := make([]interval, 0, len(cfg.Shard))
intervals = append(intervals, overall)
for i := 1; i < len(cfg.Shard); i++ {
interval, err := shardInterval(cfg.Shard[i])
if err != nil {
return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err)
}
if overall.upper == nil {
return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i)
}
if interval.lower == nil {
return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i)
}
if !interval.lower.Equal(*overall.upper) {
return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper)
}
overall.upper = interval.upper
intervals = append(intervals, interval)
}
clients := make([]*LogClient, 0, len(cfg.Shard))
for i, shard := range cfg.Shard {
opts := jsonclient.Options{}
opts.PublicKeyDER = shard.GetPublicKeyDer()
c, err := New(shard.Uri, hc, opts)
if err != nil {
return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err)
}
clients = append(clients, c)
}
tlc := TemporalLogClient{
Clients: clients,
intervals: intervals,
}
return &tlc, nil
}
// GetAcceptedRoots retrieves the set of acceptable root certificates for all
// of the shards of a temporal log (i.e. the union).
func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) {
type result struct {
roots []ct.ASN1Cert
err error
}
results := make(chan result, len(tlc.Clients))
for _, c := range tlc.Clients {
go func(c *LogClient) {
var r result
r.roots, r.err = c.GetAcceptedRoots(ctx)
results <- r
}(c)
}
var allRoots []ct.ASN1Cert
seen := make(map[[sha256.Size]byte]bool)
for range tlc.Clients {
r := <-results
if r.err != nil {
return nil, r.err
}
for _, root := range r.roots {
h := sha256.Sum256(root.Data)
if seen[h] {
continue
}
seen[h] = true
allRoots = append(allRoots, root)
}
}
return allRoots, nil
}
// AddChain adds the (DER represented) X509 chain to the appropriate log.
func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
}
// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log.
func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
}
func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
// Parse the first entry in the chain
if len(chain) == 0 {
return nil, errors.New("missing chain")
}
cert, err := x509.ParseCertificate(chain[0].Data)
if err != nil {
return nil, fmt.Errorf("failed to parse initial chain entry: %v", err)
}
cidx, err := tlc.IndexByDate(cert.NotAfter)
if err != nil {
return nil, fmt.Errorf("failed to find log to process cert: %v", err)
}
return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain)
}
// IndexByDate returns the index of the Clients entry that is appropriate for the given
// date.
func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) {
for i, interval := range tlc.intervals {
if (interval.lower != nil) && when.Before(*interval.lower) {
continue
}
if (interval.upper != nil) && !when.Before(*interval.upper) {
continue
}
return i, nil
}
return -1, fmt.Errorf("no log found encompassing date %v", when)
}
func shardInterval(cfg *configpb.LogShardConfig) (interval, error) {
var interval interval
if cfg.NotAfterStart != nil {
t, err := ptypes.Timestamp(cfg.NotAfterStart)
if err != nil {
return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err)
}
interval.lower = &t
}
if cfg.NotAfterLimit != nil {
t, err := ptypes.Timestamp(cfg.NotAfterLimit)
if err != nil {
return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err)
}
interval.upper = &t
}
if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) {
return interval, errors.New("inverted interval")
}
return interval, nil
}
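
A sketch of driving a temporal log from a text-protobuf config, as TemporalLogConfigFromFile expects; the config file name and the chain contents are placeholders.

package main

import (
	"context"
	"log"
	"net/http"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/client"
)

func main() {
	// Placeholder path; the file holds a text-protobuf TemporalLogConfig whose
	// shards have contiguous not_after_start/not_after_limit ranges.
	cfg, err := client.TemporalLogConfigFromFile("temporal_log_config.textproto")
	if err != nil {
		log.Fatal(err)
	}
	tlc, err := client.NewTemporalLogClient(*cfg, &http.Client{})
	if err != nil {
		log.Fatal(err)
	}
	// chain[0] must be the (pre-)certificate; its NotAfter selects the shard
	// via IndexByDate. The chain here is an empty placeholder.
	var chain []ct.ASN1Cert
	sct, err := tlc.AddPreChain(context.Background(), chain)
	if err != nil {
		log.Fatalf("add-pre-chain failed (expected with an empty placeholder chain): %v", err)
	}
	log.Printf("SCT timestamp %d from shard log", sct.Timestamp)
}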

View File

@@ -1,10 +0,0 @@
steps:
- id: build_ctfe
name: gcr.io/cloud-builders/docker
args:
- build
- --file=trillian/examples/deployment/docker/ctfe/Dockerfile
- --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}
- .
images:
- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME}

View File

@@ -1,28 +0,0 @@
{
"Deadline": "60s",
"Linters": {
"license": "./scripts/check_license.sh:PATH:LINE:MESSAGE",
"forked": "./scripts/check_forked.sh:PATH:LINE:MESSAGE",
"unforked": "./scripts/check_unforked.sh:PATH:LINE:MESSAGE"
},
"Enable": [
"forked",
"gocyclo",
"gofmt",
"goimports",
"golint",
"license",
"misspell",
"unforked",
"vet"
],
"Exclude": [
"x509/",
"asn1/",
".+\\.pb\\.go",
".+\\.pb\\.gw\\.go",
"mock_.+\\.go"
],
"Cyclo": 40,
"Vendor": true
}

View File

@@ -1,31 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"backoff.go",
"client.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/jsonclient",
importpath = "github.com/google/certificate-transparency-go/jsonclient",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509:go_default_library",
"//vendor/golang.org/x/net/context/ctxhttp:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,72 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonclient
import (
"sync"
"time"
)
type backoff struct {
mu sync.RWMutex
multiplier uint
notBefore time.Time
}
const (
// maximum backoff is 2^(maxMultiplier-1) = 128 seconds
maxMultiplier = 8
)
func (b *backoff) set(override *time.Duration) time.Duration {
b.mu.Lock()
defer b.mu.Unlock()
if b.notBefore.After(time.Now()) {
if override != nil {
// If existing backoff is set but override would be longer than
// it then set it to that.
notBefore := time.Now().Add(*override)
if notBefore.After(b.notBefore) {
b.notBefore = notBefore
}
}
return time.Until(b.notBefore)
}
var wait time.Duration
if override != nil {
wait = *override
} else {
if b.multiplier < maxMultiplier {
b.multiplier++
}
wait = time.Second * time.Duration(1<<(b.multiplier-1))
}
b.notBefore = time.Now().Add(wait)
return wait
}
func (b *backoff) decreaseMultiplier() {
b.mu.Lock()
defer b.mu.Unlock()
if b.multiplier > 0 {
b.multiplier--
}
}
func (b *backoff) until() time.Time {
b.mu.RLock()
defer b.mu.RUnlock()
return b.notBefore
}
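
The net effect of the multiplier logic above is a 1s, 2s, 4s, ... schedule capped at 2^(maxMultiplier-1) = 128s; a standalone arithmetic sketch of that schedule (it deliberately does not touch the unexported backoff type):

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxMultiplier = 8 // mirrors the constant above
	var multiplier uint
	for attempt := 1; attempt <= 10; attempt++ {
		if multiplier < maxMultiplier {
			multiplier++
		}
		wait := time.Second * time.Duration(1<<(multiplier-1))
		fmt.Printf("failure %2d: wait %v\n", attempt, wait) // 1s, 2s, ..., 128s, 128s
	}
}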

View File

@@ -1,294 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jsonclient
import (
"bytes"
"context"
"crypto"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/x509"
"golang.org/x/net/context/ctxhttp"
)
const maxJitter = 250 * time.Millisecond
type backoffer interface {
// set adjusts/increases the current backoff interval (typically on retryable failure);
// if the optional parameter is provided, this will be used as the interval if it is greater
// than the currently set interval. Returns the current wait period so that it can be
// logged along with any error message.
set(*time.Duration) time.Duration
// decreaseMultiplier reduces the current backoff multiplier, typically on success.
decreaseMultiplier()
// until returns the time until which the client should wait before making a request,
// it may be in the past in which case it should be ignored.
until() time.Time
}
// JSONClient provides common functionality for interacting with a JSON server
// that uses cryptographic signatures.
type JSONClient struct {
uri string // the base URI of the server. e.g. https://ct.googleapis.com/pilot
httpClient *http.Client // used to interact with the server via HTTP
Verifier *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
logger Logger // interface to use for logging warnings and errors
backoff backoffer // object used to store and calculate backoff information
}
// Logger is a simple logging interface used to log internal errors and warnings
type Logger interface {
// Printf formats and logs a message
Printf(string, ...interface{})
}
// Options are the options for creating a new JSONClient.
type Options struct {
// Interface to use for logging warnings and errors, if nil the
// standard library log package will be used.
Logger Logger
// PEM format public key to use for signature verification.
PublicKey string
// DER format public key to use for signature verification.
PublicKeyDER []byte
}
// ParsePublicKey parses and returns the public key contained in opts.
// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used.
// If neither is set, nil will be returned.
func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) {
if len(opts.PublicKeyDER) > 0 {
return x509.ParsePKIXPublicKey(opts.PublicKeyDER)
}
if opts.PublicKey != "" {
pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey))
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, errors.New("extra data found after PEM key decoded")
}
return pubkey, nil
}
return nil, nil
}
type basicLogger struct{}
func (bl *basicLogger) Printf(msg string, args ...interface{}) {
log.Printf(msg, args...)
}
// New constructs a new JSONClient instance, for the given base URI, using the
// given http.Client object (if provided) and the Options object.
// If opts does not specify a public key, signatures will not be verified.
func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
pubkey, err := opts.ParsePublicKey()
if err != nil {
return nil, fmt.Errorf("invalid public key: %v", err)
}
var verifier *ct.SignatureVerifier
if pubkey != nil {
var err error
verifier, err = ct.NewSignatureVerifier(pubkey)
if err != nil {
return nil, err
}
}
if hc == nil {
hc = new(http.Client)
}
logger := opts.Logger
if logger == nil {
logger = &basicLogger{}
}
return &JSONClient{
uri: strings.TrimRight(uri, "/"),
httpClient: hc,
Verifier: verifier,
logger: logger,
backoff: &backoff{},
}, nil
}
// BaseURI returns the base URI that the JSONClient makes queries to.
func (c *JSONClient) BaseURI() string {
return c.uri
}
// GetAndParse makes an HTTP GET call to the given path, and attempts to parse
// the response as a JSON representation of the rsp structure. Returns the
// http.Response, the body of the response, and an error. Note that the
// returned http.Response can be non-nil even when an error is returned,
// in particular when the HTTP status is not OK or when the JSON parsing fails.
func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
// Build a GET request with URL-encoded parameters.
vals := url.Values{}
for k, v := range params {
vals.Add(k, v)
}
fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil)
if err != nil {
return nil, nil, err
}
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
if err != nil {
return nil, nil, err
}
// Read everything now so http.Client can reuse the connection.
body, err := ioutil.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
if err != nil {
return httpRsp, body, fmt.Errorf("failed to read response body: %v", err)
}
if httpRsp.StatusCode != http.StatusOK {
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
return httpRsp, body, err
}
return httpRsp, body, nil
}
// PostAndParse makes an HTTP POST call to the given path, including the request
// parameters, and attempts to parse the response as a JSON representation of
// the rsp structure. Returns the http.Response, the body of the response, and
// an error. Note that the returned http.Response can be non-nil even when an
// error is returned, in particular when the HTTP status is not OK or when the
// JSON parsing fails.
func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
// Build a POST request with JSON body.
postBody, err := json.Marshal(req)
if err != nil {
return nil, nil, err
}
fullURI := fmt.Sprintf("%s%s", c.uri, path)
httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody))
if err != nil {
return nil, nil, err
}
httpReq.Header.Set("Content-Type", "application/json")
httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
// Read all of the body, if there is one, so that the http.Client can do Keep-Alive.
var body []byte
if httpRsp != nil {
body, err = ioutil.ReadAll(httpRsp.Body)
httpRsp.Body.Close()
}
if err != nil {
return httpRsp, body, err
}
if httpRsp.StatusCode == http.StatusOK {
if err = json.Unmarshal(body, &rsp); err != nil {
return httpRsp, body, err
}
}
return httpRsp, body, nil
}
// waitForBackoff blocks until the defined backoff interval or the context has expired; if the
// backoff's not-before time is already in the past, it returns immediately.
func (c *JSONClient) waitForBackoff(ctx context.Context) error {
dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000)))))
if dur < 0 {
dur = 0
}
backoffTimer := time.NewTimer(dur)
select {
case <-ctx.Done():
return ctx.Err()
case <-backoffTimer.C:
}
return nil
}
// PostAndParseWithRetry makes an HTTP POST call, but retries (with backoff) on
// retriable errors; the caller should set a deadline on the provided context
// to prevent infinite retries. Return values are as for PostAndParse.
func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
if ctx == nil {
return nil, nil, errors.New("context.Context required")
}
for {
httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp)
if err != nil {
// Don't retry context errors.
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
wait := c.backoff.set(nil)
c.logger.Printf("Request failed, backing-off for %s: %s", wait, err)
} else {
switch {
case httpRsp.StatusCode == http.StatusOK:
return httpRsp, body, nil
case httpRsp.StatusCode == http.StatusRequestTimeout:
// Request timeout, retry immediately
c.logger.Printf("Request timed out, retrying immediately")
case httpRsp.StatusCode == http.StatusServiceUnavailable:
var backoff *time.Duration
// Retry-After may be either a number of seconds as an int or an RFC 1123
// date string (RFC 7231 Section 7.1.3)
if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
if seconds, err := strconv.Atoi(retryAfter); err == nil {
b := time.Duration(seconds) * time.Second
backoff = &b
} else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
b := date.Sub(time.Now())
backoff = &b
}
}
wait := c.backoff.set(backoff)
c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status)
default:
return httpRsp, body, fmt.Errorf("got HTTP Status %q", httpRsp.Status)
}
}
if err := c.waitForBackoff(ctx); err != nil {
return nil, nil, err
}
}
}
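
A sketch of the retrying POST path documented above, bounded by a context deadline as the comment on PostAndParseWithRetry advises; the URI and certificate bytes are placeholders, so a real run would simply retry until the deadline expires.

package main

import (
	"context"
	"log"
	"net/http"
	"time"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	c, err := jsonclient.New("https://ct.example.com/log", &http.Client{}, jsonclient.Options{})
	if err != nil {
		log.Fatal(err)
	}
	// The deadline is what stops the retry loop on persistent failures.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var derCert []byte // placeholder: DER-encoded leaf certificate
	req := ct.AddChainRequest{Chain: [][]byte{derCert}}
	var rsp ct.AddChainResponse
	httpRsp, _, err := c.PostAndParseWithRetry(ctx, ct.AddChainPath, &req, &rsp)
	if err != nil {
		log.Fatalf("add-chain failed: %v", err)
	}
	log.Printf("HTTP %s, SCT timestamp %d", httpRsp.Status, rsp.Timestamp)
}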

View File

@@ -1,347 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ct
import (
"crypto"
"crypto/sha256"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
// SerializeSCTSignatureInput serializes the passed in sct and log entry into
// the correct format for signing.
func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
switch sct.SCTVersion {
case V1:
input := CertificateTimestamp{
SCTVersion: sct.SCTVersion,
SignatureType: CertificateTimestampSignatureType,
Timestamp: sct.Timestamp,
EntryType: entry.Leaf.TimestampedEntry.EntryType,
Extensions: sct.Extensions,
}
switch entry.Leaf.TimestampedEntry.EntryType {
case X509LogEntryType:
input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry
case PrecertLogEntryType:
input.PrecertEntry = &PreCert{
IssuerKeyHash: entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
}
case XJSONLogEntryType:
input.JSONEntry = entry.Leaf.TimestampedEntry.JSONEntry
default:
return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
}
return tls.Marshal(input)
default:
return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
}
}
// SerializeSTHSignatureInput serializes the passed in STH into the correct
// format for signing.
func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
switch sth.Version {
case V1:
if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
}
input := TreeHeadSignature{
Version: sth.Version,
SignatureType: TreeHashSignatureType,
Timestamp: sth.Timestamp,
TreeSize: sth.TreeSize,
SHA256RootHash: sth.SHA256RootHash,
}
return tls.Marshal(input)
default:
return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
}
}
// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert
func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
Timestamp: timestamp,
EntryType: X509LogEntryType,
X509Entry: &cert,
},
}
}
// CreateJSONMerkleTreeLeaf creates the merkle tree leaf for json data.
func CreateJSONMerkleTreeLeaf(data interface{}, timestamp uint64) *MerkleTreeLeaf {
jsonData, err := json.Marshal(AddJSONRequest{Data: data})
if err != nil {
return nil
}
// Match the JSON serialization implemented by json-c
jsonStr := strings.Replace(string(jsonData), ":", ": ", -1)
jsonStr = strings.Replace(jsonStr, ",", ", ", -1)
jsonStr = strings.Replace(jsonStr, "{", "{ ", -1)
jsonStr = strings.Replace(jsonStr, "}", " }", -1)
jsonStr = strings.Replace(jsonStr, "/", `\/`, -1)
// TODO: Pending google/certificate-transparency#1243, replace with
// ObjectHash once supported by CT server.
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
Timestamp: timestamp,
EntryType: XJSONLogEntryType,
JSONEntry: &JSONDataEntry{Data: []byte(jsonStr)},
},
}
}
// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
// Need at most 3 of the chain
count := 3
if count > len(rawChain) {
count = len(rawChain)
}
chain := make([]*x509.Certificate, count)
for i := range chain {
cert, err := x509.ParseCertificate(rawChain[i].Data)
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
}
chain[i] = cert
}
return MerkleTreeLeafFromChain(chain, etype, timestamp)
}
// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp.
func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
leaf := MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
EntryType: etype,
Timestamp: timestamp,
},
}
if etype == X509LogEntryType {
leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw}
return &leaf, nil
}
if etype != PrecertLogEntryType {
return nil, fmt.Errorf("unknown LogEntryType %d", etype)
}
// Pre-certs are more complicated. First, parse the leaf pre-cert and its
// putative issuer.
if len(chain) < 2 {
return nil, fmt.Errorf("no issuer cert available for precert leaf building")
}
issuer := chain[1]
cert := chain[0]
var preIssuer *x509.Certificate
if IsPreIssuer(issuer) {
// Replace the cert's issuance information with details from the pre-issuer.
preIssuer = issuer
// The issuer of the pre-cert is not going to be the issuer of the final
// cert. Change to use the final issuer's key hash.
if len(chain) < 3 {
return nil, fmt.Errorf("no issuer cert available for pre-issuer")
}
issuer = chain[2]
}
// Next, post-process the DER-encoded TBSCertificate, to remove the CT poison
// extension and possibly update the issuer field.
defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
if err != nil {
return nil, fmt.Errorf("failed to remove poison extension: %v", err)
}
leaf.TimestampedEntry.EntryType = PrecertLogEntryType
leaf.TimestampedEntry.PrecertEntry = &PreCert{
IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
TBSCertificate: defangedTBS,
}
return &leaf, nil
}
// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
// contains embedded SCTs. It is assumed that the timestamp provided is from
// one of the SCTs embedded within the leaf certificate.
func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
// For building the leaf for a certificate and SCT where the SCT is embedded
// in the certificate, we need to build the original precertificate TBS
// data. First, parse the leaf cert and its issuer.
if len(chain) < 2 {
return nil, fmt.Errorf("no issuer cert available for precert leaf building")
}
issuer := chain[1]
cert := chain[0]
// Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
// extension.
tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
if err != nil {
return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
}
return &MerkleTreeLeaf{
Version: V1,
LeafType: TimestampedEntryLeafType,
TimestampedEntry: &TimestampedEntry{
EntryType: PrecertLogEntryType,
Timestamp: timestamp,
PrecertEntry: &PreCert{
IssuerKeyHash: sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
TBSCertificate: tbs,
},
},
}, nil
}
// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
leafData, err := tls.Marshal(*leaf)
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
}
data := append([]byte{TreeLeafPrefix}, leafData...)
leafHash := sha256.Sum256(data)
return leafHash, nil
}
// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
// certificate transparency extended key usage.
func IsPreIssuer(issuer *x509.Certificate) bool {
for _, eku := range issuer.ExtKeyUsage {
if eku == x509.ExtKeyUsageCertificateTransparency {
return true
}
}
return false
}
// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
ret := RawLogEntry{Index: index}
if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
}
switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
var certChain CertificateChain
if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
}
ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
ret.Chain = certChain.Entries
case PrecertLogEntryType:
var precertChain PrecertChainEntry
if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
}
ret.Cert = precertChain.PreCertificate
ret.Chain = precertChain.CertificateChain
default:
// TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
// are not errors. We should revisit how we process this case.
return nil, fmt.Errorf("unknown entry type: %v", eType)
}
return &ret, nil
}
// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
// (pre-)certificate.
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) {
var err error
entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain}
switch eType := rle.Leaf.TimestampedEntry.EntryType; eType {
case X509LogEntryType:
entry.X509Cert, err = rle.Leaf.X509Certificate()
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse certificate: %v", err)
}
case PrecertLogEntryType:
var tbsCert *x509.Certificate
tbsCert, err = rle.Leaf.Precertificate()
if x509.IsFatal(err) {
return nil, fmt.Errorf("failed to parse precertificate: %v", err)
}
entry.Precert = &Precertificate{
Submitted: rle.Cert,
IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
TBSCertificate: tbsCert,
}
default:
return nil, fmt.Errorf("unknown entry type: %v", eType)
}
// err may be non-nil for a non-fatal error.
return &entry, err
}
// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
// after JSON parsing) into a LogEntry object (which includes x509.Certificate
// objects, after TLS and ASN.1 parsing).
//
// Note that this function may return a valid LogEntry object and a non-nil
// error value, when the error indicates a non-fatal parsing error.
func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) {
rle, err := RawLogEntryFromLeaf(index, leaf)
if err != nil {
return nil, err
}
return rle.ToLogEntry()
}
// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds
// since UNIX epoch) to a Go Time.
func TimestampToTime(ts uint64) time.Time {
secs := int64(ts / 1000)
msecs := int64(ts % 1000)
return time.Unix(secs, msecs*1000000)
}
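
A helper sketch tying the pieces above together: parse a DER chain, build the X.509 Merkle tree leaf, and compute its RFC 6962 leaf hash; the chain supplied in main is an empty placeholder.

package main

import (
	"fmt"
	"log"

	ct "github.com/google/certificate-transparency-go"
	"github.com/google/certificate-transparency-go/x509"
)

// leafHash builds an X.509 MerkleTreeLeaf for the given DER chain (leaf first)
// at the given RFC 6962 timestamp (milliseconds) and prints its leaf hash.
func leafHash(derChain [][]byte, timestampMillis uint64) {
	if len(derChain) == 0 {
		log.Fatal("no certificates supplied")
	}
	chain := make([]*x509.Certificate, 0, len(derChain))
	for i, der := range derChain {
		cert, err := x509.ParseCertificate(der)
		if x509.IsFatal(err) {
			log.Fatalf("chain[%d]: %v", i, err)
		}
		chain = append(chain, cert)
	}
	leaf, err := ct.MerkleTreeLeafFromChain(chain, ct.X509LogEntryType, timestampMillis)
	if err != nil {
		log.Fatal(err)
	}
	h, err := ct.LeafHashForLeaf(leaf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leaf hash %x, timestamp %v\n", h, ct.TimestampToTime(timestampMillis))
}

func main() {
	// Placeholder inputs; supply real DER certificates to compute a hash.
	leafHash(nil, 1500000000000)
}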

View File

@@ -1,112 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ct
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/pem"
"fmt"
"log"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
// AllowVerificationWithNonCompliantKeys may be set to true in order to allow
// SignatureVerifier to use keys which are technically non-compliant with
// RFC6962.
var AllowVerificationWithNonCompliantKeys = false
// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error.
func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
p, rest := pem.Decode(b)
if p == nil {
return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
}
k, err := x509.ParsePKIXPublicKey(p.Bytes)
return k, sha256.Sum256(p.Bytes), rest, err
}
// PublicKeyFromB64 parses a base64-encoded public key.
func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
der, err := base64.StdEncoding.DecodeString(b64PubKey)
if err != nil {
return nil, fmt.Errorf("error decoding public key: %s", err)
}
return x509.ParsePKIXPublicKey(der)
}
// SignatureVerifier can verify signatures on SCTs and STHs
type SignatureVerifier struct {
pubKey crypto.PublicKey
}
// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
switch pkType := pk.(type) {
case *rsa.PublicKey:
if pkType.N.BitLen() < 2048 {
e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
if !AllowVerificationWithNonCompliantKeys {
return nil, e
}
log.Printf("WARNING: %v", e)
}
case *ecdsa.PublicKey:
params := *(pkType.Params())
if params != *elliptic.P256().Params() {
e := fmt.Errorf("public is ECDSA, but not on the P256 curve")
if !AllowVerificationWithNonCompliantKeys {
return nil, e
}
log.Printf("WARNING: %v", e)
}
default:
return nil, fmt.Errorf("Unsupported public key type %v", pkType)
}
return &SignatureVerifier{
pubKey: pk,
}, nil
}
// VerifySignature verifies the given signature sig matches the data.
func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
return tls.VerifySignature(s.pubKey, data, sig)
}
// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.
func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
sctData, err := SerializeSCTSignatureInput(sct, entry)
if err != nil {
return err
}
return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature))
}
// VerifySTHSignature verifies that the STH's signature is valid.
func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
sthData, err := SerializeSTHSignatureInput(sth)
if err != nil {
return err
}
return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature))
}
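// Illustrative sketch (not part of the original file): wiring the helpers in
// this file together to check a signed tree head. The base64-encoded key is
// assumed to come from the log's published metadata and the STH from a
// get-sth response; error handling is deliberately minimal.
func exampleCheckSTH(b64PubKey string, sth SignedTreeHead) error {
    pk, err := PublicKeyFromB64(b64PubKey)
    if err != nil {
        return fmt.Errorf("invalid log public key: %v", err)
    }
    v, err := NewSignatureVerifier(pk)
    if err != nil {
        return fmt.Errorf("cannot build verifier: %v", err)
    }
    return v.VerifySTHSignature(sth)
}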

View File

@@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"signature.go",
"tls.go",
"types.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/tls",
importpath = "github.com/google/certificate-transparency-go/tls",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,152 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tls
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
_ "crypto/md5" // For registration side-effect
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1" // For registration side-effect
_ "crypto/sha256" // For registration side-effect
_ "crypto/sha512" // For registration side-effect
"errors"
"fmt"
"log"
"math/big"
"github.com/google/certificate-transparency-go/asn1"
)
type dsaSig struct {
R, S *big.Int
}
func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) {
var hashType crypto.Hash
switch algo {
case MD5:
hashType = crypto.MD5
case SHA1:
hashType = crypto.SHA1
case SHA224:
hashType = crypto.SHA224
case SHA256:
hashType = crypto.SHA256
case SHA384:
hashType = crypto.SHA384
case SHA512:
hashType = crypto.SHA512
default:
return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo)
}
hasher := hashType.New()
if _, err := hasher.Write(data); err != nil {
return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err)
}
return hasher.Sum([]byte{}), hashType, nil
}
// VerifySignature verifies that the passed in signature over data was created by the given PublicKey.
func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error {
hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
if err != nil {
return err
}
switch sig.Algorithm.Signature {
case RSA:
rsaKey, ok := pubKey.(*rsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify RSA signature with %T key", pubKey)
}
if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil {
return fmt.Errorf("failed to verify rsa signature: %v", err)
}
case DSA:
dsaKey, ok := pubKey.(*dsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify DSA signature with %T key", pubKey)
}
var dsaSig dsaSig
rest, err := asn1.Unmarshal(sig.Signature, &dsaSig)
if err != nil {
return fmt.Errorf("failed to unmarshal DSA signature: %v", err)
}
if len(rest) != 0 {
log.Printf("Garbage following signature %v", rest)
}
if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
return errors.New("DSA signature contained zero or negative values")
}
if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) {
return errors.New("failed to verify DSA signature")
}
case ECDSA:
ecdsaKey, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey)
}
var ecdsaSig dsaSig
rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
if err != nil {
return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
}
if len(rest) != 0 {
log.Printf("Garbage following signature %v", rest)
}
if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
return errors.New("ECDSA signature contained zero or negative values")
}
if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
return errors.New("failed to verify ECDSA signature")
}
default:
return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Hash)
}
return nil
}
// CreateSignature builds a signature over the given data using the specified hash algorithm and private key.
func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) {
var sig DigitallySigned
sig.Algorithm.Hash = hashAlgo
hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
if err != nil {
return sig, err
}
switch privKey := privKey.(type) {
case rsa.PrivateKey:
sig.Algorithm.Signature = RSA
sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash)
return sig, err
case ecdsa.PrivateKey:
sig.Algorithm.Signature = ECDSA
var ecdsaSig dsaSig
ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash)
if err != nil {
return sig, err
}
sig.Signature, err = asn1.Marshal(ecdsaSig)
return sig, err
default:
return sig, fmt.Errorf("unsupported private key type %T", privKey)
}
}
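// Illustrative sketch (not part of the original file): a sign/verify round
// trip through CreateSignature and VerifySignature. Note that CreateSignature
// switches on the value types rsa.PrivateKey / ecdsa.PrivateKey, so the
// generated key is dereferenced before being passed in; the key size and
// message are arbitrary choices for the example.
func exampleSignAndVerify(msg []byte) error {
    key, err := rsa.GenerateKey(rand.Reader, 2048)
    if err != nil {
        return err
    }
    sig, err := CreateSignature(*key, SHA256, msg)
    if err != nil {
        return err
    }
    return VerifySignature(&key.PublicKey, msg, sig)
}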

View File

@@ -1,711 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tls implements functionality for dealing with TLS-encoded data,
// as defined in RFC 5246. This includes parsing and generation of TLS-encoded
// data, together with utility functions for dealing with the DigitallySigned
// TLS type.
package tls
import (
"bytes"
"encoding/binary"
"fmt"
"reflect"
"strconv"
"strings"
)
// This file holds utility functions for TLS encoding/decoding data
// as per RFC 5246 section 4.
// A structuralError suggests that the TLS data is valid, but the Go type
// which is receiving it doesn't match.
type structuralError struct {
field string
msg string
}
func (e structuralError) Error() string {
var prefix string
if e.field != "" {
prefix = e.field + ": "
}
return "tls: structure error: " + prefix + e.msg
}
// A syntaxError suggests that the TLS data is invalid.
type syntaxError struct {
field string
msg string
}
func (e syntaxError) Error() string {
var prefix string
if e.field != "" {
prefix = e.field + ": "
}
return "tls: syntax error: " + prefix + e.msg
}
// Uint24 is an unsigned 3-byte integer.
type Uint24 uint32
// Enum is an unsigned integer.
type Enum uint64
var (
uint8Type = reflect.TypeOf(uint8(0))
uint16Type = reflect.TypeOf(uint16(0))
uint24Type = reflect.TypeOf(Uint24(0))
uint32Type = reflect.TypeOf(uint32(0))
uint64Type = reflect.TypeOf(uint64(0))
enumType = reflect.TypeOf(Enum(0))
)
// Unmarshal parses the TLS-encoded data in b and uses the reflect package to
// fill in an arbitrary value pointed at by val. Because Unmarshal uses the
// reflect package, the structs being written to must use exported fields
// (upper case names).
//
// The mappings between TLS types and Go types is as follows; some fields
// must have tags (to indicate their encoded size).
//
// TLS Go Required Tags
// opaque byte / uint8
// uint8 byte / uint8
// uint16 uint16
// uint24 tls.Uint24
// uint32 uint32
// uint64 uint64
// enum tls.Enum size:S or maxval:N
// Type<N,M> []Type minlen:N,maxlen:M
// opaque[N] [N]byte / [N]uint8
// uint8[N] [N]byte / [N]uint8
// struct { } struct { }
// select(T) {
// case e1: Type *T selector:Field,val:e1
// }
//
// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
// associated enumeration type is available earlier in the same enclosing
// struct, and each possible variant is marked with a selector tag (to
// indicate which field selects the variants) and a val tag (to indicate
// what value of the selector picks this particular field).
//
// For example, a TLS structure:
//
// enum { e1(1), e2(2) } EnumType;
// struct {
// EnumType sel;
// select(sel) {
// case e1: uint16
// case e2: uint32
// } data;
// } VariantItem;
//
// would have a corresponding Go type:
//
// type VariantItem struct {
// Sel tls.Enum `tls:"maxval:2"`
// Data16 *uint16 `tls:"selector:Sel,val:1"`
// Data32 *uint32 `tls:"selector:Sel,val:2"`
// }
//
// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
//
// For TLS variable-length vectors that are themselves used in other vectors,
// create a single-field structure to represent the inner type. For example, for:
//
// opaque InnerType<1..65535>;
// struct {
// InnerType inners<1,65535>;
// } Something;
//
// convert to:
//
// type InnerType struct {
// Val []byte `tls:"minlen:1,maxlen:65535"`
// }
// type Something struct {
// Inners []InnerType `tls:"minlen:1,maxlen:65535"`
// }
//
// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
func Unmarshal(b []byte, val interface{}) ([]byte, error) {
return UnmarshalWithParams(b, val, "")
}
// UnmarshalWithParams allows field parameters to be specified for the
// top-level element. The form of the params is the same as the field tags.
func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
info, err := fieldTagToFieldInfo(params, "")
if err != nil {
return nil, err
}
// The passed in interface{} is a pointer (to allow the value to be written
// to); extract the pointed-to object as a reflect.Value, so parseField
// can do various introspection things.
v := reflect.ValueOf(val).Elem()
offset, err := parseField(v, b, 0, info)
if err != nil {
return nil, err
}
return b[offset:], nil
}
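// Illustrative sketch (not part of the original file): a Marshal/Unmarshal
// round trip for a small struct using the tags described above. The type,
// field names and values are invented for the example; Name gets a one-byte
// length prefix because its maxlen fits in a single byte.
type exampleHello struct {
    Version uint8
    Name    []byte `tls:"minlen:0,maxlen:255"`
}

func exampleRoundTrip() error {
    in := exampleHello{Version: 3, Name: []byte("hello")}
    enc, err := Marshal(in) // encodes as 0x03 0x05 'h' 'e' 'l' 'l' 'o'
    if err != nil {
        return err
    }
    var out exampleHello
    rest, err := Unmarshal(enc, &out)
    if err != nil {
        return err
    }
    if len(rest) > 0 {
        return fmt.Errorf("trailing data: %d bytes", len(rest))
    }
    return nil
}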
// byteCount returns the number of bytes needed to encode values up to (and including) x.
func byteCount(x uint64) uint {
switch {
case x < 0x100:
return 1
case x < 0x10000:
return 2
case x < 0x1000000:
return 3
case x < 0x100000000:
return 4
case x < 0x10000000000:
return 5
case x < 0x1000000000000:
return 6
case x < 0x100000000000000:
return 7
default:
return 8
}
}
type fieldInfo struct {
count uint // Number of bytes
countSet bool
minlen uint64 // Only relevant for slices
maxlen uint64 // Only relevant for slices
selector string // Only relevant for select sub-values
val uint64 // Only relevant for select sub-values
name string // Used for better error messages
}
func (i *fieldInfo) fieldName() string {
if i == nil {
return ""
}
return i.name
}
// Given a tag string, return a fieldInfo describing the field.
func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
var info *fieldInfo
// Iterate over clauses in the tag, ignoring any that don't parse properly.
for _, part := range strings.Split(str, ",") {
switch {
case strings.HasPrefix(part, "maxval:"):
if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
info = &fieldInfo{count: byteCount(v), countSet: true}
}
case strings.HasPrefix(part, "size:"):
if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
info = &fieldInfo{count: uint(sz), countSet: true}
}
case strings.HasPrefix(part, "maxlen:"):
v, err := strconv.ParseUint(part[7:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.count = byteCount(v)
info.countSet = true
info.maxlen = v
case strings.HasPrefix(part, "minlen:"):
v, err := strconv.ParseUint(part[7:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.minlen = v
case strings.HasPrefix(part, "selector:"):
if info == nil {
info = &fieldInfo{}
}
info.selector = part[9:]
case strings.HasPrefix(part, "val:"):
v, err := strconv.ParseUint(part[4:], 10, 64)
if err != nil {
continue
}
if info == nil {
info = &fieldInfo{}
}
info.val = v
}
}
if info != nil {
info.name = name
if info.selector == "" {
if info.count < 1 {
return nil, structuralError{name, "field of unknown size in " + str}
} else if info.count > 8 {
return nil, structuralError{name, "specified size too large in " + str}
} else if info.minlen > info.maxlen {
return nil, structuralError{name, "specified length range inverted in " + str}
} else if info.val > 0 {
return nil, structuralError{name, "specified selector value but not field in " + str}
}
}
} else if name != "" {
info = &fieldInfo{name: name}
}
return info, nil
}
// Check that a value fits into a field described by a fieldInfo structure.
func (i fieldInfo) check(val uint64, fldName string) error {
if val >= (1 << (8 * i.count)) {
return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
}
if i.maxlen != 0 {
if val < i.minlen {
return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
}
if val > i.maxlen {
return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
}
}
return nil
}
// readVarUint reads a big-endian unsigned integer of the given size in
// bytes.
func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
if info == nil || !info.countSet {
return 0, structuralError{info.fieldName(), "no field size information available"}
}
if len(data) < int(info.count) {
return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
}
var result uint64
for i := uint(0); i < info.count; i++ {
result = (result << 8) | uint64(data[i])
}
if err := info.check(result, info.name); err != nil {
return 0, err
}
return result, nil
}
// parseField is the main parsing function. Given a byte slice and an offset
// (in bytes) into the data, it will try to parse a suitable TLS-encoded value out
// and store it in the given Value.
func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
offset := initOffset
rest := data[offset:]
fieldType := v.Type()
// First look for known fixed types.
switch fieldType {
case uint8Type:
if len(rest) < 1 {
return offset, syntaxError{info.fieldName(), "truncated uint8"}
}
v.SetUint(uint64(rest[0]))
offset++
return offset, nil
case uint16Type:
if len(rest) < 2 {
return offset, syntaxError{info.fieldName(), "truncated uint16"}
}
v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
offset += 2
return offset, nil
case uint24Type:
if len(rest) < 3 {
return offset, syntaxError{info.fieldName(), "truncated uint24"}
}
v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
offset += 3
return offset, nil
case uint32Type:
if len(rest) < 4 {
return offset, syntaxError{info.fieldName(), "truncated uint32"}
}
v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
offset += 4
return offset, nil
case uint64Type:
if len(rest) < 8 {
return offset, syntaxError{info.fieldName(), "truncated uint64"}
}
v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
offset += 8
return offset, nil
}
// Now deal with user-defined types.
switch v.Kind() {
case enumType.Kind():
// Assume that anything of the same kind as Enum is an Enum, so that
// users can alias types of their own to Enum.
val, err := readVarUint(rest, info)
if err != nil {
return offset, err
}
v.SetUint(val)
offset += int(info.count)
return offset, nil
case reflect.Struct:
structType := fieldType
// TLS includes a select(Enum) {..} construct, where the value of an enum
// indicates which variant field is present (like a C union). We require
// that the enum value be an earlier field in the same structure (the selector),
// and that each of the possible variant destination fields be pointers.
// So the Go mapping looks like:
// type variantType struct {
// Which tls.Enum `tls:"size:1"` // this is the selector
// Val1 *type1 `tls:"selector:Which,val:1"` // this is a destination
// Val2 *type2 `tls:"selector:Which,val:2"` // this is a destination
// }
// To deal with this, we track any enum-like fields and their values...
enums := make(map[string]uint64)
// .. and we track which selector names we've seen (in the destination field tags),
// and whether a destination for that selector has been chosen.
selectorSeen := make(map[string]bool)
for i := 0; i < structType.NumField(); i++ {
// Find information about this field.
tag := structType.Field(i).Tag.Get("tls")
fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
if err != nil {
return offset, err
}
destination := v.Field(i)
if fieldInfo.selector != "" {
// This is a possible select(Enum) destination, so first check that the referenced
// selector field has already been seen earlier in the struct.
choice, ok := enums[fieldInfo.selector]
if !ok {
return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
}
if structType.Field(i).Type.Kind() != reflect.Ptr {
return offset, structuralError{fieldInfo.name, "choice field not a pointer type"}
}
// Is this the first mention of the selector field name? If so, remember it.
seen, ok := selectorSeen[fieldInfo.selector]
if !ok {
selectorSeen[fieldInfo.selector] = false
}
if choice != fieldInfo.val {
// This destination field was not the chosen one, so make it nil (we checked
// it was a pointer above).
v.Field(i).Set(reflect.Zero(structType.Field(i).Type))
continue
}
if seen {
// We already saw a different destination field receive the value for this
// selector value, which indicates a badly annotated structure.
return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
}
selectorSeen[fieldInfo.selector] = true
// Make an object of the pointed-to type and parse into that.
v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem()))
destination = v.Field(i).Elem()
}
offset, err = parseField(destination, data, offset, fieldInfo)
if err != nil {
return offset, err
}
// Remember any possible tls.Enum values encountered in case they are selectors.
if structType.Field(i).Type.Kind() == enumType.Kind() {
enums[structType.Field(i).Name] = v.Field(i).Uint()
}
}
// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
// fields found a destination to put their data in.
for selector, seen := range selectorSeen {
if !seen {
return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
}
}
return offset, nil
case reflect.Array:
datalen := v.Len()
if datalen > len(rest) {
return offset, syntaxError{info.fieldName(), "truncated array"}
}
inner := rest[:datalen]
offset += datalen
if fieldType.Elem().Kind() != reflect.Uint8 {
// Only byte/uint8 arrays are supported
return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()}
}
reflect.Copy(v, reflect.ValueOf(inner))
return offset, nil
case reflect.Slice:
sliceType := fieldType
// Slices represent variable-length vectors, which are prefixed by a length field.
// The fieldInfo indicates the size of that length field.
varlen, err := readVarUint(rest, info)
if err != nil {
return offset, err
}
datalen := int(varlen)
offset += int(info.count)
rest = rest[info.count:]
if datalen > len(rest) {
return offset, syntaxError{info.fieldName(), "truncated slice"}
}
inner := rest[:datalen]
offset += datalen
if fieldType.Elem().Kind() == reflect.Uint8 {
// Fast version for []byte
v.Set(reflect.MakeSlice(sliceType, datalen, datalen))
reflect.Copy(v, reflect.ValueOf(inner))
return offset, nil
}
v.Set(reflect.MakeSlice(sliceType, 0, datalen))
single := reflect.New(sliceType.Elem())
for innerOffset := 0; innerOffset < len(inner); {
var err error
innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil)
if err != nil {
return offset, err
}
v.Set(reflect.Append(v, single.Elem()))
}
return offset, nil
default:
return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
}
}
// Marshal returns the TLS encoding of val.
func Marshal(val interface{}) ([]byte, error) {
return MarshalWithParams(val, "")
}
// MarshalWithParams returns the TLS encoding of val, and allows field
// parameters to be specified for the top-level element. The form
// of the params is the same as the field tags.
func MarshalWithParams(val interface{}, params string) ([]byte, error) {
info, err := fieldTagToFieldInfo(params, "")
if err != nil {
return nil, err
}
var out bytes.Buffer
v := reflect.ValueOf(val)
if err := marshalField(&out, v, info); err != nil {
return nil, err
}
return out.Bytes(), err
}
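// Illustrative sketch (not part of the original file): using the params
// argument to encode a bare opaque<0..2^16-1> vector without wrapping it in a
// struct. The length-prefix width (two bytes here) is derived from maxlen.
func exampleOpaqueVector(data []byte) ([]byte, error) {
    enc, err := MarshalWithParams(data, "minlen:0,maxlen:65535")
    if err != nil {
        return nil, err
    }
    var decoded []byte
    if _, err := UnmarshalWithParams(enc, &decoded, "minlen:0,maxlen:65535"); err != nil {
        return nil, err
    }
    return decoded, nil
}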
func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error {
var prefix string
if info != nil && len(info.name) > 0 {
prefix = info.name + ": "
}
fieldType := v.Type()
// First look for known fixed types.
switch fieldType {
case uint8Type:
out.WriteByte(byte(v.Uint()))
return nil
case uint16Type:
scratch := make([]byte, 2)
binary.BigEndian.PutUint16(scratch, uint16(v.Uint()))
out.Write(scratch)
return nil
case uint24Type:
i := v.Uint()
if i > 0xffffff {
return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)}
}
scratch := make([]byte, 4)
binary.BigEndian.PutUint32(scratch, uint32(i))
out.Write(scratch[1:])
return nil
case uint32Type:
scratch := make([]byte, 4)
binary.BigEndian.PutUint32(scratch, uint32(v.Uint()))
out.Write(scratch)
return nil
case uint64Type:
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(v.Uint()))
out.Write(scratch)
return nil
}
// Now deal with user-defined types.
switch v.Kind() {
case enumType.Kind():
i := v.Uint()
if info == nil {
return structuralError{info.fieldName(), "enum field tag missing"}
}
if err := info.check(i, prefix); err != nil {
return err
}
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(i))
out.Write(scratch[(8 - info.count):])
return nil
case reflect.Struct:
structType := fieldType
enums := make(map[string]uint64) // Values of any Enum fields
// The comment on parseField() describes the mapping of the TLS select(Enum) {..} construct;
// here we have selector and source (rather than destination) fields.
// Track which selector names we've seen (in the source field tags), and whether a source
// value for that selector has been processed.
selectorSeen := make(map[string]bool)
for i := 0; i < structType.NumField(); i++ {
// Find information about this field.
tag := structType.Field(i).Tag.Get("tls")
fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
if err != nil {
return err
}
source := v.Field(i)
if fieldInfo.selector != "" {
// This field is a possible source for a select(Enum) {..}. First check
// the selector field name has been seen.
choice, ok := enums[fieldInfo.selector]
if !ok {
return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
}
if structType.Field(i).Type.Kind() != reflect.Ptr {
return structuralError{fieldInfo.name, "choice field not a pointer type"}
}
// Is this the first mention of the selector field name? If so, remember it.
seen, ok := selectorSeen[fieldInfo.selector]
if !ok {
selectorSeen[fieldInfo.selector] = false
}
if choice != fieldInfo.val {
// This source was not chosen; police that it should be nil.
if v.Field(i).Pointer() != uintptr(0) {
return structuralError{fieldInfo.name, "unchosen field is non-nil"}
}
continue
}
if seen {
// We already saw a different source field generate the value for this
// selector value, which indicates a badly annotated structure.
return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
}
selectorSeen[fieldInfo.selector] = true
if v.Field(i).Pointer() == uintptr(0) {
return structuralError{fieldInfo.name, "chosen field is nil"}
}
// Marshal from the pointed-to source object.
source = v.Field(i).Elem()
}
var fieldData bytes.Buffer
if err := marshalField(&fieldData, source, fieldInfo); err != nil {
return err
}
out.Write(fieldData.Bytes())
// Remember any tls.Enum values encountered in case they are selectors.
if structType.Field(i).Type.Kind() == enumType.Kind() {
enums[structType.Field(i).Name] = v.Field(i).Uint()
}
}
// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
// fields found a source field to get their data from.
for selector, seen := range selectorSeen {
if !seen {
return syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
}
}
return nil
case reflect.Array:
datalen := v.Len()
arrayType := fieldType
if arrayType.Elem().Kind() != reflect.Uint8 {
// Only byte/uint8 arrays are supported
return structuralError{info.fieldName(), "unsupported array type"}
}
bytes := make([]byte, datalen)
for i := 0; i < datalen; i++ {
bytes[i] = uint8(v.Index(i).Uint())
}
_, err := out.Write(bytes)
return err
case reflect.Slice:
if info == nil {
return structuralError{info.fieldName(), "slice field tag missing"}
}
sliceType := fieldType
if sliceType.Elem().Kind() == reflect.Uint8 {
// Fast version for []byte: first write the length as info.count bytes.
datalen := v.Len()
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, uint64(datalen))
out.Write(scratch[(8 - info.count):])
if err := info.check(uint64(datalen), prefix); err != nil {
return err
}
// Then just write the data.
bytes := make([]byte, datalen)
for i := 0; i < datalen; i++ {
bytes[i] = uint8(v.Index(i).Uint())
}
_, err := out.Write(bytes)
return err
}
// General version: use a separate Buffer to write the slice entries into.
var innerBuf bytes.Buffer
for i := 0; i < v.Len(); i++ {
if err := marshalField(&innerBuf, v.Index(i), nil); err != nil {
return err
}
}
// Now insert (and check) the size.
size := uint64(innerBuf.Len())
if err := info.check(size, prefix); err != nil {
return err
}
scratch := make([]byte, 8)
binary.BigEndian.PutUint64(scratch, size)
out.Write(scratch[(8 - info.count):])
// Then copy the data.
_, err := out.Write(innerBuf.Bytes())
return err
default:
return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())}
}
}

View File

@@ -1,117 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tls
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"fmt"
)
// DigitallySigned gives information about a signature, including the algorithm used
// and the signature value. Defined in RFC 5246 s4.7.
type DigitallySigned struct {
Algorithm SignatureAndHashAlgorithm
Signature []byte `tls:"minlen:0,maxlen:65535"`
}
func (d DigitallySigned) String() string {
return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature)
}
// SignatureAndHashAlgorithm gives information about the algorithms used for a
// signature. Defined in RFC 5246 s7.4.1.4.1.
type SignatureAndHashAlgorithm struct {
Hash HashAlgorithm `tls:"maxval:255"`
Signature SignatureAlgorithm `tls:"maxval:255"`
}
// HashAlgorithm enum from RFC 5246 s7.4.1.4.1.
type HashAlgorithm Enum
// HashAlgorithm constants from RFC 5246 s7.4.1.4.1.
const (
None HashAlgorithm = 0
MD5 HashAlgorithm = 1
SHA1 HashAlgorithm = 2
SHA224 HashAlgorithm = 3
SHA256 HashAlgorithm = 4
SHA384 HashAlgorithm = 5
SHA512 HashAlgorithm = 6
)
func (h HashAlgorithm) String() string {
switch h {
case None:
return "None"
case MD5:
return "MD5"
case SHA1:
return "SHA1"
case SHA224:
return "SHA224"
case SHA256:
return "SHA256"
case SHA384:
return "SHA384"
case SHA512:
return "SHA512"
default:
return fmt.Sprintf("UNKNOWN(%d)", h)
}
}
// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1.
type SignatureAlgorithm Enum
// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1.
const (
Anonymous SignatureAlgorithm = 0
RSA SignatureAlgorithm = 1
DSA SignatureAlgorithm = 2
ECDSA SignatureAlgorithm = 3
)
func (s SignatureAlgorithm) String() string {
switch s {
case Anonymous:
return "Anonymous"
case RSA:
return "RSA"
case DSA:
return "DSA"
case ECDSA:
return "ECDSA"
default:
return fmt.Sprintf("UNKNOWN(%d)", s)
}
}
// SignatureAlgorithmFromPubKey returns the algorithm used for this public key.
// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous.
func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm {
switch k.(type) {
case *ecdsa.PublicKey:
return ECDSA
case *rsa.PublicKey:
return RSA
case *dsa.PublicKey:
return DSA
default:
return Anonymous
}
}

View File

@@ -1,528 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ct holds core types and utilities for Certificate Transparency.
package ct
import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/google/certificate-transparency-go/tls"
"github.com/google/certificate-transparency-go/x509"
)
///////////////////////////////////////////////////////////////////////////////
// The following structures represent those outlined in RFC6962; any section
// numbers mentioned refer to that RFC.
///////////////////////////////////////////////////////////////////////////////
// LogEntryType represents the LogEntryType enum from section 3.1:
// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType;
type LogEntryType tls.Enum // tls:"maxval:65535"
// LogEntryType constants from section 3.1.
const (
X509LogEntryType LogEntryType = 0
PrecertLogEntryType LogEntryType = 1
XJSONLogEntryType LogEntryType = 0x8000 // Experimental. Don't rely on this!
)
func (e LogEntryType) String() string {
switch e {
case X509LogEntryType:
return "X509LogEntryType"
case PrecertLogEntryType:
return "PrecertLogEntryType"
case XJSONLogEntryType:
return "XJSONLogEntryType"
default:
return fmt.Sprintf("UnknownEntryType(%d)", e)
}
}
// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance.
const (
TreeLeafPrefix = byte(0x00)
TreeNodePrefix = byte(0x01)
)
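// Illustrative sketch (not part of the original file): how the prefix bytes
// above are used when computing Merkle tree hashes per RFC 6962 s2.1. The
// domain separation ensures a leaf hash can never be confused with an
// interior node hash.
func exampleLeafHash(leafInput []byte) [sha256.Size]byte {
    return sha256.Sum256(append([]byte{TreeLeafPrefix}, leafInput...))
}

func exampleNodeHash(left, right []byte) [sha256.Size]byte {
    data := make([]byte, 0, 1+len(left)+len(right))
    data = append(data, TreeNodePrefix)
    data = append(data, left...)
    data = append(data, right...)
    return sha256.Sum256(data)
}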
// MerkleLeafType represents the MerkleLeafType enum from section 3.4:
// enum { timestamped_entry(0), (255) } MerkleLeafType;
type MerkleLeafType tls.Enum // tls:"maxval:255"
// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4.
const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT
func (m MerkleLeafType) String() string {
switch m {
case TimestampedEntryLeafType:
return "TimestampedEntryLeafType"
default:
return fmt.Sprintf("UnknownLeafType(%d)", m)
}
}
// Version represents the Version enum from section 3.2:
// enum { v1(0), (255) } Version;
type Version tls.Enum // tls:"maxval:255"
// CT Version constants from section 3.2.
const (
V1 Version = 0
)
func (v Version) String() string {
switch v {
case V1:
return "V1"
default:
return fmt.Sprintf("UnknownVersion(%d)", v)
}
}
// SignatureType differentiates STH signatures from SCT signatures, see section 3.2.
// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType;
type SignatureType tls.Enum // tls:"maxval:255"
// SignatureType constants from section 3.2.
const (
CertificateTimestampSignatureType SignatureType = 0
TreeHashSignatureType SignatureType = 1
)
func (st SignatureType) String() string {
switch st {
case CertificateTimestampSignatureType:
return "CertificateTimestamp"
case TreeHashSignatureType:
return "TreeHash"
default:
return fmt.Sprintf("UnknownSignatureType(%d)", st)
}
}
// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate
// (section 3.1).
type ASN1Cert struct {
Data []byte `tls:"minlen:1,maxlen:16777215"`
}
// LogID holds the hash of the Log's public key (section 3.2).
// TODO(pphaneuf): Users should be migrated to the one in the logid package.
type LogID struct {
KeyID [sha256.Size]byte
}
// PreCert represents a Precertificate (section 3.2).
type PreCert struct {
IssuerKeyHash [sha256.Size]byte
TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate
}
// CTExtensions is a representation of the raw bytes of any CtExtension
// structure (see section 3.2).
// nolint: golint
type CTExtensions []byte // tls:"minlen:0,maxlen:65535"
// MerkleTreeNode represents an internal node in the CT tree.
type MerkleTreeNode []byte
// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and
// 4.4).
type ConsistencyProof []MerkleTreeNode
// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5).
type AuditPath []MerkleTreeNode
// LeafInput represents a serialized MerkleTreeLeaf structure.
type LeafInput []byte
// DigitallySigned is a local alias for tls.DigitallySigned so that we can
// attach a MarshalJSON method.
type DigitallySigned tls.DigitallySigned
// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
// Returns an error if the base64 data is invalid.
func (d *DigitallySigned) FromBase64String(b64 string) error {
raw, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
}
var ds tls.DigitallySigned
if rest, err := tls.Unmarshal(raw, &ds); err != nil {
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
} else if len(rest) > 0 {
return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
}
*d = DigitallySigned(ds)
return nil
}
// Base64String returns the base64 representation of the DigitallySigned struct.
func (d DigitallySigned) Base64String() (string, error) {
b, err := tls.Marshal(d)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(b), nil
}
// MarshalJSON implements the json.Marshaller interface.
func (d DigitallySigned) MarshalJSON() ([]byte, error) {
b64, err := d.Base64String()
if err != nil {
return []byte{}, err
}
return []byte(`"` + b64 + `"`), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
var content string
if err := json.Unmarshal(b, &content); err != nil {
return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
}
return d.FromBase64String(content)
}
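// Illustrative sketch (not part of the original file): decoding a signature
// field delivered as base64 (as in the JSON responses below) into a
// DigitallySigned value. The input string is assumed to come from a log.
func exampleDecodeSignature(b64 string) (*DigitallySigned, error) {
    var ds DigitallySigned
    if err := ds.FromBase64String(b64); err != nil {
        return nil, err
    }
    return &ds, nil
}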
// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
type RawLogEntry struct {
// Index is a position of the entry in the log.
Index int64
// Leaf is a parsed Merkle leaf hash input.
Leaf MerkleTreeLeaf
// Cert is:
// - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
// - A precertificate if Leaf.TimestampedEntry.EntryType is
// PrecertLogEntryType, in the form of a DER-encoded Certificate as
// originally added (which includes the poison extension and a signature
// generated over the pre-cert by the pre-cert issuer).
// - Empty otherwise.
Cert ASN1Cert
// Chain is the issuing certificate chain starting with the issuer of Cert,
// or an empty slice if Cert is empty.
Chain []ASN1Cert
}
// LogEntry represents the (parsed) contents of an entry in a CT log. This is described
// in section 3.1, but note that this structure does *not* match the TLS structure
// defined there (the TLS structure is never used directly in RFC6962).
type LogEntry struct {
Index int64
Leaf MerkleTreeLeaf
// Exactly one of the following three fields should be non-empty.
X509Cert *x509.Certificate // Parsed X.509 certificate
Precert *Precertificate // Extracted precertificate
JSONData []byte
// Chain holds the issuing certificate chain, starting with the
// issuer of the leaf certificate / pre-certificate.
Chain []ASN1Cert
}
// PrecertChainEntry holds a precertificate together with a validation chain
// for it; see section 3.1.
type PrecertChainEntry struct {
PreCertificate ASN1Cert `tls:"minlen:1,maxlen:16777215"`
CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
}
// CertificateChain holds a chain of certificates, as returned as extra data
// for get-entries (section 4.6).
type CertificateChain struct {
Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
}
// JSONDataEntry holds arbitrary data.
type JSONDataEntry struct {
Data []byte `tls:"minlen:0,maxlen:16777215"`
}
// SHA256Hash represents the output from the SHA256 hash function.
type SHA256Hash [sha256.Size]byte
// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
func (s *SHA256Hash) FromBase64String(b64 string) error {
bs, err := base64.StdEncoding.DecodeString(b64)
if err != nil {
return fmt.Errorf("failed to unbase64 LogID: %v", err)
}
if len(bs) != sha256.Size {
return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
}
copy(s[:], bs)
return nil
}
// Base64String returns the base64 representation of this SHA256Hash.
func (s SHA256Hash) Base64String() string {
return base64.StdEncoding.EncodeToString(s[:])
}
// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
func (s SHA256Hash) MarshalJSON() ([]byte, error) {
return []byte(`"` + s.Base64String() + `"`), nil
}
// UnmarshalJSON implements the json.Unmarshaller interface.
func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
var content string
if err := json.Unmarshal(b, &content); err != nil {
return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
}
return s.FromBase64String(content)
}
// SignedTreeHead represents the structure returned by the get-sth CT method
// after base64 decoding; see sections 3.5 and 4.3.
type SignedTreeHead struct {
Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms
TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree
Timestamp uint64 `json:"timestamp"` // The time at which the STH was created
SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree
TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature
LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key
}
// TreeHeadSignature holds the data over which the signature in an STH is
// generated; see section 3.5
type TreeHeadSignature struct {
Version Version `tls:"maxval:255"`
SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType
Timestamp uint64
TreeSize uint64
SHA256RootHash SHA256Hash
}
// SignedCertificateTimestamp represents the structure returned by the
// add-chain and add-pre-chain methods after base64 decoding; see sections
// 3.2, 4.1 and 4.2.
type SignedCertificateTimestamp struct {
SCTVersion Version `tls:"maxval:255"`
LogID LogID
Timestamp uint64
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp
}
// CertificateTimestamp is the collection of data that the signature in an
// SCT is over; see section 3.2.
type CertificateTimestamp struct {
SCTVersion Version `tls:"maxval:255"`
SignatureType SignatureType `tls:"maxval:255"`
Timestamp uint64
EntryType LogEntryType `tls:"maxval:65535"`
X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
}
func (s SignedCertificateTimestamp) String() string {
return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion,
base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]),
s.Timestamp,
s.Extensions,
s.Signature)
}
// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4.
type TimestampedEntry struct {
Timestamp uint64
EntryType LogEntryType `tls:"maxval:65535"`
X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"`
PrecertEntry *PreCert `tls:"selector:EntryType,val:1"`
JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"`
Extensions CTExtensions `tls:"minlen:0,maxlen:65535"`
}
// MerkleTreeLeaf represents the deserialized structure of the hash input for the
// leaves of a log's Merkle tree; see section 3.4.
type MerkleTreeLeaf struct {
Version Version `tls:"maxval:255"`
LeafType MerkleLeafType `tls:"maxval:255"`
TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"`
}
// Precertificate represents the parsed CT Precertificate structure.
type Precertificate struct {
// DER-encoded pre-certificate as originally added, which includes a
// poison extension and a signature generated over the pre-cert by
// the pre-cert issuer (which might differ from the issuer of the final
// cert, see RFC6962 s3.1).
Submitted ASN1Cert
// SHA256 hash of the issuing key
IssuerKeyHash [sha256.Size]byte
// Parsed TBSCertificate structure, held in an x509.Certificate for convenience.
TBSCertificate *x509.Certificate
}
// X509Certificate returns the X.509 Certificate contained within the
// MerkleTreeLeaf.
func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) {
if m.TimestampedEntry.EntryType != X509LogEntryType {
return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry")
}
return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data)
}
// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf.
//
// The returned precertificate is embedded in an x509.Certificate, but is in the
// form stored internally in the log rather than the original submitted form
// (i.e. it does not include the poison extension and any changes to reflect the
// final certificate's issuer have been made; see x509.BuildPrecertTBS).
func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) {
if m.TimestampedEntry.EntryType != PrecertLogEntryType {
return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry")
}
return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate)
}
// APIEndpoint is a string that represents one of the Certificate Transparency
// Log API endpoints.
type APIEndpoint string
// Certificate Transparency Log API endpoints; see section 4.
// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If
// changing these constants, may need to change those too.
const (
AddChainStr APIEndpoint = "add-chain"
AddPreChainStr APIEndpoint = "add-pre-chain"
GetSTHStr APIEndpoint = "get-sth"
GetEntriesStr APIEndpoint = "get-entries"
GetProofByHashStr APIEndpoint = "get-proof-by-hash"
GetSTHConsistencyStr APIEndpoint = "get-sth-consistency"
GetRootsStr APIEndpoint = "get-roots"
GetEntryAndProofStr APIEndpoint = "get-entry-and-proof"
)
// URI paths for Log requests; see section 4.
// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If
// changing these constants, may need to change those too.
const (
AddChainPath = "/ct/v1/add-chain"
AddPreChainPath = "/ct/v1/add-pre-chain"
GetSTHPath = "/ct/v1/get-sth"
GetEntriesPath = "/ct/v1/get-entries"
GetProofByHashPath = "/ct/v1/get-proof-by-hash"
GetSTHConsistencyPath = "/ct/v1/get-sth-consistency"
GetRootsPath = "/ct/v1/get-roots"
GetEntryAndProofPath = "/ct/v1/get-entry-and-proof"
AddJSONPath = "/ct/v1/add-json" // Experimental addition
)
// AddChainRequest represents the JSON request body sent to the add-chain and
// add-pre-chain POST methods from sections 4.1 and 4.2.
type AddChainRequest struct {
Chain [][]byte `json:"chain"`
}
// AddChainResponse represents the JSON response to the add-chain and
// add-pre-chain POST methods.
// An SCT represents a Log's promise to integrate a [pre-]certificate into the
// log within a defined period of time.
type AddChainResponse struct {
SCTVersion Version `json:"sct_version"` // SCT structure version
ID []byte `json:"id"` // Log ID
Timestamp uint64 `json:"timestamp"` // Timestamp of issuance
Extensions string `json:"extensions"` // Holder for any CT extensions
Signature []byte `json:"signature"` // Log signature for this SCT
}
// AddJSONRequest represents the JSON request body sent to the add-json POST method.
// The corresponding response re-uses AddChainResponse.
// This is an experimental addition not covered by RFC6962.
type AddJSONRequest struct {
Data interface{} `json:"data"`
}
// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3.
type GetSTHResponse struct {
TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree
Timestamp uint64 `json:"timestamp"` // Time that the tree was created
SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree
TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH
}
// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse.
func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) {
sth := SignedTreeHead{
TreeSize: r.TreeSize,
Timestamp: r.Timestamp,
}
if len(r.SHA256RootHash) != sha256.Size {
return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash))
}
copy(sth.SHA256RootHash[:], r.SHA256RootHash)
var ds DigitallySigned
if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil {
return nil, fmt.Errorf("tls.Unmarshal(): %s", err)
} else if len(rest) > 0 {
return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest))
}
sth.TreeHeadSignature = ds
return &sth, nil
}
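// Illustrative sketch (not part of the original file): decoding the body of a
// get-sth response and converting it into a SignedTreeHead. The raw JSON is
// assumed to have been fetched from GetSTHPath; transport and signature
// verification are out of scope here.
func exampleParseSTH(body []byte) (*SignedTreeHead, error) {
    var resp GetSTHResponse
    if err := json.Unmarshal(body, &resp); err != nil {
        return nil, fmt.Errorf("invalid get-sth response: %v", err)
    }
    return resp.ToSignedTreeHead()
}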
// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency
// GET method from section 4.4. (The corresponding GET request has parameters 'first' and
// 'second'.)
type GetSTHConsistencyResponse struct {
Consistency [][]byte `json:"consistency"`
}
// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET
// method from section 4.5. (The corresponding GET request has parameters 'hash'
// and 'tree_size'.)
type GetProofByHashResponse struct {
LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter.
AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate.
}
// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries
// GET method from section 4.6.
type LeafEntry struct {
// LeafInput is a TLS-encoded MerkleTreeLeaf
LeafInput []byte `json:"leaf_input"`
// ExtraData holds (unsigned) extra data, normally the cert validation chain.
ExtraData []byte `json:"extra_data"`
}
// GetEntriesResponse represents the JSON response to the get-entries GET method
// from section 4.6.
type GetEntriesResponse struct {
Entries []LeafEntry `json:"entries"` // the list of returned entries
}
// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7.
type GetRootsResponse struct {
Certificates []string `json:"certificates"`
}
// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof
// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index'
// and 'tree_size'.)
type GetEntryAndProofResponse struct {
LeafInput []byte `json:"leaf_input"` // the entry itself
ExtraData []byte `json:"extra_data"` // any chain provided when the entry was added to the log
AuditPath [][]byte `json:"audit_path"` // the corresponding proof
}

View File

@@ -1,82 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"cert_pool.go",
"curves.go",
"error.go",
"errors.go",
"names.go",
"nilref_nil_darwin.go",
"nilref_zero_darwin.go",
"pem_decrypt.go",
"pkcs1.go",
"pkcs8.go",
"ptr_sysptr_windows.go",
"ptr_uint_windows.go",
"revoked.go",
"root.go",
"root_bsd.go",
"root_cgo_darwin.go",
"root_darwin.go",
"root_darwin_armx.go",
"root_linux.go",
"root_nacl.go",
"root_nocgo_darwin.go",
"root_plan9.go",
"root_solaris.go",
"root_unix.go",
"root_windows.go",
"rpki.go",
"sec1.go",
"verify.go",
"x509.go",
],
cgo = True,
clinkopts = select({
"@io_bazel_rules_go//go/platform:darwin_386": [
"-framework CoreFoundation -framework Security",
],
"@io_bazel_rules_go//go/platform:darwin_amd64": [
"-framework CoreFoundation -framework Security",
],
"//conditions:default": [],
}),
copts = select({
"@io_bazel_rules_go//go/platform:darwin_386": [
"-mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080",
],
"@io_bazel_rules_go//go/platform:darwin_amd64": [
"-mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080",
],
"//conditions:default": [],
}),
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/x509",
importpath = "github.com/google/certificate-transparency-go/x509",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/tls:go_default_library",
"//vendor/github.com/google/certificate-transparency-go/x509/pkix:go_default_library",
"//vendor/golang.org/x/crypto/cryptobyte:go_default_library",
"//vendor/golang.org/x/crypto/cryptobyte/asn1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/google/certificate-transparency-go/x509/pkix:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,143 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"encoding/pem"
"errors"
"runtime"
)
// CertPool is a set of certificates.
type CertPool struct {
bySubjectKeyId map[string][]int
byName map[string][]int
certs []*Certificate
}
// NewCertPool returns a new, empty CertPool.
func NewCertPool() *CertPool {
return &CertPool{
bySubjectKeyId: make(map[string][]int),
byName: make(map[string][]int),
}
}
// SystemCertPool returns a copy of the system cert pool.
//
// Any mutations to the returned pool are not written to disk and do
// not affect any other pool.
func SystemCertPool() (*CertPool, error) {
if runtime.GOOS == "windows" {
// Issue 16736, 18609:
return nil, errors.New("crypto/x509: system root pool is not available on Windows")
}
return loadSystemRoots()
}
// findVerifiedParents attempts to find certificates in s which have signed the
// given certificate. If any candidates were rejected then errCert will be set
// to one of them, arbitrarily, and err will contain the reason that it was
// rejected.
func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) {
if s == nil {
return
}
var candidates []int
if len(cert.AuthorityKeyId) > 0 {
candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)]
}
if len(candidates) == 0 {
candidates = s.byName[string(cert.RawIssuer)]
}
for _, c := range candidates {
if err = cert.CheckSignatureFrom(s.certs[c]); err == nil {
parents = append(parents, c)
} else {
errCert = s.certs[c]
}
}
return
}
func (s *CertPool) contains(cert *Certificate) bool {
if s == nil {
return false
}
candidates := s.byName[string(cert.RawSubject)]
for _, c := range candidates {
if s.certs[c].Equal(cert) {
return true
}
}
return false
}
// AddCert adds a certificate to a pool.
func (s *CertPool) AddCert(cert *Certificate) {
if cert == nil {
panic("adding nil Certificate to CertPool")
}
// Check that the certificate isn't being added twice.
if s.contains(cert) {
return
}
n := len(s.certs)
s.certs = append(s.certs, cert)
if len(cert.SubjectKeyId) > 0 {
keyId := string(cert.SubjectKeyId)
s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n)
}
name := string(cert.RawSubject)
s.byName[name] = append(s.byName[name], n)
}
// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates.
// It appends any certificates found to s and reports whether any certificates
// were successfully parsed.
//
// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set
// of root CAs in a format suitable for this function.
func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
for len(pemCerts) > 0 {
var block *pem.Block
block, pemCerts = pem.Decode(pemCerts)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := ParseCertificate(block.Bytes)
if IsFatal(err) {
continue
}
s.AddCert(cert)
ok = true
}
return
}
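// Illustrative sketch (not part of the original file): building a pool from a
// PEM bundle, e.g. bytes read from /etc/ssl/cert.pem by the caller.
func examplePoolFromPEM(pemBundle []byte) (*CertPool, error) {
    pool := NewCertPool()
    if !pool.AppendCertsFromPEM(pemBundle) {
        return nil, errors.New("no certificates found in PEM bundle")
    }
    return pool, nil
}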
// Subjects returns a list of the DER-encoded subjects of
// all of the certificates in the pool.
func (s *CertPool) Subjects() [][]byte {
res := make([][]byte, len(s.certs))
for i, c := range s.certs {
res[i] = c.RawSubject
}
return res
}

View File

@@ -1,37 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/elliptic"
"math/big"
"sync"
)
// This file holds ECC curves that are not supported by the main Go crypto/elliptic
// library, but which have been observed in certificates in the wild.
var initonce sync.Once
var p192r1 *elliptic.CurveParams
func initAllCurves() {
initSECP192R1()
}
func initSECP192R1() {
// See SEC-2, section 2.2.2
p192r1 = &elliptic.CurveParams{Name: "P-192"}
p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16)
p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16)
p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16)
p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16)
p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16)
p192r1.BitSize = 192
}
func secp192r1() elliptic.Curve {
initonce.Do(initAllCurves)
return p192r1
}

View File

@@ -1,230 +0,0 @@
package x509
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// Error implements the error interface and describes a single error in an X.509 certificate or CRL.
type Error struct {
ID ErrorID
Category ErrCategory
Summary string
Field string
SpecRef string
SpecText string
// Fatal indicates that parsing has been aborted.
Fatal bool
}
func (err Error) Error() string {
var msg bytes.Buffer
if err.ID != ErrInvalidID {
if err.Fatal {
msg.WriteRune('E')
} else {
msg.WriteRune('W')
}
msg.WriteString(fmt.Sprintf("%03d: ", err.ID))
}
msg.WriteString(err.Summary)
return msg.String()
}
// VerboseError creates a more verbose error string, including spec details.
func (err Error) VerboseError() string {
var msg bytes.Buffer
msg.WriteString(err.Error())
if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 {
msg.WriteString(" (")
needSep := false
if len(err.Field) > 0 {
msg.WriteString(err.Field)
needSep = true
}
if err.Category != UnknownCategory {
if needSep {
msg.WriteString(": ")
}
msg.WriteString(err.Category.String())
needSep = true
}
if len(err.SpecRef) > 0 {
if needSep {
msg.WriteString(": ")
}
msg.WriteString(err.SpecRef)
needSep = true
}
if len(err.SpecText) > 0 {
if needSep {
if len(err.SpecRef) > 0 {
msg.WriteString(", ")
} else {
msg.WriteString(": ")
}
}
msg.WriteString("'")
msg.WriteString(err.SpecText)
msg.WriteString("'")
}
msg.WriteString(")")
}
return msg.String()
}
// ErrCategory indicates the category of an x509.Error.
type ErrCategory int
// ErrCategory values.
const (
UnknownCategory ErrCategory = iota
// Errors in ASN.1 encoding
InvalidASN1Encoding
InvalidASN1Content
InvalidASN1DER
// Errors in ASN.1 relative to schema
InvalidValueRange
InvalidASN1Type
UnexpectedAdditionalData
// Errors in X.509
PoorlyFormedCertificate // Fails a SHOULD clause
MalformedCertificate // Fails a MUST clause
PoorlyFormedCRL // Fails a SHOULD clause
MalformedCRL // Fails a MUST clause
// Errors relative to CA/Browser Forum guidelines
BaselineRequirementsFailure
EVRequirementsFailure
// Other errors
InsecureAlgorithm
UnrecognizedValue
)
func (category ErrCategory) String() string {
switch category {
case InvalidASN1Encoding:
return "Invalid ASN.1 encoding"
case InvalidASN1Content:
return "Invalid ASN.1 content"
case InvalidASN1DER:
return "Invalid ASN.1 distinguished encoding"
case InvalidValueRange:
return "Invalid value for range given in schema"
case InvalidASN1Type:
return "Invalid ASN.1 type for schema"
case UnexpectedAdditionalData:
return "Unexpected additional data present"
case PoorlyFormedCertificate:
return "Certificate does not comply with SHOULD clause in spec"
case MalformedCertificate:
return "Certificate does not comply with MUST clause in spec"
case PoorlyFormedCRL:
return "Certificate Revocation List does not comply with SHOULD clause in spec"
case MalformedCRL:
return "Certificate Revocation List does not comply with MUST clause in spec"
case BaselineRequirementsFailure:
return "Certificate does not comply with CA/BF baseline requirements"
case EVRequirementsFailure:
return "Certificate does not comply with CA/BF EV requirements"
case InsecureAlgorithm:
return "Certificate uses an insecure algorithm"
case UnrecognizedValue:
return "Certificate uses an unrecognized value"
default:
return fmt.Sprintf("Unknown (%d)", category)
}
}
// ErrorID is an identifier for an x509.Error, to allow filtering.
type ErrorID int
// Errors implements the error interface and holds a collection of errors found in a certificate or CRL.
type Errors struct {
Errs []Error
}
// Error converts to a string.
func (e *Errors) Error() string {
return e.combineErrors(Error.Error)
}
// VerboseError creates a more verbose error string, including spec details.
func (e *Errors) VerboseError() string {
return e.combineErrors(Error.VerboseError)
}
// Fatal indicates whether e includes a fatal error
func (e *Errors) Fatal() bool {
return (e.FirstFatal() != nil)
}
// Empty indicates whether e has no errors.
func (e *Errors) Empty() bool {
return len(e.Errs) == 0
}
// FirstFatal returns the first fatal error in e, or nil
// if there is no fatal error.
func (e *Errors) FirstFatal() error {
for _, err := range e.Errs {
if err.Fatal {
return err
}
}
return nil
}
// AddID adds the Error identified by the given id to an x509.Errors.
func (e *Errors) AddID(id ErrorID, args ...interface{}) {
e.Errs = append(e.Errs, NewError(id, args...))
}
func (e Errors) combineErrors(errfn func(Error) string) string {
if len(e.Errs) == 0 {
return ""
}
if len(e.Errs) == 1 {
return errfn((e.Errs)[0])
}
var msg bytes.Buffer
msg.WriteString("Errors:")
for _, err := range e.Errs {
msg.WriteString("\n ")
msg.WriteString(errfn(err))
}
return msg.String()
}
// Filter creates a new Errors object with any entries from the filtered
// list of IDs removed.
func (e Errors) Filter(filtered []ErrorID) Errors {
var results Errors
eloop:
for _, v := range e.Errs {
for _, f := range filtered {
if v.ID == f {
continue eloop
}
}
results.Errs = append(results.Errs, v)
}
return results
}
// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string.
func ErrorFilter(ignore string) []ErrorID {
var ids []ErrorID
filters := strings.Split(ignore, ",")
for _, f := range filters {
v, err := strconv.Atoi(f)
if err != nil {
continue
}
ids = append(ids, ErrorID(v))
}
return ids
}
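
A short sketch of how a caller can combine and filter these errors by ID, using the exported error IDs defined in the next file of this package; the output comments are approximate.

// Sketch: accumulate two findings, then drop one class of them by numeric ID.
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	var errs x509.Errors
	errs.AddID(x509.ErrTrailingCertList)
	errs.AddID(x509.ErrUnexpectedlyCriticalCertListExtension, "2.5.29.28")

	// ErrorFilter turns a comma-separated list of numeric IDs into []ErrorID;
	// Filter then removes any entries whose ID is in that list.
	kept := errs.Filter(x509.ErrorFilter(fmt.Sprintf("%d", x509.ErrTrailingCertList)))
	fmt.Println(kept.Error()) // e.g. "W003: x509: certificate list extension 2.5.29.28 marked critical ..."
	fmt.Println(kept.Fatal()) // false: the remaining entry is a warning, not a fatal error
}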

View File

@@ -1,302 +0,0 @@
package x509
import "fmt"
// To preserve error IDs, only append to this list, never insert.
const (
ErrInvalidID ErrorID = iota
ErrInvalidCertList
ErrTrailingCertList
ErrUnexpectedlyCriticalCertListExtension
ErrUnexpectedlyNonCriticalCertListExtension
ErrInvalidCertListAuthKeyID
ErrTrailingCertListAuthKeyID
ErrInvalidCertListIssuerAltName
ErrInvalidCertListCRLNumber
ErrTrailingCertListCRLNumber
ErrNegativeCertListCRLNumber
ErrInvalidCertListDeltaCRL
ErrTrailingCertListDeltaCRL
ErrNegativeCertListDeltaCRL
ErrInvalidCertListIssuingDP
ErrTrailingCertListIssuingDP
ErrCertListIssuingDPMultipleTypes
ErrCertListIssuingDPInvalidFullName
ErrInvalidCertListFreshestCRL
ErrInvalidCertListAuthInfoAccess
ErrTrailingCertListAuthInfoAccess
ErrUnhandledCriticalCertListExtension
ErrUnexpectedlyCriticalRevokedCertExtension
ErrUnexpectedlyNonCriticalRevokedCertExtension
ErrInvalidRevocationReason
ErrTrailingRevocationReason
ErrInvalidRevocationInvalidityDate
ErrTrailingRevocationInvalidityDate
ErrInvalidRevocationIssuer
ErrUnhandledCriticalRevokedCertExtension
ErrMaxID
)
// idToError gives a template x509.Error for each defined ErrorID; where the Summary
// field may hold format specifiers that take field parameters.
var idToError map[ErrorID]Error
var errorInfo = []Error{
{
ID: ErrInvalidCertList,
Summary: "x509: failed to parse CertificateList: %v",
Field: "CertificateList",
SpecRef: "RFC 5280 s5.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertList,
Summary: "x509: trailing data after CertificateList",
Field: "CertificateList",
SpecRef: "RFC 5280 s5.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnexpectedlyCriticalCertListExtension,
Summary: "x509: certificate list extension %v marked critical but expected to be non-critical",
Field: "tbsCertList.crlExtensions.*.critical",
SpecRef: "RFC 5280 s5.2",
Category: MalformedCRL,
},
{
ID: ErrUnexpectedlyNonCriticalCertListExtension,
Summary: "x509: certificate list extension %v marked non-critical but expected to be critical",
Field: "tbsCertList.crlExtensions.*.critical",
SpecRef: "RFC 5280 s5.2",
Category: MalformedCRL,
},
{
ID: ErrInvalidCertListAuthKeyID,
Summary: "x509: failed to unmarshal certificate-list authority key-id: %v",
Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
SpecRef: "RFC 5280 s5.2.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListAuthKeyID,
Summary: "x509: trailing data after certificate list auth key ID",
Field: "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
SpecRef: "RFC 5280 s5.2.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListIssuerAltName,
Summary: "x509: failed to parse CRL issuer alt name: %v",
Field: "tbsCertList.crlExtensions.*.IssuerAltName",
SpecRef: "RFC 5280 s5.2.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListCRLNumber,
Summary: "x509: failed to unmarshal certificate-list crl-number: %v",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListCRLNumber,
Summary: "x509: trailing data after certificate list crl-number",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrNegativeCertListCRLNumber,
Summary: "x509: negative certificate list crl-number: %d",
Field: "tbsCertList.crlExtensions.*.CRLNumber",
SpecRef: "RFC 5280 s5.2.3",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrInvalidCertListDeltaCRL,
Summary: "x509: failed to unmarshal certificate-list delta-crl: %v",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListDeltaCRL,
Summary: "x509: trailing data after certificate list delta-crl",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrNegativeCertListDeltaCRL,
Summary: "x509: negative certificate list base-crl-number: %d",
Field: "tbsCertList.crlExtensions.*.BaseCRLNumber",
SpecRef: "RFC 5280 s5.2.4",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrInvalidCertListIssuingDP,
Summary: "x509: failed to unmarshal certificate list issuing distribution point: %v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListIssuingDP,
Summary: "x509: trailing data after certificate list issuing distribution point",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrCertListIssuingDPMultipleTypes,
Summary: "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
SpecRef: "RFC 5280 s5.2.5",
SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrCertListIssuingDPInvalidFullName,
Summary: "x509: failed to parse CRL issuing-distribution-point fullName: %v",
Field: "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint",
SpecRef: "RFC 5280 s5.2.5",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListFreshestCRL,
Summary: "x509: failed to unmarshal certificate list freshestCRL: %v",
Field: "tbsCertList.crlExtensions.*.FreshestCRL",
SpecRef: "RFC 5280 s5.2.6",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidCertListAuthInfoAccess,
Summary: "x509: failed to unmarshal certificate list authority info access: %v",
Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
SpecRef: "RFC 5280 s5.2.7",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingCertListAuthInfoAccess,
Summary: "x509: trailing data after certificate list authority info access",
Field: "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
SpecRef: "RFC 5280 s5.2.7",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnhandledCriticalCertListExtension,
Summary: "x509: unhandled critical extension in certificate list: %v",
Field: "tbsCertList.revokedCertificates.crlExtensions.*",
SpecRef: "RFC 5280 s5.2",
SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.",
Category: MalformedCRL,
Fatal: true,
},
{
ID: ErrUnexpectedlyCriticalRevokedCertExtension,
Summary: "x509: revoked certificate extension %v marked critical but expected to be non-critical",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
SpecRef: "RFC 5280 s5.3",
Category: MalformedCRL,
},
{
ID: ErrUnexpectedlyNonCriticalRevokedCertExtension,
Summary: "x509: revoked certificate extension %v marked non-critical but expected to be critical",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
SpecRef: "RFC 5280 s5.3",
Category: MalformedCRL,
},
{
ID: ErrInvalidRevocationReason,
Summary: "x509: failed to parse revocation reason: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
SpecRef: "RFC 5280 s5.3.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingRevocationReason,
Summary: "x509: trailing data after revoked certificate reason",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
SpecRef: "RFC 5280 s5.3.1",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidRevocationInvalidityDate,
Summary: "x509: failed to parse revoked certificate invalidity date: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
SpecRef: "RFC 5280 s5.3.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrTrailingRevocationInvalidityDate,
Summary: "x509: trailing data after revoked certificate invalidity date",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
SpecRef: "RFC 5280 s5.3.2",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrInvalidRevocationIssuer,
Summary: "x509: failed to parse revocation issuer %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer",
SpecRef: "RFC 5280 s5.3.3",
Category: InvalidASN1Content,
Fatal: true,
},
{
ID: ErrUnhandledCriticalRevokedCertExtension,
Summary: "x509: unhandled critical extension in revoked certificate: %v",
Field: "tbsCertList.revokedCertificates.crlEntryExtensions.*",
SpecRef: "RFC 5280 s5.3",
SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.",
Category: MalformedCRL,
Fatal: true,
},
}
func init() {
idToError = make(map[ErrorID]Error, len(errorInfo))
for _, info := range errorInfo {
idToError[info.ID] = info
}
}
// NewError builds a new x509.Error based on the template for the given id.
func NewError(id ErrorID, args ...interface{}) Error {
var err Error
if id >= ErrMaxID {
err.ID = id
err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args)
err.Fatal = true
} else {
err = idToError[id]
err.Summary = fmt.Sprintf(err.Summary, args...)
}
return err
}
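
A small sketch showing how the templates above are instantiated: NewError looks up the template for the given ID and fills the Summary's format verbs with the supplied args. The printed strings are approximate.

// Sketch: turn an ErrorID plus args into a concrete, formatted Error.
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	err := x509.NewError(x509.ErrNegativeCertListCRLNumber, -5)
	fmt.Println(err.Error())
	// e.g. "E010: x509: negative certificate list crl-number: -5"
	fmt.Println(err.VerboseError())
	// e.g. the same line followed by the field, category and RFC reference in parentheses
}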

View File

@@ -1,164 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"fmt"
"net"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
const (
// GeneralName tag values from RFC 5280, 4.2.1.6
tagOtherName = 0
tagRFC822Name = 1
tagDNSName = 2
tagX400Address = 3
tagDirectoryName = 4
tagEDIPartyName = 5
tagURI = 6
tagIPAddress = 7
tagRegisteredID = 8
)
// OtherName describes a name related to a certificate which is not in one
// of the standard name formats. RFC 5280, 4.2.1.6:
// OtherName ::= SEQUENCE {
// type-id OBJECT IDENTIFIER,
// value [0] EXPLICIT ANY DEFINED BY type-id }
type OtherName struct {
TypeID asn1.ObjectIdentifier
Value asn1.RawValue
}
// GeneralNames holds a collection of names related to a certificate.
type GeneralNames struct {
DNSNames []string
EmailAddresses []string
DirectoryNames []pkix.Name
URIs []string
IPNets []net.IPNet
RegisteredIDs []asn1.ObjectIdentifier
OtherNames []OtherName
}
// Len returns the total number of names in a GeneralNames object.
func (gn GeneralNames) Len() int {
return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) +
len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames))
}
// Empty indicates whether a GeneralNames object is empty.
func (gn GeneralNames) Empty() bool {
return gn.Len() == 0
}
func parseGeneralNames(value []byte, gname *GeneralNames) error {
// RFC 5280, 4.2.1.6
// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
//
// GeneralName ::= CHOICE {
// otherName [0] OtherName,
// rfc822Name [1] IA5String,
// dNSName [2] IA5String,
// x400Address [3] ORAddress,
// directoryName [4] Name,
// ediPartyName [5] EDIPartyName,
// uniformResourceIdentifier [6] IA5String,
// iPAddress [7] OCTET STRING,
// registeredID [8] OBJECT IDENTIFIER }
var seq asn1.RawValue
var rest []byte
if rest, err := asn1.Unmarshal(value, &seq); err != nil {
return fmt.Errorf("x509: failed to parse GeneralNames: %v", err)
} else if len(rest) != 0 {
return fmt.Errorf("x509: trailing data after GeneralNames")
}
if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq)
}
rest = seq.Bytes
for len(rest) > 0 {
var err error
rest, err = parseGeneralName(rest, gname, false)
if err != nil {
return fmt.Errorf("x509: failed to parse GeneralName: %v", err)
}
}
return nil
}
func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) {
var v asn1.RawValue
var rest []byte
var err error
rest, err = asn1.Unmarshal(data, &v)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err)
}
switch v.Tag {
case tagOtherName:
if !v.IsCompound {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound")
}
var other OtherName
v.FullBytes = append([]byte{}, v.FullBytes...)
v.FullBytes[0] = asn1.TagSequence | 0x20
_, err = asn1.Unmarshal(v.FullBytes, &other)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err)
}
gname.OtherNames = append(gname.OtherNames, other)
case tagRFC822Name:
gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes))
case tagDNSName:
dns := string(v.Bytes)
gname.DNSNames = append(gname.DNSNames, dns)
case tagDirectoryName:
var rdnSeq pkix.RDNSequence
if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err)
}
var dirName pkix.Name
dirName.FillFromRDNSequence(&rdnSeq)
gname.DirectoryNames = append(gname.DirectoryNames, dirName)
case tagURI:
gname.URIs = append(gname.URIs, string(v.Bytes))
case tagIPAddress:
vlen := len(v.Bytes)
if withMask {
switch vlen {
case (2 * net.IPv4len), (2 * net.IPv6len):
ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]}
gname.IPNets = append(gname.IPNets, ipNet)
default:
return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen)
}
} else {
switch vlen {
case net.IPv4len, net.IPv6len:
ipNet := net.IPNet{IP: v.Bytes}
gname.IPNets = append(gname.IPNets, ipNet)
default:
return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen)
}
}
case tagRegisteredID:
var oid asn1.ObjectIdentifier
v.FullBytes = append([]byte{}, v.FullBytes...)
v.FullBytes[0] = asn1.TagOID
_, err = asn1.Unmarshal(v.FullBytes, &oid)
if err != nil {
return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err)
}
gname.RegisteredIDs = append(gname.RegisteredIDs, oid)
default:
return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag)
}
return rest, nil
}
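
Because parseGeneralNames is unexported, the following sketch is written as a hypothetical test inside package x509. It hand-builds a one-element GeneralNames SEQUENCE containing a dNSName (context tag [2], per the CHOICE above) and feeds it to the parser; it assumes the asn1 fork marshals a RawValue from its Class/Tag/Bytes fields, as encoding/asn1 does.

// Sketch (hypothetical file names_sketch_test.go in package x509).
package x509

import (
	"testing"

	"github.com/google/certificate-transparency-go/asn1"
)

func TestParseGeneralNamesSketch(t *testing.T) {
	// dNSName "example.com" as a context-specific [2] IA5String.
	dns, _ := asn1.Marshal(asn1.RawValue{Class: asn1.ClassContextSpecific, Tag: tagDNSName, Bytes: []byte("example.com")})
	// Wrap it in the outer universal SEQUENCE expected by parseGeneralNames.
	seq, _ := asn1.Marshal(asn1.RawValue{Class: asn1.ClassUniversal, Tag: asn1.TagSequence, IsCompound: true, Bytes: dns})

	var gn GeneralNames
	if err := parseGeneralNames(seq, &gn); err != nil {
		t.Fatalf("parseGeneralNames() = %v, want nil", err)
	}
	if len(gn.DNSNames) != 1 || gn.DNSNames[0] != "example.com" {
		t.Errorf("DNSNames = %v, want [example.com]", gn.DNSNames)
	}
}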

View File

@@ -1,26 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,!arm,!arm64,!ios,!go1.10
package x509
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
*/
import "C"
// For Go versions before 1.10, nil values for Apple's CoreFoundation
// CF*Ref types were represented by nil. See:
// https://github.com/golang/go/commit/b868616b63a8
func setNilCFRef(v *C.CFDataRef) {
*v = nil
}
func isNilCFRef(v C.CFDataRef) bool {
return v == nil
}

View File

@@ -1,26 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,!arm,!arm64,!ios,go1.10
package x509
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <CoreFoundation/CoreFoundation.h>
*/
import "C"
// For Go versions >= 1.10, nil values for Apple's CoreFoundation
// CF*Ref types are represented by zero. See:
// https://github.com/golang/go/commit/b868616b63a8
func setNilCFRef(v *C.CFDataRef) {
*v = 0
}
func isNilCFRef(v C.CFDataRef) bool {
return v == 0
}

View File

@@ -1,240 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
// generate a key from the password was derived by looking at the OpenSSL
// implementation.
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/md5"
"encoding/hex"
"encoding/pem"
"errors"
"io"
"strings"
)
type PEMCipher int
// Possible values for the EncryptPEMBlock encryption algorithm.
const (
_ PEMCipher = iota
PEMCipherDES
PEMCipher3DES
PEMCipherAES128
PEMCipherAES192
PEMCipherAES256
)
// rfc1423Algo holds a method for enciphering a PEM block.
type rfc1423Algo struct {
cipher PEMCipher
name string
cipherFunc func(key []byte) (cipher.Block, error)
keySize int
blockSize int
}
// rfc1423Algos holds a slice of the possible ways to encrypt a PEM
// block. The ivSize numbers were taken from the OpenSSL source.
var rfc1423Algos = []rfc1423Algo{{
cipher: PEMCipherDES,
name: "DES-CBC",
cipherFunc: des.NewCipher,
keySize: 8,
blockSize: des.BlockSize,
}, {
cipher: PEMCipher3DES,
name: "DES-EDE3-CBC",
cipherFunc: des.NewTripleDESCipher,
keySize: 24,
blockSize: des.BlockSize,
}, {
cipher: PEMCipherAES128,
name: "AES-128-CBC",
cipherFunc: aes.NewCipher,
keySize: 16,
blockSize: aes.BlockSize,
}, {
cipher: PEMCipherAES192,
name: "AES-192-CBC",
cipherFunc: aes.NewCipher,
keySize: 24,
blockSize: aes.BlockSize,
}, {
cipher: PEMCipherAES256,
name: "AES-256-CBC",
cipherFunc: aes.NewCipher,
keySize: 32,
blockSize: aes.BlockSize,
},
}
// deriveKey uses a key derivation function to stretch the password into a key
// with the number of bits our cipher requires. This algorithm was derived from
// the OpenSSL source.
func (c rfc1423Algo) deriveKey(password, salt []byte) []byte {
hash := md5.New()
out := make([]byte, c.keySize)
var digest []byte
for i := 0; i < len(out); i += len(digest) {
hash.Reset()
hash.Write(digest)
hash.Write(password)
hash.Write(salt)
digest = hash.Sum(digest[:0])
copy(out[i:], digest)
}
return out
}
// IsEncryptedPEMBlock reports whether the PEM block is password encrypted.
func IsEncryptedPEMBlock(b *pem.Block) bool {
_, ok := b.Headers["DEK-Info"]
return ok
}
// IncorrectPasswordError is returned when an incorrect password is detected.
var IncorrectPasswordError = errors.New("x509: decryption password incorrect")
// DecryptPEMBlock takes a password encrypted PEM block and the password used to
// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects
// the DEK-Info header to determine the algorithm used for decryption. If no
// DEK-Info header is present, an error is returned. If an incorrect password
// is detected an IncorrectPasswordError is returned. Because of deficiencies
// in the encrypted-PEM format, it's not always possible to detect an incorrect
// password. In these cases no error will be returned but the decrypted DER
// bytes will be random noise.
func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) {
dek, ok := b.Headers["DEK-Info"]
if !ok {
return nil, errors.New("x509: no DEK-Info header in block")
}
idx := strings.Index(dek, ",")
if idx == -1 {
return nil, errors.New("x509: malformed DEK-Info header")
}
mode, hexIV := dek[:idx], dek[idx+1:]
ciph := cipherByName(mode)
if ciph == nil {
return nil, errors.New("x509: unknown encryption mode")
}
iv, err := hex.DecodeString(hexIV)
if err != nil {
return nil, err
}
if len(iv) != ciph.blockSize {
return nil, errors.New("x509: incorrect IV size")
}
// Based on the OpenSSL implementation. The salt is the first 8 bytes
// of the initialization vector.
key := ciph.deriveKey(password, iv[:8])
block, err := ciph.cipherFunc(key)
if err != nil {
return nil, err
}
if len(b.Bytes)%block.BlockSize() != 0 {
return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size")
}
data := make([]byte, len(b.Bytes))
dec := cipher.NewCBCDecrypter(block, iv)
dec.CryptBlocks(data, b.Bytes)
// Blocks are padded using a scheme where the last n bytes of padding are all
// equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423.
// For example:
// [x y z 2 2]
// [x y 7 7 7 7 7 7 7]
// If we detect a bad padding, we assume it is an invalid password.
dlen := len(data)
if dlen == 0 || dlen%ciph.blockSize != 0 {
return nil, errors.New("x509: invalid padding")
}
last := int(data[dlen-1])
if dlen < last {
return nil, IncorrectPasswordError
}
if last == 0 || last > ciph.blockSize {
return nil, IncorrectPasswordError
}
for _, val := range data[dlen-last:] {
if int(val) != last {
return nil, IncorrectPasswordError
}
}
return data[:dlen-last], nil
}
// EncryptPEMBlock returns a PEM block of the specified type holding the
// given DER-encoded data encrypted with the specified algorithm and
// password.
func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) {
ciph := cipherByKey(alg)
if ciph == nil {
return nil, errors.New("x509: unknown encryption mode")
}
iv := make([]byte, ciph.blockSize)
if _, err := io.ReadFull(rand, iv); err != nil {
return nil, errors.New("x509: cannot generate IV: " + err.Error())
}
// The salt is the first 8 bytes of the initialization vector,
// matching the key derivation in DecryptPEMBlock.
key := ciph.deriveKey(password, iv[:8])
block, err := ciph.cipherFunc(key)
if err != nil {
return nil, err
}
enc := cipher.NewCBCEncrypter(block, iv)
pad := ciph.blockSize - len(data)%ciph.blockSize
encrypted := make([]byte, len(data), len(data)+pad)
// We could save this copy by encrypting all the whole blocks in
// the data separately, but it doesn't seem worth the additional
// code.
copy(encrypted, data)
// See RFC 1423, section 1.1
for i := 0; i < pad; i++ {
encrypted = append(encrypted, byte(pad))
}
enc.CryptBlocks(encrypted, encrypted)
return &pem.Block{
Type: blockType,
Headers: map[string]string{
"Proc-Type": "4,ENCRYPTED",
"DEK-Info": ciph.name + "," + hex.EncodeToString(iv),
},
Bytes: encrypted,
}, nil
}
func cipherByName(name string) *rfc1423Algo {
for i := range rfc1423Algos {
alg := &rfc1423Algos[i]
if alg.name == name {
return alg
}
}
return nil
}
func cipherByKey(key PEMCipher) *rfc1423Algo {
for i := range rfc1423Algos {
alg := &rfc1423Algos[i]
if alg.cipher == key {
return alg
}
}
return nil
}
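
The two halves above round-trip cleanly; a minimal sketch encrypting an arbitrary payload under AES-256-CBC and decrypting it with the same password:

// Sketch: EncryptPEMBlock / DecryptPEMBlock round trip.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	payload := []byte("any DER (or other) bytes")
	password := []byte("correct horse battery staple")

	block, err := x509.EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", payload, password, x509.PEMCipherAES256)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(x509.IsEncryptedPEMBlock(block)) // true: a DEK-Info header was added

	plain, err := x509.DecryptPEMBlock(block, password)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(plain, payload)) // true
}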

View File

@@ -1,155 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/rsa"
"errors"
"math/big"
"github.com/google/certificate-transparency-go/asn1"
)
// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key.
type pkcs1PrivateKey struct {
Version int
N *big.Int
E int
D *big.Int
P *big.Int
Q *big.Int
// We ignore these values, if present, because rsa will calculate them.
Dp *big.Int `asn1:"optional"`
Dq *big.Int `asn1:"optional"`
Qinv *big.Int `asn1:"optional"`
AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"`
}
type pkcs1AdditionalRSAPrime struct {
Prime *big.Int
// We ignore these values because rsa will calculate them.
Exp *big.Int
Coeff *big.Int
}
// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key.
type pkcs1PublicKey struct {
N *big.Int
E int
}
// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form.
func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) {
var priv pkcs1PrivateKey
rest, err := asn1.Unmarshal(der, &priv)
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
if err != nil {
return nil, err
}
if priv.Version > 1 {
return nil, errors.New("x509: unsupported private key version")
}
if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 {
return nil, errors.New("x509: private key contains zero or negative value")
}
key := new(rsa.PrivateKey)
key.PublicKey = rsa.PublicKey{
E: priv.E,
N: priv.N,
}
key.D = priv.D
key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes))
key.Primes[0] = priv.P
key.Primes[1] = priv.Q
for i, a := range priv.AdditionalPrimes {
if a.Prime.Sign() <= 0 {
return nil, errors.New("x509: private key contains zero or negative prime")
}
key.Primes[i+2] = a.Prime
// We ignore the other two values because rsa will calculate
// them as needed.
}
err = key.Validate()
if err != nil {
return nil, err
}
key.Precompute()
return key, nil
}
// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form.
func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte {
key.Precompute()
version := 0
if len(key.Primes) > 2 {
version = 1
}
priv := pkcs1PrivateKey{
Version: version,
N: key.N,
E: key.PublicKey.E,
D: key.D,
P: key.Primes[0],
Q: key.Primes[1],
Dp: key.Precomputed.Dp,
Dq: key.Precomputed.Dq,
Qinv: key.Precomputed.Qinv,
}
priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues))
for i, values := range key.Precomputed.CRTValues {
priv.AdditionalPrimes[i].Prime = key.Primes[2+i]
priv.AdditionalPrimes[i].Exp = values.Exp
priv.AdditionalPrimes[i].Coeff = values.Coeff
}
b, _ := asn1.Marshal(priv)
return b
}
// ParsePKCS1PublicKey parses a PKCS#1 public key in ASN.1 DER form.
func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) {
var pub pkcs1PublicKey
rest, err := asn1.Unmarshal(der, &pub)
if err != nil {
return nil, err
}
if len(rest) > 0 {
return nil, asn1.SyntaxError{Msg: "trailing data"}
}
if pub.N.Sign() <= 0 || pub.E <= 0 {
return nil, errors.New("x509: public key contains zero or negative value")
}
if pub.E > 1<<31-1 {
return nil, errors.New("x509: public key contains large public exponent")
}
return &rsa.PublicKey{
E: pub.E,
N: pub.N,
}, nil
}
// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form.
func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte {
derBytes, _ := asn1.Marshal(pkcs1PublicKey{
N: key.N,
E: key.E,
})
return derBytes
}
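
A short sketch of the PKCS#1 round trip: marshal a freshly generated RSA key to DER and parse it back.

// Sketch: MarshalPKCS1PrivateKey / ParsePKCS1PrivateKey round trip.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	der := x509.MarshalPKCS1PrivateKey(key)

	parsed, err := x509.ParsePKCS1PrivateKey(der)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.N.Cmp(key.N) == 0 && parsed.E == key.E) // true
}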

View File

@@ -1,102 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/ecdsa"
"crypto/rsa"
"errors"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See
// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
// and RFC 5208.
type pkcs8 struct {
Version int
Algo pkix.AlgorithmIdentifier
PrivateKey []byte
// optional attributes omitted.
}
// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key.
// See RFC 5208.
func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) {
var privKey pkcs8
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
return nil, err
}
switch {
case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA):
key, err = ParsePKCS1PrivateKey(privKey.PrivateKey)
if err != nil {
return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error())
}
return key, nil
case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA):
bytes := privKey.Algo.Parameters.FullBytes
namedCurveOID := new(asn1.ObjectIdentifier)
if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
namedCurveOID = nil
}
key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey)
if err != nil {
return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error())
}
return key, nil
default:
return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm)
}
}
// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form.
// The following key types are supported: *rsa.PrivateKey, *ecdsa.PrivateKey.
// Unsupported key types result in an error.
//
// See RFC 5208.
func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) {
var privKey pkcs8
switch k := key.(type) {
case *rsa.PrivateKey:
privKey.Algo = pkix.AlgorithmIdentifier{
Algorithm: OIDPublicKeyRSA,
Parameters: asn1.NullRawValue,
}
privKey.PrivateKey = MarshalPKCS1PrivateKey(k)
case *ecdsa.PrivateKey:
oid, ok := OIDFromNamedCurve(k.Curve)
if !ok {
return nil, errors.New("x509: unknown curve while marshalling to PKCS#8")
}
oidBytes, err := asn1.Marshal(oid)
if err != nil {
return nil, errors.New("x509: failed to marshal curve OID: " + err.Error())
}
privKey.Algo = pkix.AlgorithmIdentifier{
Algorithm: OIDPublicKeyECDSA,
Parameters: asn1.RawValue{
FullBytes: oidBytes,
},
}
if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil {
return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error())
}
default:
return nil, fmt.Errorf("x509: unknown key type while marshalling PKCS#8: %T", key)
}
return asn1.Marshal(privKey)
}
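
Likewise for PKCS#8, a sketch that wraps an ECDSA key and unwraps it again; ParsePKCS8PrivateKey returns an interface{}, so the caller type-asserts the result.

// Sketch: MarshalPKCS8PrivateKey / ParsePKCS8PrivateKey round trip with ECDSA.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := x509.ParsePKCS8PrivateKey(der)
	if err != nil {
		log.Fatal(err)
	}
	_, ok := parsed.(*ecdsa.PrivateKey)
	fmt.Println(ok) // true
}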

View File

@@ -1,24 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["pkix.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/google/certificate-transparency-go/x509/pkix",
importpath = "github.com/google/certificate-transparency-go/x509/pkix",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/google/certificate-transparency-go/asn1:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,288 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pkix contains shared, low level structures used for ASN.1 parsing
// and serialization of X.509 certificates, CRL and OCSP.
package pkix
import (
// START CT CHANGES
"encoding/hex"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
// END CT CHANGES
"math/big"
"time"
)
// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.1.1.2.
type AlgorithmIdentifier struct {
Algorithm asn1.ObjectIdentifier
Parameters asn1.RawValue `asn1:"optional"`
}
type RDNSequence []RelativeDistinguishedNameSET
var attributeTypeNames = map[string]string{
"2.5.4.6": "C",
"2.5.4.10": "O",
"2.5.4.11": "OU",
"2.5.4.3": "CN",
"2.5.4.5": "SERIALNUMBER",
"2.5.4.7": "L",
"2.5.4.8": "ST",
"2.5.4.9": "STREET",
"2.5.4.17": "POSTALCODE",
}
// String returns a string representation of the sequence r,
// roughly following the RFC 2253 Distinguished Names syntax.
func (r RDNSequence) String() string {
s := ""
for i := 0; i < len(r); i++ {
rdn := r[len(r)-1-i]
if i > 0 {
s += ","
}
for j, tv := range rdn {
if j > 0 {
s += "+"
}
oidString := tv.Type.String()
typeName, ok := attributeTypeNames[oidString]
if !ok {
derBytes, err := asn1.Marshal(tv.Value)
if err == nil {
s += oidString + "=#" + hex.EncodeToString(derBytes)
continue // No value escaping necessary.
}
typeName = oidString
}
valueString := fmt.Sprint(tv.Value)
escaped := make([]rune, 0, len(valueString))
for k, c := range valueString {
escape := false
switch c {
case ',', '+', '"', '\\', '<', '>', ';':
escape = true
case ' ':
escape = k == 0 || k == len(valueString)-1
case '#':
escape = k == 0
}
if escape {
escaped = append(escaped, '\\', c)
} else {
escaped = append(escaped, c)
}
}
s += typeName + "=" + string(escaped)
}
}
return s
}
type RelativeDistinguishedNameSET []AttributeTypeAndValue
// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in
// http://tools.ietf.org/html/rfc5280#section-4.1.2.4
type AttributeTypeAndValue struct {
Type asn1.ObjectIdentifier
Value interface{}
}
// AttributeTypeAndValueSET represents a set of ASN.1 sequences of
// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10).
type AttributeTypeAndValueSET struct {
Type asn1.ObjectIdentifier
Value [][]AttributeTypeAndValue `asn1:"set"`
}
// Extension represents the ASN.1 structure of the same name. See RFC
// 5280, section 4.2.
type Extension struct {
Id asn1.ObjectIdentifier
Critical bool `asn1:"optional"`
Value []byte
}
// Name represents an X.509 distinguished name. This only includes the common
// elements of a DN. When parsing, all elements are stored in Names and
// non-standard elements can be extracted from there. When marshaling, elements
// in ExtraNames are appended and override other values with the same OID.
type Name struct {
Country, Organization, OrganizationalUnit []string
Locality, Province []string
StreetAddress, PostalCode []string
SerialNumber, CommonName string
Names []AttributeTypeAndValue
ExtraNames []AttributeTypeAndValue
}
func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
for _, rdn := range *rdns {
if len(rdn) == 0 {
continue
}
for _, atv := range rdn {
n.Names = append(n.Names, atv)
value, ok := atv.Value.(string)
if !ok {
continue
}
t := atv.Type
if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] {
switch t[3] {
case OIDCommonName[3]:
n.CommonName = value
case OIDSerialNumber[3]:
n.SerialNumber = value
case OIDCountry[3]:
n.Country = append(n.Country, value)
case OIDLocality[3]:
n.Locality = append(n.Locality, value)
case OIDProvince[3]:
n.Province = append(n.Province, value)
case OIDStreetAddress[3]:
n.StreetAddress = append(n.StreetAddress, value)
case OIDOrganization[3]:
n.Organization = append(n.Organization, value)
case OIDOrganizationalUnit[3]:
n.OrganizationalUnit = append(n.OrganizationalUnit, value)
case OIDPostalCode[3]:
n.PostalCode = append(n.PostalCode, value)
}
}
}
}
}
var (
OIDAttribute = asn1.ObjectIdentifier{2, 5, 4}
OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6}
OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10}
OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11}
OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3}
OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5}
OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7}
OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8}
OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9}
OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17}
OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65}
OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12}
OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46}
OIDName = asn1.ObjectIdentifier{2, 5, 4, 41}
OIDSurname = asn1.ObjectIdentifier{2, 5, 4, 4}
OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42}
OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43}
OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44}
)
// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
// and returns the new value. The relativeDistinguishedNameSET contains an
// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
// search for AttributeTypeAndValue.
func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
return in
}
s := make([]AttributeTypeAndValue, len(values))
for i, value := range values {
s[i].Type = oid
s[i].Value = value
}
return append(in, s)
}
func (n Name) ToRDNSequence() (ret RDNSequence) {
ret = n.appendRDNs(ret, n.Country, OIDCountry)
ret = n.appendRDNs(ret, n.Province, OIDProvince)
ret = n.appendRDNs(ret, n.Locality, OIDLocality)
ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress)
ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode)
ret = n.appendRDNs(ret, n.Organization, OIDOrganization)
ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit)
if len(n.CommonName) > 0 {
ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName)
}
if len(n.SerialNumber) > 0 {
ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber)
}
for _, atv := range n.ExtraNames {
ret = append(ret, []AttributeTypeAndValue{atv})
}
return ret
}
// String returns the string form of n, roughly following
// the RFC 2253 Distinguished Names syntax.
func (n Name) String() string {
return n.ToRDNSequence().String()
}
// oidInAttributeTypeAndValue returns whether a type with the given OID exists
// in atv.
func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
for _, a := range atv {
if a.Type.Equal(oid) {
return true
}
}
return false
}
// CertificateList represents the ASN.1 structure of the same name. See RFC
// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
// signature.
type CertificateList struct {
TBSCertList TBSCertificateList
SignatureAlgorithm AlgorithmIdentifier
SignatureValue asn1.BitString
}
// HasExpired reports whether certList should have been updated by now.
func (certList *CertificateList) HasExpired(now time.Time) bool {
return !now.Before(certList.TBSCertList.NextUpdate)
}
// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC
// 5280, section 5.1.
type TBSCertificateList struct {
Raw asn1.RawContent
Version int `asn1:"optional,default:0"`
Signature AlgorithmIdentifier
Issuer RDNSequence
ThisUpdate time.Time
NextUpdate time.Time `asn1:"optional"`
RevokedCertificates []RevokedCertificate `asn1:"optional"`
Extensions []Extension `asn1:"tag:0,optional,explicit"`
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure. See RFC
// 5280, section 5.1.
type RevokedCertificate struct {
SerialNumber *big.Int
RevocationTime time.Time
Extensions []Extension `asn1:"optional"`
}
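
A small sketch of the Name helpers above: build a Name, convert it with ToRDNSequence and render it with the RFC 2253-style String method. Note that String walks the sequence in reverse, so the most specific attribute is printed first.

// Sketch: pkix.Name -> RDNSequence -> string.
package main

import (
	"fmt"

	"github.com/google/certificate-transparency-go/x509/pkix"
)

func main() {
	n := pkix.Name{
		CommonName:   "example.com",
		Organization: []string{"Acme Co"},
		Country:      []string{"GB"},
	}
	fmt.Println(n.ToRDNSequence().String()) // CN=example.com,O=Acme Co,C=GB
}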

View File

@@ -1,20 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
package x509
import (
"syscall"
"unsafe"
)
// For Go versions >= 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara is of type syscall.Pointer. See:
// https://github.com/golang/go/commit/4869ec00e87ef
func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer {
return (syscall.Pointer)(p)
}

View File

@@ -1,17 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11
package x509
import "unsafe"
// For Go versions before 1.11, the ExtraPolicyPara field in
// syscall.CertChainPolicyPara was of type uintptr. See:
// https://github.com/golang/go/commit/4869ec00e87ef
func convertToPolicyParaType(p unsafe.Pointer) uintptr {
return uintptr(p)
}

View File

@@ -1,365 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/pem"
"time"
"github.com/google/certificate-transparency-go/asn1"
"github.com/google/certificate-transparency-go/x509/pkix"
)
// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2.
var (
OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20}
OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27}
OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28}
)
// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3
var (
OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21}
OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24}
OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29}
)
// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1.
type RevocationReasonCode asn1.Enumerated
// RevocationReasonCode values.
var (
Unspecified = RevocationReasonCode(0)
KeyCompromise = RevocationReasonCode(1)
CACompromise = RevocationReasonCode(2)
AffiliationChanged = RevocationReasonCode(3)
Superseded = RevocationReasonCode(4)
CessationOfOperation = RevocationReasonCode(5)
CertificateHold = RevocationReasonCode(6)
RemoveFromCRL = RevocationReasonCode(8)
PrivilegeWithdrawn = RevocationReasonCode(9)
AACompromise = RevocationReasonCode(10)
)
// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13
type ReasonFlag int
// ReasonFlag values.
const (
UnusedFlag ReasonFlag = 1 << iota
KeyCompromiseFlag
CACompromiseFlag
AffiliationChangedFlag
SupersededFlag
CessationOfOperationFlag
CertificateHoldFlag
PrivilegeWithdrawnFlag
AACompromiseFlag
)
// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1.
// It has the same content as pkix.CertificateList, but the contents include parsed versions
// of any extensions.
type CertificateList struct {
Raw asn1.RawContent
TBSCertList TBSCertList
SignatureAlgorithm pkix.AlgorithmIdentifier
SignatureValue asn1.BitString
}
// ExpiredAt reports whether now is past the expiry time of certList.
func (certList *CertificateList) ExpiredAt(now time.Time) bool {
return now.After(certList.TBSCertList.NextUpdate)
}
// Indication of whether extensions need to be critical or non-critical. Extensions that
// can be either are omitted from the map.
var listExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionAuthorityKeyId.String(): false, // s5.2.1
OIDExtensionIssuerAltName.String(): false, // s5.2.2
OIDExtensionCRLNumber.String(): false, // s5.2.3
OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4
OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5
OIDExtensionFreshestCRL.String(): false, // s5.2.6
OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7
}
var certExtCritical = map[string]bool{
// From RFC 5280...
OIDExtensionCRLReasons.String(): false, // s5.3.1
OIDExtensionInvalidityDate.String(): false, // s5.3.2
OIDExtensionCertificateIssuer.String(): true, // s5.3.3
}
// IssuingDistributionPoint represents the ASN.1 structure of the same
// name (RFC 5280 s5.2.5).
type IssuingDistributionPoint struct {
DistributionPoint distributionPointName `asn1:"optional,tag:0"`
OnlyContainsUserCerts bool `asn1:"optional,tag:1"`
OnlyContainsCACerts bool `asn1:"optional,tag:2"`
OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"`
IndirectCRL bool `asn1:"optional,tag:4"`
OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"`
}
// TBSCertList represents the ASN.1 structure of the same name from RFC
// 5280, section 5.1. It has the same content as pkix.TBSCertificateList
// but the extensions are included in a parsed format.
type TBSCertList struct {
Raw asn1.RawContent
Version int
Signature pkix.AlgorithmIdentifier
Issuer pkix.RDNSequence
ThisUpdate time.Time
NextUpdate time.Time
RevokedCertificates []*RevokedCertificate
Extensions []pkix.Extension
// Cracked out extensions:
AuthorityKeyID []byte
IssuerAltNames GeneralNames
CRLNumber int
BaseCRLNumber int // -1 if no delta CRL present
IssuingDistributionPoint IssuingDistributionPoint
IssuingDPFullNames GeneralNames
FreshestCRLDistributionPoint []string
OCSPServer []string
IssuingCertificateURL []string
}
// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given
// bytes. It's often the case that PEM encoded CRLs will appear where they
// should be DER encoded, so this function will transparently handle PEM
// encoding as long as there isn't any leading garbage.
func ParseCertificateList(clBytes []byte) (*CertificateList, error) {
if bytes.HasPrefix(clBytes, pemCRLPrefix) {
block, _ := pem.Decode(clBytes)
if block != nil && block.Type == pemType {
clBytes = block.Bytes
}
}
return ParseCertificateListDER(clBytes)
}
// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes.
// For non-fatal errors, this function returns both an error and a CertificateList
// object.
func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
var errs Errors
// First parse the DER into the pkix structures.
pkixList := new(pkix.CertificateList)
if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
errs.AddID(ErrInvalidCertList, err)
return nil, &errs
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertList)
return nil, &errs
}
// Transcribe the revoked certs but crack out extensions.
revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
if revokedCerts[i] == nil {
return nil, &errs
}
}
certList := CertificateList{
Raw: derBytes,
TBSCertList: TBSCertList{
Raw: pkixList.TBSCertList.Raw,
Version: pkixList.TBSCertList.Version,
Signature: pkixList.TBSCertList.Signature,
Issuer: pkixList.TBSCertList.Issuer,
ThisUpdate: pkixList.TBSCertList.ThisUpdate,
NextUpdate: pkixList.TBSCertList.NextUpdate,
RevokedCertificates: revokedCerts,
Extensions: pkixList.TBSCertList.Extensions,
CRLNumber: -1,
BaseCRLNumber: -1,
},
SignatureAlgorithm: pkixList.SignatureAlgorithm,
SignatureValue: pkixList.SignatureValue,
}
// Now crack out extensions.
for _, e := range certList.TBSCertList.Extensions {
if expectCritical, present := listExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionAuthorityKeyId):
// RFC 5280 s5.2.1
var a authKeyId
if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
errs.AddID(ErrInvalidCertListAuthKeyID, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthKeyID)
}
certList.TBSCertList.AuthorityKeyID = a.Id
case e.Id.Equal(OIDExtensionIssuerAltName):
// RFC 5280 s5.2.2
if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
errs.AddID(ErrInvalidCertListIssuerAltName, err)
}
case e.Id.Equal(OIDExtensionCRLNumber):
// RFC 5280 s5.2.3
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
errs.AddID(ErrInvalidCertListCRLNumber, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListCRLNumber)
}
if certList.TBSCertList.CRLNumber < 0 {
errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
}
case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
// RFC 5280 s5.2.4
if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
errs.AddID(ErrInvalidCertListDeltaCRL, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListDeltaCRL)
}
if certList.TBSCertList.BaseCRLNumber < 0 {
errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
}
case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
case e.Id.Equal(OIDExtensionFreshestCRL):
// RFC 5280 s5.2.6
if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
errs.AddID(ErrInvalidCertListFreshestCRL, err)
return nil, err
}
case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
// RFC 5280 s5.2.7
var aia []accessDescription
if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListAuthInfoAccess)
}
for _, v := range aia {
// GeneralName: uniformResourceIdentifier [6] IA5String
if v.Location.Tag != tagURI {
continue
}
switch {
case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
}
// TODO(drysdale): cope with more possibilities
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
}
}
}
if errs.Fatal() {
return nil, &errs
}
if errs.Empty() {
return &certList, nil
}
return &certList, &errs
}
func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
// RFC 5280 s5.2.5
if rest, err := asn1.Unmarshal(data, idp); err != nil {
errs.AddID(ErrInvalidCertListIssuingDP, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingCertListIssuingDP)
}
typeCount := 0
if idp.OnlyContainsUserCerts {
typeCount++
}
if idp.OnlyContainsCACerts {
typeCount++
}
if idp.OnlyContainsAttributeCerts {
typeCount++
}
if typeCount > 1 {
errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
}
for _, fn := range idp.DistributionPoint.FullName {
if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
}
}
}
// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
// It has the same content as pkix.RevokedCertificate but the extensions are
// included in a parsed format.
type RevokedCertificate struct {
pkix.RevokedCertificate
// Cracked out extensions:
RevocationReason RevocationReasonCode
InvalidityDate time.Time
Issuer GeneralNames
}
func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate {
result := RevokedCertificate{RevokedCertificate: pkixRevoked}
for _, e := range pkixRevoked.Extensions {
if expectCritical, present := certExtCritical[e.Id.String()]; present {
if e.Critical && !expectCritical {
errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id)
} else if !e.Critical && expectCritical {
errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id)
}
}
switch {
case e.Id.Equal(OIDExtensionCRLReasons):
// RFC 5280, s5.3.1
var reason asn1.Enumerated
if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil {
errs.AddID(ErrInvalidRevocationReason, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationReason)
}
result.RevocationReason = RevocationReasonCode(reason)
case e.Id.Equal(OIDExtensionInvalidityDate):
// RFC 5280, s5.3.2
if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil {
errs.AddID(ErrInvalidRevocationInvalidityDate, err)
} else if len(rest) != 0 {
errs.AddID(ErrTrailingRevocationInvalidityDate)
}
case e.Id.Equal(OIDExtensionCertificateIssuer):
// RFC 5280, s5.3.3
if err := parseGeneralNames(e.Value, &result.Issuer); err != nil {
errs.AddID(ErrInvalidRevocationIssuer, err)
}
default:
if e.Critical {
errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id)
}
}
}
return &result
}
// CheckCertificateListSignature checks that the signature in crl is from c.
func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error {
algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm)
return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
}
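
Finally, a sketch of the parsing entry point above: read a CRL (PEM or DER) from an illustrative path, tolerate non-fatal findings, and walk the revoked entries with their cracked-out extensions.

// Sketch: ParseCertificateList with non-fatal error handling.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/x509"
)

func main() {
	data, err := ioutil.ReadFile("crl.pem") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	certList, err := x509.ParseCertificateList(data)
	if err != nil {
		if certList == nil {
			log.Fatal(err) // fatal parse failure
		}
		log.Printf("non-fatal findings: %v", err) // an *x509.Errors value
	}
	fmt.Println("CRL number:", certList.TBSCertList.CRLNumber)
	for _, rc := range certList.TBSCertList.RevokedCertificates {
		fmt.Printf("serial %v revoked at %v (reason %d)\n",
			rc.SerialNumber, rc.RevocationTime, rc.RevocationReason)
	}
}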

View File

@@ -1,22 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import "sync"
var (
once sync.Once
systemRoots *CertPool
systemRootsErr error
)
func systemRootsPool() *CertPool {
once.Do(initSystemRoots)
return systemRoots
}
func initSystemRoots() {
systemRoots, systemRootsErr = loadSystemRoots()
}

View File

@@ -1,15 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd netbsd openbsd
package x509
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/usr/local/etc/ssl/cert.pem", // FreeBSD
"/etc/ssl/cert.pem", // OpenBSD
"/usr/local/share/certs/ca-root-nss.crt", // DragonFly
"/etc/openssl/certs/ca-certificates.crt", // NetBSD
}

View File

@@ -1,252 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo,!arm,!arm64,!ios
package x509
/*
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1080
#cgo LDFLAGS: -framework CoreFoundation -framework Security
#include <errno.h>
#include <sys/sysctl.h>
#include <CoreFoundation/CoreFoundation.h>
#include <Security/Security.h>
// FetchPEMRootsCTX509_MountainLion is the version of FetchPEMRoots from Go 1.6
// which still works on OS X 10.8 (Mountain Lion).
// It lacks support for admin & user cert domains.
// See golang.org/issue/16473
int FetchPEMRootsCTX509_MountainLion(CFDataRef *pemRoots) {
if (pemRoots == NULL) {
return -1;
}
CFArrayRef certs = NULL;
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
if (err != noErr) {
return -1;
}
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
int i, ncerts = CFArrayGetCount(certs);
for (i = 0; i < ncerts; i++) {
CFDataRef data = NULL;
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
if (cert == NULL) {
continue;
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
*pemRoots = combinedData;
return 0;
}
// useOldCodeCTX509 reports whether the running machine is OS X 10.8 Mountain Lion
// or older. We only support Mountain Lion and higher, but we'll at least try our
// best on older machines and continue to use the old code path.
//
// See golang.org/issue/16473
int useOldCodeCTX509() {
char str[256];
size_t size = sizeof(str);
memset(str, 0, size);
sysctlbyname("kern.osrelease", str, &size, NULL, 0);
// OS X 10.8 is osrelease "12.*", 10.7 is 11.*, 10.6 is 10.*.
// We never supported things before that.
return memcmp(str, "12.", 3) == 0 || memcmp(str, "11.", 3) == 0 || memcmp(str, "10.", 3) == 0;
}
// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates.
//
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
// certificates of the system. On failure, the function returns -1.
// Additionally, it fills untrustedPemRoots with certs that must be removed from pemRoots.
//
// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must
// be released (using CFRelease) after we've consumed its content.
int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots) {
if (useOldCodeCTX509()) {
return FetchPEMRootsCTX509_MountainLion(pemRoots);
}
// Get certificates from all domains, not just System; this lets
// the user add CAs to their "login" keychain, and admins add CAs
// to the "System" keychain.
SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem,
kSecTrustSettingsDomainAdmin,
kSecTrustSettingsDomainUser };
int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain);
if (pemRoots == NULL) {
return -1;
}
// kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"),
// but the Go linker's internal linking mode can't handle CFSTR relocations.
// Create our own dynamic string instead and release it below.
CFStringRef policy = CFStringCreateWithCString(NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8);
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
for (int i = 0; i < numDomains; i++) {
CFArrayRef certs = NULL;
OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs);
if (err != noErr) {
continue;
}
CFIndex numCerts = CFArrayGetCount(certs);
for (int j = 0; j < numCerts; j++) {
CFDataRef data = NULL;
CFErrorRef errRef = NULL;
CFArrayRef trustSettings = NULL;
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j);
if (cert == NULL) {
continue;
}
// We only want trusted certs.
int untrusted = 0;
int trustAsRoot = 0;
int trustRoot = 0;
if (i == 0) {
trustAsRoot = 1;
} else {
// Certs found in the system domain are always trusted. If the user
// configures "Never Trust" on such a cert, it will also be found in the
// admin or user domain, causing it to be added to untrustedPemRoots. The
// Go code will then clean this up.
// Trust may be stored in any of the domains. According to Apple's
// SecTrustServer.c, "user trust settings overrule admin trust settings",
// so take the last trust settings array we find.
// Skip the system domain since it is always trusted.
for (int k = i; k < numDomains; k++) {
CFArrayRef domainTrustSettings = NULL;
err = SecTrustSettingsCopyTrustSettings(cert, domains[k], &domainTrustSettings);
if (err == errSecSuccess && domainTrustSettings != NULL) {
if (trustSettings) {
CFRelease(trustSettings);
}
trustSettings = domainTrustSettings;
}
}
if (trustSettings == NULL) {
// "this certificate must be verified to a known trusted certificate"; aka not a root.
continue;
}
for (CFIndex k = 0; k < CFArrayGetCount(trustSettings); k++) {
CFNumberRef cfNum;
CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, k);
if (CFDictionaryGetValueIfPresent(tSetting, policy, (const void**)&cfNum)){
SInt32 result = 0;
CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result);
// TODO: The rest of the dictionary specifies conditions for evaluation.
if (result == kSecTrustSettingsResultDeny) {
untrusted = 1;
} else if (result == kSecTrustSettingsResultTrustAsRoot) {
trustAsRoot = 1;
} else if (result == kSecTrustSettingsResultTrustRoot) {
trustRoot = 1;
}
}
}
CFRelease(trustSettings);
}
if (trustRoot) {
// We only want to add Root CAs, so make sure Subject and Issuer Name match
CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, &errRef);
if (errRef != NULL) {
CFRelease(errRef);
continue;
}
CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, &errRef);
if (errRef != NULL) {
CFRelease(subjectName);
CFRelease(errRef);
continue;
}
Boolean equal = CFEqual(subjectName, issuerName);
CFRelease(subjectName);
CFRelease(issuerName);
if (!equal) {
continue;
}
}
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
// Once we support weak imports via cgo we should prefer that, and fall back to this
// for older systems.
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
if (err != noErr) {
continue;
}
if (data != NULL) {
if (!trustRoot && !trustAsRoot) {
untrusted = 1;
}
CFMutableDataRef appendTo = untrusted ? combinedUntrustedData : combinedData;
CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data));
CFRelease(data);
}
}
CFRelease(certs);
}
CFRelease(policy);
*pemRoots = combinedData;
*untrustedPemRoots = combinedUntrustedData;
return 0;
}
*/
import "C"
import (
"errors"
"unsafe"
)
func loadSystemRoots() (*CertPool, error) {
roots := NewCertPool()
var data C.CFDataRef
setNilCFRef(&data)
var untrustedData C.CFDataRef
setNilCFRef(&untrustedData)
err := C.FetchPEMRootsCTX509(&data, &untrustedData)
if err == -1 {
// TODO: better error message
return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo")
}
defer C.CFRelease(C.CFTypeRef(data))
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
roots.AppendCertsFromPEM(buf)
if isNilCFRef(untrustedData) {
return roots, nil
}
defer C.CFRelease(C.CFTypeRef(untrustedData))
buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData)))
untrustedRoots := NewCertPool()
untrustedRoots.AppendCertsFromPEM(buf)
trustedRoots := NewCertPool()
for _, c := range roots.certs {
if !untrustedRoots.contains(c) {
trustedRoots.AddCert(c)
}
}
return trustedRoots, nil
}
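// Build note (added commentary): the build tags above restrict this cgo path
// to non-ARM darwin builds with cgo enabled, e.g. roughly (an illustrative
// sketch, assuming a native macOS toolchain):
//
//	CGO_ENABLED=1 go build ./...
//
// When cgo is disabled, the !cgo shim shown later in this diff calls
// execSecurityRoots instead of this loadSystemRoots.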

View File

@@ -1,264 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go
package x509
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"sync"
)
var debugExecDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1")
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
return nil, nil
}
// This code is only used when compiling without cgo.
// It is here, instead of root_nocgo_darwin.go, so that tests can check it
// even if the tests are run with cgo enabled.
// The linker will not include these unused functions in binaries built with cgo enabled.
// execSecurityRoots finds the macOS list of trusted root certificates
// using only command-line tools. This is our fallback path when cgo isn't available.
//
// The strategy is as follows:
//
// 1. Run "security trust-settings-export" and "security
// trust-settings-export -d" to discover the set of certs with some
// user-tweaked trust policy. We're too lazy to parse the XML (at
// least at this stage of Go 1.8) to understand what the trust
// policy actually is. We just learn that there is _some_ policy.
//
// 2. Run "security find-certificate" to dump the list of system root
// CAs in PEM format.
//
// 3. For each dumped cert, conditionally verify it with "security
// verify-cert" if that cert was in the set discovered in Step 1.
// Without the Step 1 optimization, running "security verify-cert"
// 150-200 times takes 3.5 seconds. With the optimization, the
// whole process takes about 180 milliseconds with 1 untrusted root
// CA. (Compared to 110ms in the cgo path)
func execSecurityRoots() (*CertPool, error) {
hasPolicy, err := getCertsWithTrustPolicy()
if err != nil {
return nil, err
}
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: %d certs have a trust policy", len(hasPolicy)))
}
args := []string{"find-certificate", "-a", "-p",
"/System/Library/Keychains/SystemRootCertificates.keychain",
"/Library/Keychains/System.keychain",
}
u, err := user.Current()
if err != nil {
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: get current user: %v", err))
}
} else {
args = append(args,
filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"),
// Fresh installs of Sierra use a slightly different path for the login keychain
filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"),
)
}
cmd := exec.Command("/usr/bin/security", args...)
data, err := cmd.Output()
if err != nil {
return nil, err
}
var (
mu sync.Mutex
roots = NewCertPool()
numVerified int // number of execs of 'security verify-cert', for debug stats
)
blockCh := make(chan *pem.Block)
var wg sync.WaitGroup
// Using 4 goroutines to pipe into verify-cert seems to be
// about the best we can do. The verify-cert binary seems to
// just RPC to another server with coarse locking anyway, so
// running 16 at a time for instance doesn't help at all. Due
// to the "if hasPolicy" check below, though, we will rarely
// (or never) call verify-cert on stock macOS systems.
// The hope is that we only call verify-cert when the user has
// tweaked their trust policy. These 4 goroutines are only
// defensive in the pathological case of many trust edits.
for i := 0; i < 4; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for block := range blockCh {
cert, err := ParseCertificate(block.Bytes)
if err != nil {
continue
}
sha1CapHex := fmt.Sprintf("%X", sha1.Sum(block.Bytes))
valid := true
verifyChecks := 0
if hasPolicy[sha1CapHex] {
verifyChecks++
if !verifyCertWithSystem(block, cert) {
valid = false
}
}
mu.Lock()
numVerified += verifyChecks
if valid {
roots.AddCert(cert)
}
mu.Unlock()
}
}()
}
for len(data) > 0 {
var block *pem.Block
block, data = pem.Decode(data)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
blockCh <- block
}
close(blockCh)
wg.Wait()
if debugExecDarwinRoots {
mu.Lock()
defer mu.Unlock()
println(fmt.Sprintf("crypto/x509: ran security verify-cert %d times", numVerified))
}
return roots, nil
}
func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
data := pem.EncodeToMemory(block)
f, err := ioutil.TempFile("", "cert")
if err != nil {
fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err)
return false
}
defer os.Remove(f.Name())
if _, err := f.Write(data); err != nil {
fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err)
return false
}
if err := f.Close(); err != nil {
fmt.Fprintf(os.Stderr, "can't close temporary file for cert: %v", err)
return false
}
cmd := exec.Command("/usr/bin/security", "verify-cert", "-c", f.Name(), "-l", "-L")
var stderr bytes.Buffer
if debugExecDarwinRoots {
cmd.Stderr = &stderr
}
if err := cmd.Run(); err != nil {
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert rejected %s: %q", cert.Subject.CommonName, bytes.TrimSpace(stderr.Bytes())))
}
return false
}
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert approved %s", cert.Subject.CommonName))
}
return true
}
// getCertsWithTrustPolicy returns the set of certs that have a
// possibly-altered trust policy. The keys of the map are capitalized
// sha1 hex of the raw cert.
// They are the certs that should be checked against `security
// verify-cert` to see whether the user altered the default trust
// settings. This code is only used for cgo-disabled builds.
func getCertsWithTrustPolicy() (map[string]bool, error) {
set := map[string]bool{}
td, err := ioutil.TempDir("", "x509trustpolicy")
if err != nil {
return nil, err
}
defer os.RemoveAll(td)
run := func(file string, args ...string) error {
file = filepath.Join(td, file)
args = append(args, file)
cmd := exec.Command("/usr/bin/security", args...)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
// If there are no trust settings, the
// `security trust-settings-export` command
// fails with:
// exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found.
// Rather than match on English substrings that are probably
// localized on macOS, just interpret any failure to mean that
// there are no trust settings.
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: exec %q: %v, %s", cmd.Args, err, stderr.Bytes()))
}
return nil
}
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
// Gather all the runs of 40 capitalized hex characters.
br := bufio.NewReader(f)
var hexBuf bytes.Buffer
for {
b, err := br.ReadByte()
isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9')
if isHex {
hexBuf.WriteByte(b)
} else {
if hexBuf.Len() == 40 {
set[hexBuf.String()] = true
}
hexBuf.Reset()
}
if err == io.EOF {
break
}
if err != nil {
return err
}
}
return nil
}
if err := run("user", "trust-settings-export"); err != nil {
return nil, fmt.Errorf("dump-trust-settings (user): %v", err)
}
if err := run("admin", "trust-settings-export", "-d"); err != nil {
return nil, fmt.Errorf("dump-trust-settings (admin): %v", err)
}
return set, nil
}
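// Debugging note (added commentary): the verbose output in this file is gated
// on debugExecDarwinRoots, i.e. on GODEBUG containing "x509roots=1", and this
// exec-based path is only reached in cgo-disabled builds. A sketch of how one
// might exercise and observe it on a Mac (command is illustrative):
//
//	CGO_ENABLED=0 GODEBUG=x509roots=1 go test ./x509/...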

View File

@@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// Generates root_darwin_armx.go.
//
// As of iOS 8, there is no API for querying the system trusted X.509 root
// certificates. We could use SecTrustEvaluate to verify that a trust chain
// exists for a certificate, but the x509 API requires returning the entire
// chain.
//
// Apple publishes the list of trusted root certificates for iOS on
// support.apple.com. So we parse the list and extract the certificates from
// an OS X machine and embed them into the x509 package.
package main
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/pem"
"flag"
"fmt"
"go/format"
"io/ioutil"
"log"
"net/http"
"os/exec"
"regexp"
"strings"
"github.com/google/certificate-transparency-go/x509"
)
var output = flag.String("output", "root_darwin_armx.go", "file name to write")
func main() {
certs, err := selectCerts()
if err != nil {
log.Fatal(err)
}
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "// Code generated by root_darwin_arm_gen --output %s; DO NOT EDIT.\n", *output)
fmt.Fprintf(buf, "%s", header)
fmt.Fprintf(buf, "const systemRootsPEM = `\n")
for _, cert := range certs {
b := &pem.Block{
Type: "CERTIFICATE",
Bytes: cert.Raw,
}
if err := pem.Encode(buf, b); err != nil {
log.Fatal(err)
}
}
fmt.Fprintf(buf, "`")
source, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal("source format error:", err)
}
if err := ioutil.WriteFile(*output, source, 0644); err != nil {
log.Fatal(err)
}
}
func selectCerts() ([]*x509.Certificate, error) {
ids, err := fetchCertIDs()
if err != nil {
return nil, err
}
scerts, err := sysCerts()
if err != nil {
return nil, err
}
var certs []*x509.Certificate
for _, id := range ids {
if c, ok := scerts[id.fingerprint]; ok {
certs = append(certs, c)
} else {
fmt.Printf("WARNING: cannot find certificate: %s (fingerprint: %s)\n", id.name, id.fingerprint)
}
}
return certs, nil
}
func sysCerts() (certs map[string]*x509.Certificate, err error) {
cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", "/System/Library/Keychains/SystemRootCertificates.keychain")
data, err := cmd.Output()
if err != nil {
return nil, err
}
certs = make(map[string]*x509.Certificate)
for len(data) > 0 {
var block *pem.Block
block, data = pem.Decode(data)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
continue
}
fingerprint := sha256.Sum256(cert.Raw)
certs[hex.EncodeToString(fingerprint[:])] = cert
}
return certs, nil
}
type certID struct {
name string
fingerprint string
}
// fetchCertIDs fetches IDs of iOS X509 certificates from apple.com.
func fetchCertIDs() ([]certID, error) {
// Download the iOS 11 support page. The index for all iOS versions is here:
// https://support.apple.com/en-us/HT204132
resp, err := http.Get("https://support.apple.com/en-us/HT208125")
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
text := string(body)
text = text[strings.Index(text, "<div id=trusted"):]
text = text[:strings.Index(text, "</div>")]
var ids []certID
cols := make(map[string]int)
for i, rowmatch := range regexp.MustCompile("(?s)<tr>(.*?)</tr>").FindAllStringSubmatch(text, -1) {
row := rowmatch[1]
if i == 0 {
// Parse table header row to extract column names
for i, match := range regexp.MustCompile("(?s)<th>(.*?)</th>").FindAllStringSubmatch(row, -1) {
cols[match[1]] = i
}
continue
}
values := regexp.MustCompile("(?s)<td>(.*?)</td>").FindAllStringSubmatch(row, -1)
name := values[cols["Certificate name"]][1]
fingerprint := values[cols["Fingerprint (SHA-256)"]][1]
fingerprint = strings.Replace(fingerprint, "<br>", "", -1)
fingerprint = strings.Replace(fingerprint, "\n", "", -1)
fingerprint = strings.Replace(fingerprint, " ", "", -1)
fingerprint = strings.ToLower(fingerprint)
ids = append(ids, certID{
name: name,
fingerprint: fingerprint,
})
}
return ids, nil
}
const header = `
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build cgo
// +build darwin
// +build arm arm64 ios
package x509
func loadSystemRoots() (*CertPool, error) {
p := NewCertPool()
p.AppendCertsFromPEM([]byte(systemRootsPEM))
return p, nil
}
`
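// Usage note (added commentary): this generator is normally run through the
// go:generate directive shown earlier in this diff, roughly:
//
//	go run root_darwin_arm_gen.go -output root_darwin_armx.go
//
// It shells out to /usr/bin/security and fetches the certificate list from
// support.apple.com, so it must be run on a macOS machine with network access.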

File diff suppressed because it is too large

View File

@@ -1,14 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/pki/tls/cacert.pem", // OpenELEC
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
}

View File

@@ -1,8 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
// Possible certificate files; stop after finding one.
var certFiles = []string{}

View File

@@ -1,11 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !cgo
package x509
func loadSystemRoots() (*CertPool, error) {
return execSecurityRoots()
}

View File

@@ -1,37 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build plan9
package x509
import (
"io/ioutil"
"os"
)
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/sys/lib/tls/ca.pem",
}
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
return nil, nil
}
func loadSystemRoots() (*CertPool, error) {
roots := NewCertPool()
var bestErr error
for _, file := range certFiles {
data, err := ioutil.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
return roots, nil
}
if bestErr == nil || (os.IsNotExist(bestErr) && !os.IsNotExist(err)) {
bestErr = err
}
}
return nil, bestErr
}

View File

@@ -1,12 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
// Possible certificate files; stop after finding one.
var certFiles = []string{
"/etc/certs/ca-certificates.crt", // Solaris 11.2+
"/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS
"/etc/ssl/cacert.pem", // OmniOS
}

View File

@@ -1,88 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build dragonfly freebsd linux nacl netbsd openbsd solaris
package x509
import (
"io/ioutil"
"os"
)
// Possible directories with certificate files; stop after successfully
// reading at least one file from a directory.
var certDirectories = []string{
"/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139
"/system/etc/security/cacerts", // Android
"/usr/local/share/certs", // FreeBSD
"/etc/pki/tls/certs", // Fedora/RHEL
"/etc/openssl/certs", // NetBSD
}
const (
// certFileEnv is the environment variable which identifies where to locate
// the SSL certificate file. If set this overrides the system default.
certFileEnv = "SSL_CERT_FILE"
// certDirEnv is the environment variable which identifies which directory
// to check for SSL certificate files. If set this overrides the system default.
certDirEnv = "SSL_CERT_DIR"
)
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
return nil, nil
}
func loadSystemRoots() (*CertPool, error) {
roots := NewCertPool()
files := certFiles
if f := os.Getenv(certFileEnv); f != "" {
files = []string{f}
}
var firstErr error
for _, file := range files {
data, err := ioutil.ReadFile(file)
if err == nil {
roots.AppendCertsFromPEM(data)
break
}
if firstErr == nil && !os.IsNotExist(err) {
firstErr = err
}
}
dirs := certDirectories
if d := os.Getenv(certDirEnv); d != "" {
dirs = []string{d}
}
for _, directory := range dirs {
fis, err := ioutil.ReadDir(directory)
if err != nil {
if firstErr == nil && !os.IsNotExist(err) {
firstErr = err
}
continue
}
rootsAdded := false
for _, fi := range fis {
data, err := ioutil.ReadFile(directory + "/" + fi.Name())
if err == nil && roots.AppendCertsFromPEM(data) {
rootsAdded = true
}
}
if rootsAdded {
return roots, nil
}
}
if len(roots.certs) > 0 {
return roots, nil
}
return nil, firstErr
}
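// Override sketch (added commentary): because certFileEnv and certDirEnv are
// consulted before the built-in lists, a custom bundle can be injected without
// recompiling, for example (paths are placeholders):
//
//	SSL_CERT_FILE=/opt/custom/ca-bundle.pem ./server
//	SSL_CERT_DIR=/opt/custom/certs ./server
//
// Note that setting either variable replaces the corresponding default list
// entirely rather than adding to it.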

View File

@@ -1,266 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"errors"
"syscall"
"unsafe"
)
// createStoreContext creates a new *syscall.CertContext representing the leaf certificate in an in-memory
// certificate store containing itself and all of the intermediate certificates specified
// in the opts.Intermediates CertPool.
//
// A pointer to the in-memory store is available in the returned CertContext's Store field.
// The store is automatically freed when the CertContext is freed using
// syscall.CertFreeCertificateContext.
func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) {
var storeCtx *syscall.CertContext
leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw)))
if err != nil {
return nil, err
}
defer syscall.CertFreeCertificateContext(leafCtx)
handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0)
if err != nil {
return nil, err
}
defer syscall.CertCloseStore(handle, 0)
err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx)
if err != nil {
return nil, err
}
if opts.Intermediates != nil {
for _, intermediate := range opts.Intermediates.certs {
ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw)))
if err != nil {
return nil, err
}
err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil)
syscall.CertFreeCertificateContext(ctx)
if err != nil {
return nil, err
}
}
}
return storeCtx, nil
}
// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) {
if simpleChain == nil || count == 0 {
return nil, errors.New("x509: invalid simple chain")
}
simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
lastChain := simpleChains[count-1]
elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
for i := 0; i < int(lastChain.NumElements); i++ {
// Copy the buf, since ParseCertificate does not create its own copy.
cert := elements[i].CertContext
encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
buf := make([]byte, cert.Length)
copy(buf, encodedCert[:])
parsedCert, err := ParseCertificate(buf)
if err != nil {
return nil, err
}
chain = append(chain, parsedCert)
}
return chain, nil
}
// checkChainTrustStatus checks the trust status of the certificate chain, translating
// any errors it finds into Go errors in the process.
func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error {
if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR {
status := chainCtx.TrustStatus.ErrorStatus
switch status {
case syscall.CERT_TRUST_IS_NOT_TIME_VALID:
return CertificateInvalidError{c, Expired, ""}
default:
return UnknownAuthorityError{c, nil, nil}
}
}
return nil
}
// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for
// use as a certificate chain for a SSL/TLS server.
func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error {
servernamep, err := syscall.UTF16PtrFromString(opts.DNSName)
if err != nil {
return err
}
sslPara := &syscall.SSLExtraCertChainPolicyPara{
AuthType: syscall.AUTHTYPE_SERVER,
ServerName: servernamep,
}
sslPara.Size = uint32(unsafe.Sizeof(*sslPara))
para := &syscall.CertChainPolicyPara{
ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)),
}
para.Size = uint32(unsafe.Sizeof(*para))
status := syscall.CertChainPolicyStatus{}
err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status)
if err != nil {
return err
}
// TODO(mkrautz): use the lChainIndex and lElementIndex fields
// of the CertChainPolicyStatus to provide proper context, instead
// of using c.
if status.Error != 0 {
switch status.Error {
case syscall.CERT_E_EXPIRED:
return CertificateInvalidError{c, Expired, ""}
case syscall.CERT_E_CN_NO_MATCH:
return HostnameError{c, opts.DNSName}
case syscall.CERT_E_UNTRUSTEDROOT:
return UnknownAuthorityError{c, nil, nil}
default:
return UnknownAuthorityError{c, nil, nil}
}
}
return nil
}
// systemVerify is like Verify, except that it uses CryptoAPI calls
// to build certificate chains and verify them.
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
hasDNSName := opts != nil && len(opts.DNSName) > 0
storeCtx, err := createStoreContext(c, opts)
if err != nil {
return nil, err
}
defer syscall.CertFreeCertificateContext(storeCtx)
para := new(syscall.CertChainPara)
para.Size = uint32(unsafe.Sizeof(*para))
// If there's a DNSName set in opts, assume we're verifying
// a certificate from a TLS server.
if hasDNSName {
oids := []*byte{
&syscall.OID_PKIX_KP_SERVER_AUTH[0],
// Both IE and Chrome allow certificates with
// Server Gated Crypto as well. Some certificates
// in the wild require them.
&syscall.OID_SERVER_GATED_CRYPTO[0],
&syscall.OID_SGC_NETSCAPE[0],
}
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR
para.RequestedUsage.Usage.Length = uint32(len(oids))
para.RequestedUsage.Usage.UsageIdentifiers = &oids[0]
} else {
para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND
para.RequestedUsage.Usage.Length = 0
para.RequestedUsage.Usage.UsageIdentifiers = nil
}
var verifyTime *syscall.Filetime
if opts != nil && !opts.CurrentTime.IsZero() {
ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano())
verifyTime = &ft
}
// CertGetCertificateChain will traverse Windows's root stores
// in an attempt to build a verified certificate chain. Once
// it has found a verified chain, it stops. MSDN docs on
// CERT_CHAIN_CONTEXT:
//
// When a CERT_CHAIN_CONTEXT is built, the first simple chain
// begins with an end certificate and ends with a self-signed
// certificate. If that self-signed certificate is not a root
// or otherwise trusted certificate, an attempt is made to
// build a new chain. CTLs are used to create the new chain
// beginning with the self-signed certificate from the original
// chain as the end certificate of the new chain. This process
// continues building additional simple chains until the first
// self-signed certificate is a trusted certificate or until
// an additional simple chain cannot be built.
//
// The result is that we'll only get a single trusted chain to
// return to our caller.
var chainCtx *syscall.CertChainContext
err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx)
if err != nil {
return nil, err
}
defer syscall.CertFreeCertificateChain(chainCtx)
err = checkChainTrustStatus(c, chainCtx)
if err != nil {
return nil, err
}
if hasDNSName {
err = checkChainSSLServerPolicy(c, chainCtx, opts)
if err != nil {
return nil, err
}
}
chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount))
if err != nil {
return nil, err
}
chains = append(chains, chain)
return chains, nil
}
func loadSystemRoots() (*CertPool, error) {
// TODO: restore this functionality on Windows. We tried to do
// it in Go 1.8 but had to revert it. See Issue 18609.
// Returning (nil, nil) was the old behavior, prior to CL 30578.
return nil, nil
const CRYPT_E_NOT_FOUND = 0x80092004
store, err := syscall.CertOpenSystemStore(0, syscall.StringToUTF16Ptr("ROOT"))
if err != nil {
return nil, err
}
defer syscall.CertCloseStore(store, 0)
roots := NewCertPool()
var cert *syscall.CertContext
for {
cert, err = syscall.CertEnumCertificatesInStore(store, cert)
if err != nil {
if errno, ok := err.(syscall.Errno); ok {
if errno == CRYPT_E_NOT_FOUND {
break
}
}
return nil, err
}
if cert == nil {
break
}
// Copy the buf, since ParseCertificate does not create its own copy.
buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
buf2 := make([]byte, cert.Length)
copy(buf2, buf)
if c, err := ParseCertificate(buf2); err == nil {
roots.AddCert(c)
}
}
return roots, nil
}
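// Caller sketch (added commentary): systemVerify above is not invoked directly
// by applications; as in the upstream standard library code this file derives
// from, it is reached via Certificate.Verify when no explicit root pool is
// supplied in the options. Setting DNSName is what enables the SSL server
// policy check (names below are hypothetical):
//
//	opts := VerifyOptions{DNSName: "example.com"}
//	chains, err := serverCert.Verify(opts)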

View File

@@ -1,242 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/google/certificate-transparency-go/asn1"
)
// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string,
// where the BitLength field holds the prefix length.
type IPAddressPrefix asn1.BitString
// IPAddressRange describes an (inclusive) IP address range.
type IPAddressRange struct {
Min IPAddressPrefix
Max IPAddressPrefix
}
// Most relevant values for AFI from:
// http://www.iana.org/assignments/address-family-numbers.
const (
IPv4AddressFamilyIndicator = uint16(1)
IPv6AddressFamilyIndicator = uint16(2)
)
// IPAddressFamilyBlocks describes a set of ranges of IP addresses.
type IPAddressFamilyBlocks struct {
// AFI holds an address family indicator from
// http://www.iana.org/assignments/address-family-numbers.
AFI uint16
// SAFI holds a subsequent address family indicator from
// http://www.iana.org/assignments/safi-namespace.
SAFI byte
// InheritFromIssuer indicates that the set of addresses should
// be taken from the issuer's certificate.
InheritFromIssuer bool
// AddressPrefixes holds prefixes if InheritFromIssuer is false.
AddressPrefixes []IPAddressPrefix
// AddressRanges holds ranges if InheritFromIssuer is false.
AddressRanges []IPAddressRange
}
// Internal types for asn1 unmarshalling.
type ipAddressFamily struct {
AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI
Choice asn1.RawValue
}
// Internally, use raw asn1.BitString rather than the IPAddressPrefix
// type alias (so that asn1.Unmarshal() decodes properly).
type ipAddressRange struct {
Min asn1.BitString
Max asn1.BitString
}
func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks {
// RFC 3779 2.2.3
// IPAddrBlocks ::= SEQUENCE OF IPAddressFamily
//
// IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI --
// addressFamily OCTET STRING (SIZE (2..3)),
// ipAddressChoice IPAddressChoice }
//
// IPAddressChoice ::= CHOICE {
// inherit NULL, -- inherit from issuer --
// addressesOrRanges SEQUENCE OF IPAddressOrRange }
//
// IPAddressOrRange ::= CHOICE {
// addressPrefix IPAddress,
// addressRange IPAddressRange }
//
// IPAddressRange ::= SEQUENCE {
// min IPAddress,
// max IPAddress }
//
// IPAddress ::= BIT STRING
var addrBlocks []ipAddressFamily
if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err))
return nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ipAddrBlocks extension"))
return nil
}
var results []*IPAddressFamilyBlocks
for i, block := range addrBlocks {
var fam IPAddressFamilyBlocks
if l := len(block.AddressFamily); l < 2 || l > 3 {
nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l))
continue
}
fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2])
if len(block.AddressFamily) > 2 {
fam.SAFI = block.AddressFamily[2]
}
// IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) {
fam.InheritFromIssuer = true
results = append(results, &fam)
continue
}
var addrRanges []asn1.RawValue
if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err))
continue
}
for j, ar := range addrRanges {
// Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit)
// tags -- here, either BIT STRING or SEQUENCE.
switch ar.Tag {
case asn1.TagBitString:
// BIT STRING for single prefix IPAddress
var val asn1.BitString
if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
continue
}
fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))
case asn1.TagSequence:
var val ipAddressRange
if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
continue
}
fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})
default:
nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
}
}
results = append(results, &fam)
}
return results
}
// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIDRange struct {
Min int
Max int
}
// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
// domain identifiers).
type ASIdentifiers struct {
// InheritFromIssuer indicates that the set of AS identifiers should
// be taken from the issuer's certificate.
InheritFromIssuer bool
// ASIDs holds AS identifiers if InheritFromIssuer is false.
ASIDs []int
// ASIDRanges holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
ASIDRanges []ASIDRange
}
type asIdentifiers struct {
ASNum asn1.RawValue `asn1:"optional,tag:0"`
RDI asn1.RawValue `asn1:"optional,tag:1"`
}
func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
// RFC 3779 2.3.2
// ASIdentifierChoice ::= CHOICE {
// inherit NULL, -- inherit from issuer --
// asIdsOrRanges SEQUENCE OF ASIdOrRange }
// ASIdOrRange ::= CHOICE {
// id ASId,
// range ASRange }
// ASRange ::= SEQUENCE {
// min ASId,
// max ASId }
// ASId ::= INTEGER
if len(val.FullBytes) == 0 { // OPTIONAL
return nil
}
// ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
if bytes.Equal(val.Bytes, asn1.NullBytes) {
return &ASIdentifiers{InheritFromIssuer: true}
}
var ids []asn1.RawValue
if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
return nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
return nil
}
var asID ASIdentifiers
for i, id := range ids {
// Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
// tags -- here, either INTEGER or SEQUENCE.
switch id.Tag {
case asn1.TagInteger:
var val int
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
continue
}
asID.ASIDs = append(asID.ASIDs, val)
case asn1.TagSequence:
var val ASIDRange
if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
continue
}
asID.ASIDRanges = append(asID.ASIDRanges, val)
default:
nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
}
}
return &asID
}
func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
// RFC 3779 2.3.2
// ASIdentifiers ::= SEQUENCE {
// asnum [0] EXPLICIT ASIdentifierChoice OPTIONAL,
// rdi [1] EXPLICIT ASIdentifierChoice OPTIONAL}
var asIDs asIdentifiers
if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
return nil, nil
} else if len(rest) != 0 {
nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
return nil, nil
}
return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
}
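// Worked example (added commentary): an IPAddressPrefix is an ASN.1 BIT STRING
// whose BitLength is the prefix length, so an IPv4 entry with
// Bytes=[0x0a,0x00,0x00] and BitLength=24 denotes 10.0.0.0/24. A hypothetical
// helper converting such a value, assuming AFI == IPv4AddressFamilyIndicator:
//
//	func ipv4Net(p IPAddressPrefix) *net.IPNet {
//		ip := make(net.IP, net.IPv4len) // unset trailing octets stay zero
//		copy(ip, p.Bytes)
//		return &net.IPNet{IP: ip, Mask: net.CIDRMask(p.BitLength, 32)}
//	}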

View File

@@ -1,113 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"crypto/ecdsa"
"crypto/elliptic"
"errors"
"fmt"
"math/big"
"github.com/google/certificate-transparency-go/asn1"
)
const ecPrivKeyVersion = 1
// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
// References:
// RFC 5915
// SEC1 - http://www.secg.org/sec1-v2.pdf
// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
// practice it is usually present; when it is omitted (e.g. for a key carried
// inside a PKCS#8 container) the curve OID must be supplied separately.
type ecPrivateKey struct {
Version int
PrivateKey []byte
NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
}
// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
return parseECPrivateKey(nil, der)
}
// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
oid, ok := OIDFromNamedCurve(key.Curve)
if !ok {
return nil, errors.New("x509: unknown elliptic curve")
}
return marshalECPrivateKeyWithOID(key, oid)
}
// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER format and
// sets the curve ID to the given OID, or omits it if OID is nil.
func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
privateKeyBytes := key.D.Bytes()
paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
return asn1.Marshal(ecPrivateKey{
Version: 1,
PrivateKey: paddedPrivateKey,
NamedCurveOID: oid,
PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
})
}
// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
// The OID for the named curve may be provided from another source (such as
// the PKCS8 container) - if it is provided then use this instead of the OID
// that may exist in the EC private key structure.
func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
var privKey ecPrivateKey
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
}
if privKey.Version != ecPrivKeyVersion {
return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
}
var nfe NonFatalErrors
var curve elliptic.Curve
if namedCurveOID != nil {
curve = namedCurveFromOID(*namedCurveOID, &nfe)
} else {
curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe)
}
if curve == nil {
return nil, errors.New("x509: unknown elliptic curve")
}
k := new(big.Int).SetBytes(privKey.PrivateKey)
curveOrder := curve.Params().N
if k.Cmp(curveOrder) >= 0 {
return nil, errors.New("x509: invalid elliptic curve private key value")
}
priv := new(ecdsa.PrivateKey)
priv.Curve = curve
priv.D = k
privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
// Some private keys have leading zero padding. This is invalid
// according to [SEC1], but this code will ignore it.
for len(privKey.PrivateKey) > len(privateKey) {
if privKey.PrivateKey[0] != 0 {
return nil, errors.New("x509: invalid private key length")
}
privKey.PrivateKey = privKey.PrivateKey[1:]
}
// Some private keys remove all leading zeros; this is also invalid
// according to [SEC1] but since OpenSSL used to do this, we ignore
// this too.
copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
return priv, nil
}
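// Round-trip sketch (added commentary): the exported MarshalECPrivateKey /
// ParseECPrivateKey pair above can be checked with a freshly generated key
// (error handling elided for brevity):
//
//	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
//	der, _ := MarshalECPrivateKey(key)
//	parsed, _ := ParseECPrivateKey(der)
//	// parsed.D now equals key.D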

View File

@@ -1,31 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFazCCA1OgAwIBAgIJAL8a/lsnspOqMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV
BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz
dHMxETAPBgNVBAMMCHRlc3QtZGlyMB4XDTE3MDIwMTIzNTAyN1oXDTI3MDEzMDIz
NTAyN1owTDELMAkGA1UEBhMCVUsxEzARBgNVBAgMClRlc3QtU3RhdGUxFTATBgNV
BAoMDEdvbGFuZyBUZXN0czERMA8GA1UEAwwIdGVzdC1kaXIwggIiMA0GCSqGSIb3
DQEBAQUAA4ICDwAwggIKAoICAQDzBoi43Yn30KN13PKFHu8LA4UmgCRToTukLItM
WK2Je45grs/axg9n3YJOXC6hmsyrkOnyBcx1xVNgSrOAll7fSjtChRIX72Xrloxu
XewtWVIrijqz6oylbvEmbRT3O8uynu5rF82Pmdiy8oiSfdywjKuPnE0hjV1ZSCql
MYcXqA+f0JFD8kMv4pbtxjGH8f2DkYQz+hHXLrJH4/MEYdVMQXoz/GDzLyOkrXBN
hpMaBBqg1p0P+tRdfLXuliNzA9vbZylzpF1YZ0gvsr0S5Y6LVtv7QIRygRuLY4kF
k+UYuFq8NrV8TykS7FVnO3tf4XcYZ7r2KV5FjYSrJtNNo85BV5c3xMD3fJ2XcOWk
+oD1ATdgAM3aKmSOxNtNItKKxBe1mkqDH41NbWx7xMad78gDznyeT0tjEOltN2bM
uXU1R/jgR/vq5Ec0AhXJyL/ziIcmuV2fSl/ZxT4ARD+16tgPiIx+welTf0v27/JY
adlfkkL5XsPRrbSguISrj7JeaO/gjG3KnDVHcZvYBpDfHqRhCgrosfe26TZcTXx2
cRxOfvBjMz1zJAg+esuUzSkerreyRhzD7RpeZTwi6sxvx82MhYMbA3w1LtgdABio
9JRqZy3xqsIbNv7N46WO/qXL1UMRKb1UyHeW8g8btboz+B4zv1U0Nj+9qxPBbQui
dgL9LQIDAQABo1AwTjAdBgNVHQ4EFgQUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwHwYD
VR0jBBgwFoAUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwDAYDVR0TBAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAgEAvEVnUYsIOt87rggmLPqEueynkuQ+562M8EDHSQl82zbe
xDCxeg3DvPgKb+RvaUdt1362z/szK10SoeMgx6+EQLoV9LiVqXwNqeYfixrhrdw3
ppAhYYhymdkbUQCEMHypmXP1vPhAz4o8Bs+eES1M+zO6ErBiD7SqkmBElT+GixJC
6epC9ZQFs+dw3lPlbiZSsGE85sqc3VAs0/JgpL/pb1/Eg4s0FUhZD2C2uWdSyZGc
g0/v3aXJCp4j/9VoNhI1WXz3M45nysZIL5OQgXymLqJElQa1pZ3Wa4i/nidvT4AT
Xlxc/qijM8set/nOqp7hVd5J0uG6qdwLRILUddZ6OpXd7ZNi1EXg+Bpc7ehzGsDt
3UFGzYXDjxYnK2frQfjLS8stOQIqSrGthW6x0fdkVx0y8BByvd5J6+JmZl4UZfzA
m99VxXSt4B9x6BvnY7ktzcFDOjtuLc4B/7yg9fv1eQuStA4cHGGAttsCg1X/Kx8W
PvkkeH0UWDZ9vhH9K36703z89da6MWF+bz92B0+4HoOmlVaXRkvblsNaynJnL0LC
Ayry7QBxuh5cMnDdRwJB3AVJIiJ1GVpb7aGvBOnx+s2lwRv9HWtghb+cbwwktx1M
JHyBf3GZNSWTpKY7cD8V+NnBv3UuioOVVo+XAU4LF/bYUjdRpxWADJizNtZrtFo=
-----END CERTIFICATE-----

View File

@@ -1,32 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIFbTCCA1WgAwIBAgIJAN338vEmMtLsMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV
BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz
dHMxEjAQBgNVBAMMCXRlc3QtZmlsZTAeFw0xNzAyMDEyMzUyMDhaFw0yNzAxMzAy
MzUyMDhaME0xCzAJBgNVBAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYD
VQQKDAxHb2xhbmcgVGVzdHMxEjAQBgNVBAMMCXRlc3QtZmlsZTCCAiIwDQYJKoZI
hvcNAQEBBQADggIPADCCAgoCggIBAPMGiLjdiffQo3Xc8oUe7wsDhSaAJFOhO6Qs
i0xYrYl7jmCuz9rGD2fdgk5cLqGazKuQ6fIFzHXFU2BKs4CWXt9KO0KFEhfvZeuW
jG5d7C1ZUiuKOrPqjKVu8SZtFPc7y7Ke7msXzY+Z2LLyiJJ93LCMq4+cTSGNXVlI
KqUxhxeoD5/QkUPyQy/ilu3GMYfx/YORhDP6Edcuskfj8wRh1UxBejP8YPMvI6St
cE2GkxoEGqDWnQ/61F18te6WI3MD29tnKXOkXVhnSC+yvRLljotW2/tAhHKBG4tj
iQWT5Ri4Wrw2tXxPKRLsVWc7e1/hdxhnuvYpXkWNhKsm002jzkFXlzfEwPd8nZdw
5aT6gPUBN2AAzdoqZI7E200i0orEF7WaSoMfjU1tbHvExp3vyAPOfJ5PS2MQ6W03
Zsy5dTVH+OBH++rkRzQCFcnIv/OIhya5XZ9KX9nFPgBEP7Xq2A+IjH7B6VN/S/bv
8lhp2V+SQvlew9GttKC4hKuPsl5o7+CMbcqcNUdxm9gGkN8epGEKCuix97bpNlxN
fHZxHE5+8GMzPXMkCD56y5TNKR6ut7JGHMPtGl5lPCLqzG/HzYyFgxsDfDUu2B0A
GKj0lGpnLfGqwhs2/s3jpY7+pcvVQxEpvVTId5byDxu1ujP4HjO/VTQ2P72rE8Ft
C6J2Av0tAgMBAAGjUDBOMB0GA1UdDgQWBBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAf
BgNVHSMEGDAWgBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4ICAQB3sCntCcQwhMgRPPyvOCMyTcQ/Iv+cpfxz2Ck14nlx
AkEAH2CH0ov5GWTt07/ur3aa5x+SAKi0J3wTD1cdiw4U/6Uin6jWGKKxvoo4IaeK
SbM8w/6eKx6UbmHx7PA/eRABY9tTlpdPCVgw7/o3WDr03QM+IAtatzvaCPPczake
pbdLwmBZB/v8V+6jUajy6jOgdSH0PyffGnt7MWgDETmNC6p/Xigp5eh+C8Fb4NGT
xgHES5PBC+sruWp4u22bJGDKTvYNdZHsnw/CaKQWNsQqwisxa3/8N5v+PCff/pxl
r05pE3PdHn9JrCl4iWdVlgtiI9BoPtQyDfa/OEFaScE8KYR8LxaAgdgp3zYncWls
BpwQ6Y/A2wIkhlD9eEp5Ib2hz7isXOs9UwjdriKqrBXqcIAE5M+YIk3+KAQKxAtd
4YsK3CSJ010uphr12YKqlScj4vuKFjuOtd5RyyMIxUG3lrrhAu2AzCeKCLdVgA8+
75FrYMApUdvcjp4uzbBoED4XRQlx9kdFHVbYgmE/+yddBYJM8u4YlgAL0hW2/D8p
z9JWIfxVmjJnBnXaKGBuiUyZ864A3PJndP6EMMo7TzS2CDnfCYuJjvI0KvDjFNmc
rQA04+qfMSEz3nmKhbbZu4eYLzlADhfH8tT4GMtXf71WLA5AUHGf2Y4+HIHTsmHG
vQ==
-----END CERTIFICATE-----

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,55 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// This file is run by the x509 tests to ensure that a program with minimal
// imports can sign certificates without errors resulting from missing hash
// functions.
package main
import (
"crypto/rand"
// START CT CHANGES
"github.com/google/certificate-transparency-go/x509"
"github.com/google/certificate-transparency-go/x509/pkix"
// END CT CHANGES
"encoding/pem"
"math/big"
"time"
)
func main() {
block, _ := pem.Decode([]byte(pemPrivateKey))
rsaPriv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
panic("Failed to parse private key: " + err.Error())
}
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: "test",
Organization: []string{"Σ Acme Co"},
},
NotBefore: time.Unix(1000, 0),
NotAfter: time.Unix(100000, 0),
KeyUsage: x509.KeyUsageCertSign,
}
if _, err = x509.CreateCertificate(rand.Reader, &template, &template, &rsaPriv.PublicKey, rsaPriv); err != nil {
panic("failed to create certificate with basic imports: " + err.Error())
}
}
var pemPrivateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0
fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu
/ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu
RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/
EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A
IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS
tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V
-----END RSA PRIVATE KEY-----
`