Merge pull request #123529 from thockin/go-workspaces
Go workspaces for k/k and k/staging/*
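For orientation: adopting Go workspaces for k/k and k/staging/* means a go.work file at the repository root whose use directives cover the root module and each staging module. A minimal illustrative sketch, not the generated file from this PR (the Go version and module list shown are placeholders):

go 1.22

use (
	.
	./staging/src/k8s.io/api
	./staging/src/k8s.io/apimachinery
	./staging/src/k8s.io/client-go
	// ...one use directive per module under staging/src/k8s.io/
)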
BIN vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der generated vendored
Binary file not shown.
BIN vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der generated vendored
Binary file not shown.
BIN vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der generated vendored
Binary file not shown.
BIN vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der generated vendored
Binary file not shown.
BIN vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der generated vendored
Binary file not shown.
24 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
BIN vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der generated vendored
Binary file not shown.
24 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/testdata/client_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/testdata/client_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
19 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem generated vendored
@@ -1,19 +0,0 @@
(deleted PEM CERTIFICATE block)
28 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem generated vendored
@@ -1,28 +0,0 @@
(deleted PEM PRIVATE KEY block)
21 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem generated vendored
@@ -1,21 +0,0 @@
(deleted PEM CERTIFICATE block)
21 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem generated vendored
@@ -1,21 +0,0 @@
(deleted PEM CERTIFICATE block)
28 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem generated vendored
@@ -1,28 +0,0 @@
(deleted PEM PRIVATE KEY block)
19 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem generated vendored
@@ -1,19 +0,0 @@
(deleted PEM CERTIFICATE block)
28 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem generated vendored
@@ -1,28 +0,0 @@
(deleted PEM PRIVATE KEY block)
24 vendor/github.com/google/s2a-go/testdata/server_cert.pem generated vendored
@@ -1,24 +0,0 @@
(deleted PEM CERTIFICATE block)
27 vendor/github.com/google/s2a-go/testdata/server_key.pem generated vendored
@@ -1,27 +0,0 @@
(deleted PEM RSA PRIVATE KEY block)
12 vendor/golang.org/x/net/html/token.go generated vendored
@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
			return
		}
		switch c {
		case ' ', '\n', '\r', '\t', '\f', '/':
			z.pendingAttr[0].end = z.raw.end - 1
			return
		case '=':
			if z.pendingAttr[0].start+1 == z.raw.end {
				// WHATWG 13.2.5.32, if we see an equals sign before the attribute name
@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
				continue
			}
			fallthrough
		case '>':
		case ' ', '\n', '\r', '\t', '\f', '/', '>':
			// WHATWG 13.2.5.33 Attribute name state
			// We need to reconsume the char in the after attribute name state to support the / character
			z.raw.end--
			z.pendingAttr[0].end = z.raw.end
			return
@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
	if z.err != nil {
		return
	}
	if c == '/' {
		// WHATWG 13.2.5.34 After attribute name state
		// U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
		return
	}
	if c != '=' {
		z.raw.end--
		return
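A standalone sketch, separate from the vendored diff above, of driving the tokenizer whose attribute-name handling these hunks change; it assumes golang.org/x/net/html is available to the build:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// A '/' just before '>' ends the attribute name and switches to the
	// self-closing-start-tag state, which is the case the hunks above adjust.
	z := html.NewTokenizer(strings.NewReader(`<input disabled/><br>`))
	for {
		if z.Next() == html.ErrorToken {
			return // io.EOF ends the token stream
		}
		t := z.Token()
		fmt.Printf("%v %q %v\n", t.Type, t.Data, t.Attr)
	}
}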
11 vendor/golang.org/x/net/http2/frame.go generated vendored
@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
}

func (fr *Framer) maxHeaderStringLen() int {
	v := fr.maxHeaderListSize()
	if uint32(int(v)) == v {
		return int(v)
	v := int(fr.maxHeaderListSize())
	if v < 0 {
		// If maxHeaderListSize overflows an int, use no limit (0).
		return 0
	}
	// They had a crazy big number for MaxHeaderBytes anyway,
	// so give them unlimited header lengths:
	return 0
	return v
}

// readMetaFrame returns 0 or more CONTINUATION frames from fr and
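The rewritten maxHeaderStringLen above leans on the fact that converting a large uint32 to int only goes negative where int is 32 bits wide; a small standalone illustration of that guard (treating 0 as the "no limit" value, as the comments above do):

package main

import "fmt"

// limit mirrors the guard above: a uint32 that does not fit in a 32-bit int
// wraps negative, and a negative result is treated as "no limit" (0).
func limit(max uint32) int {
	v := int(max)
	if v < 0 {
		return 0
	}
	return v
}

func main() {
	fmt.Println(limit(1 << 20))    // 1048576
	fmt.Println(limit(4294967295)) // 0 on 32-bit platforms, 4294967295 on 64-bit
}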
3 vendor/golang.org/x/sync/errgroup/errgroup.go generated vendored
@@ -4,6 +4,9 @@

// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
package errgroup

import (
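The doc comment added above positions errgroup.Group next to sync.WaitGroup; a minimal usage sketch (placeholder URLs, assumes golang.org/x/sync/errgroup in the build):

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	urls := []string{"https://example.com", "https://example.org"} // placeholders
	for _, u := range urls {
		u := u // capture the loop variable for the closure
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
			if err != nil {
				return err
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // the first error cancels ctx for the other goroutines
			}
			return resp.Body.Close()
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("fetch failed:", err)
	}
}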
40 vendor/golang.org/x/tools/go/packages/doc.go generated vendored
@@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool.
The default build tool is the go command.
Its supported patterns are described at
https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
Other build systems may be supported by providing a "driver";
see [The driver protocol].

Load may be used in Go projects that use alternative build systems, by
installing an appropriate "driver" program for the build system and
specifying its location in the GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.
The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
they identify.
(See driverRequest and driverResponse types for the JSON
schema used by the protocol.
Though the protocol is supported, these types are currently unexported;
see #64608 for a proposal to publish them.)

Regardless of driver, all patterns with the prefix "query=", where query is a
All patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.

@@ -86,7 +74,29 @@ for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to [Load], so that it can interpret them
according to the conventions of the underlying build system.

See the Example function for typical usage.

# The driver protocol

[Load] may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.

The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
those patterns identify. Drivers must also support the special "file="
and "pattern=" patterns described above.

The patterns are provided as positional command-line arguments. A
JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
*/
package packages // import "golang.org/x/tools/go/packages"
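A sketch of the driver side of the protocol documented above, assuming a module that requires golang.org/x/tools at a version exporting DriverRequest and DriverResponse (as the vendored copy in this commit does):

package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// Patterns arrive as positional arguments; the DriverRequest arrives on stdin.
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding DriverRequest: %v", err)
	}
	_ = os.Args[1:] // the patterns a real driver would resolve against its build system

	// Reporting NotHandled tells go/packages to fall back to the 'go list' driver.
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatalf("encoding DriverResponse: %v", err)
	}
}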
77 vendor/golang.org/x/tools/go/packages/external.go generated vendored
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file enables an external tool to intercept package requests.
// If the tool is present then its results are used in preference to
// the go list command.

package packages

// This file defines the protocol that enables an external "driver"
// tool to supply package metadata in place of 'go list'.

import (
	"bytes"
	"encoding/json"
@@ -17,31 +16,71 @@ import (
	"strings"
)

// The Driver Protocol
// DriverRequest defines the schema of a request for package metadata
// from an external driver program. The JSON-encoded DriverRequest
// message is provided to the driver program's standard input. The
// query patterns are provided as command-line arguments.
//
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
// This allows for different build systems to support go/packages by telling go/packages how the
// packages' source is organized.
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
// documentation in doc.go for the full description of the patterns that need to be supported.
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.

// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
type driverRequest struct {
// See the package documentation for an overview.
type DriverRequest struct {
	Mode LoadMode `json:"mode"`

	// Env specifies the environment the underlying build system should be run in.
	Env []string `json:"env"`

	// BuildFlags are flags that should be passed to the underlying build system.
	BuildFlags []string `json:"build_flags"`

	// Tests specifies whether the patterns should also return test packages.
	Tests bool `json:"tests"`

	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
	// of overlay files.
	Overlay map[string][]byte `json:"overlay"`
}

// DriverResponse defines the schema of a response from an external
// driver program, providing the results of a query for package
// metadata. The driver program must write a JSON-encoded
// DriverResponse message to its standard output.
//
// See the package documentation for an overview.
type DriverResponse struct {
	// NotHandled is returned if the request can't be handled by the current
	// driver. If an external driver returns a response with NotHandled, the
	// rest of the DriverResponse is ignored, and go/packages will fallback
	// to the next driver. If go/packages is extended in the future to support
	// lists of multiple drivers, go/packages will fall back to the next driver.
	NotHandled bool

	// Compiler and Arch are the arguments pass of types.SizesFor
	// to get a types.Sizes to use when type checking.
	Compiler string
	Arch string

	// Roots is the set of package IDs that make up the root packages.
	// We have to encode this separately because when we encode a single package
	// we cannot know if it is one of the roots as that requires knowledge of the
	// graph it is part of.
	Roots []string `json:",omitempty"`

	// Packages is the full set of packages in the graph.
	// The packages are not connected into a graph.
	// The Imports if populated will be stubs that only have their ID set.
	// Imports will be connected and then type and syntax information added in a
	// later pass (see refine).
	Packages []*Package

	// GoVersion is the minor version number used by the driver
	// (e.g. the go command on the PATH) when selecting .go files.
	// Zero means unknown.
	GoVersion int
}

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)

// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found."
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
@@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver {
			return nil
		}
	}
	return func(cfg *Config, words ...string) (*driverResponse, error) {
		req, err := json.Marshal(driverRequest{
	return func(cfg *Config, words ...string) (*DriverResponse, error) {
		req, err := json.Marshal(DriverRequest{
			Mode: cfg.Mode,
			Env: cfg.Env,
			BuildFlags: cfg.BuildFlags,
@@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver {
			fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
		}

		var response driverResponse
		var response DriverResponse
		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
			return nil, err
		}
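The consumer side is unchanged by the driver mechanism: a tool simply calls Load, and go/packages consults GOPACKAGESDRIVER (or a gopackagesdriver binary on PATH, per the comments above) before falling back to go list. A minimal sketch:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// With GOPACKAGESDRIVER set, findExternalDriver routes this Load call to the
	// external driver; otherwise goListDriver shells out to 'go list'.
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedFiles}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.GoFiles), "files")
	}
}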
35
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
35
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
@@ -35,23 +35,23 @@ type goTooOldError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// responseDeduper wraps a driverResponse, deduplicating its contents.
|
||||
// responseDeduper wraps a DriverResponse, deduplicating its contents.
|
||||
type responseDeduper struct {
|
||||
seenRoots map[string]bool
|
||||
seenPackages map[string]*Package
|
||||
dr *driverResponse
|
||||
dr *DriverResponse
|
||||
}
|
||||
|
||||
func newDeduper() *responseDeduper {
|
||||
return &responseDeduper{
|
||||
dr: &driverResponse{},
|
||||
dr: &DriverResponse{},
|
||||
seenRoots: map[string]bool{},
|
||||
seenPackages: map[string]*Package{},
|
||||
}
|
||||
}
|
||||
|
||||
// addAll fills in r with a driverResponse.
|
||||
func (r *responseDeduper) addAll(dr *driverResponse) {
|
||||
// addAll fills in r with a DriverResponse.
|
||||
func (r *responseDeduper) addAll(dr *DriverResponse) {
|
||||
for _, pkg := range dr.Packages {
|
||||
r.addPackage(pkg)
|
||||
}
|
||||
@@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string {
|
||||
// goListDriver uses the go list command to interpret the patterns and produce
|
||||
// the build system package structure.
|
||||
// See driver for more details.
|
||||
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
|
||||
// Make sure that any asynchronous go commands are killed when we return.
|
||||
parentCtx := cfg.Context
|
||||
if parentCtx == nil {
|
||||
@@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
}
|
||||
|
||||
// Fill in response.Sizes asynchronously if necessary.
|
||||
var sizeserr error
|
||||
var sizeswg sync.WaitGroup
|
||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
||||
sizeswg.Add(1)
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
|
||||
sizeserr = err
|
||||
response.dr.Compiler = compiler
|
||||
response.dr.Arch = arch
|
||||
sizeswg.Done()
|
||||
errCh <- err
|
||||
}()
|
||||
defer func() {
|
||||
if sizesErr := <-errCh; sizesErr != nil {
|
||||
err = sizesErr
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -208,10 +210,7 @@ extractQueries:
|
||||
}
|
||||
}
|
||||
|
||||
sizeswg.Wait()
|
||||
if sizeserr != nil {
|
||||
return nil, sizeserr
|
||||
}
|
||||
// (We may yet return an error due to defer.)
|
||||
return response.dr, nil
|
||||
}
|
||||
|
||||
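The two hunks above switch goListDriver to a named error return and route the asynchronous sizes error through a channel that a deferred function drains, replacing the old sizeswg/sizeserr bookkeeping. A freestanding sketch of that pattern, with made-up names standing in for the go/packages internals:

package main

import (
	"errors"
	"fmt"
)

// compute uses a named error result so a deferred function can overwrite it
// with a failure reported asynchronously by a helper goroutine, mirroring
// the errCh/defer arrangement in goListDriver above.
func compute() (result string, err error) {
	errCh := make(chan error)
	go func() {
		// Stand-in for the asynchronous GetSizesForArgsGolist call.
		errCh <- errors.New("sizes lookup failed")
	}()
	defer func() {
		if sizesErr := <-errCh; sizesErr != nil {
			err = sizesErr // surfaces even though the main path succeeded
		}
	}()
	return "response", nil
}

func main() {
	res, err := compute()
	fmt.Println(res, err) // prints: response sizes lookup failed
}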
@@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries
|
||||
|
||||
// adhocPackage attempts to load or construct an ad-hoc package for a given
|
||||
// query, if the original call to the driver produced inadequate results.
|
||||
func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
|
||||
func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) {
|
||||
response, err := state.createDriverResponse(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string {
|
||||
|
||||
// createDriverResponse uses the "go list" command to expand the pattern
|
||||
// words and return a response for the specified packages.
|
||||
func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
|
||||
func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) {
|
||||
// go list uses the following identifiers in ImportPath and Imports:
|
||||
//
|
||||
// "p" -- importable package or main (command)
|
||||
@@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
|
||||
pkgs := make(map[string]*Package)
|
||||
additionalErrors := make(map[string][]Error)
|
||||
// Decode the JSON and convert it to Package form.
|
||||
response := &driverResponse{
|
||||
response := &DriverResponse{
|
||||
GoVersion: goVersion,
|
||||
}
|
||||
for dec := json.NewDecoder(buf); dec.More(); {
|
||||
|
||||
46 vendor/golang.org/x/tools/go/packages/packages.go generated vendored
@@ -206,43 +206,6 @@ type Config struct {
|
||||
Overlay map[string][]byte
|
||||
}
|
||||
|
||||
// driver is the type for functions that query the build system for the
|
||||
// packages named by the patterns.
|
||||
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
|
||||
|
||||
// driverResponse contains the results for a driver query.
|
||||
type driverResponse struct {
|
||||
// NotHandled is returned if the request can't be handled by the current
|
||||
// driver. If an external driver returns a response with NotHandled, the
|
||||
// rest of the driverResponse is ignored, and go/packages will fallback
|
||||
// to the next driver. If go/packages is extended in the future to support
|
||||
// lists of multiple drivers, go/packages will fall back to the next driver.
|
||||
NotHandled bool
|
||||
|
||||
// Compiler and Arch are the arguments pass of types.SizesFor
|
||||
// to get a types.Sizes to use when type checking.
|
||||
Compiler string
|
||||
Arch string
|
||||
|
||||
// Roots is the set of package IDs that make up the root packages.
|
||||
// We have to encode this separately because when we encode a single package
|
||||
// we cannot know if it is one of the roots as that requires knowledge of the
|
||||
// graph it is part of.
|
||||
Roots []string `json:",omitempty"`
|
||||
|
||||
// Packages is the full set of packages in the graph.
|
||||
// The packages are not connected into a graph.
|
||||
// The Imports if populated will be stubs that only have their ID set.
|
||||
// Imports will be connected and then type and syntax information added in a
|
||||
// later pass (see refine).
|
||||
Packages []*Package
|
||||
|
||||
// GoVersion is the minor version number used by the driver
|
||||
// (e.g. the go command on the PATH) when selecting .go files.
|
||||
// Zero means unknown.
|
||||
GoVersion int
|
||||
}
|
||||
|
||||
// Load loads and returns the Go packages named by the given patterns.
|
||||
//
|
||||
// Config specifies loading options;
|
||||
@@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
|
||||
// no external driver, or the driver returns a response with NotHandled set,
|
||||
// defaultDriver will fall back to the go list driver.
|
||||
// The boolean result indicates that an external driver handled the request.
|
||||
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) {
|
||||
func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
|
||||
if driver := findExternalDriver(cfg); driver != nil {
|
||||
response, err := driver(cfg, patterns...)
|
||||
if err != nil {
|
||||
@@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro
|
||||
}
|
||||
|
||||
response, err := goListDriver(cfg, patterns...)
|
||||
return response, false, err
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return response, false, nil
|
||||
}
|
||||
|
||||
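defaultDriver is what ultimately backs the public packages.Load entry point, consulting a GOPACKAGESDRIVER tool first and otherwise falling back to the go list driver. A small caller-side example of the public API (the "./..." pattern and the chosen mode bits are illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedName|NeedFiles keeps the query cheap; an external driver named
		// by GOPACKAGESDRIVER is consulted first, otherwise `go list` is used.
		Mode: packages.NeedName | packages.NeedFiles,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.ID, p.Name)
	}
}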
// A Package describes a loaded Go package.
|
||||
@@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader {
|
||||
|
||||
// refine connects the supplied packages into a graph and then adds type
|
||||
// and syntax information as requested by the LoadMode.
|
||||
func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
|
||||
func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
|
||||
roots := response.Roots
|
||||
rootMap := make(map[string]int, len(roots))
|
||||
for i, root := range roots {
|
||||
|
||||
7 vendor/golang.org/x/tools/internal/gcimporter/iimport.go generated vendored
@@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
|
||||
|
||||
// Gather the relevant packages from the manifest.
|
||||
items := make([]GetPackagesItem, r.uint64())
|
||||
uniquePkgPaths := make(map[string]bool)
|
||||
for i := range items {
|
||||
pkgPathOff := r.uint64()
|
||||
pkgPath := p.stringAt(pkgPathOff)
|
||||
@@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
|
||||
}
|
||||
|
||||
items[i].nameIndex = nameIndex
|
||||
|
||||
uniquePkgPaths[pkgPath] = true
|
||||
}
|
||||
// Debugging #63822; hypothesis: there are duplicate PkgPaths.
|
||||
if len(uniquePkgPaths) != len(items) {
|
||||
reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
|
||||
}
|
||||
|
||||
// Request packages all at once from the client,
|
||||
|
||||
320 vendor/golang.org/x/tools/internal/gopathwalk/walk.go generated vendored
@@ -9,11 +9,13 @@ package gopathwalk
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -21,8 +23,13 @@ import (
type Options struct {
	// If Logf is non-nil, debug logging is enabled through this function.
	Logf func(format string, args ...interface{})

	// Search module caches. Also disables legacy goimports ignore rules.
	ModulesEnabled bool

	// Maximum number of concurrent calls to user-provided callbacks,
	// or 0 for GOMAXPROCS.
	Concurrency int
}
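Options gains a Concurrency knob in this change. Since gopathwalk is internal to x/tools, the snippet below is only a sketch of how a caller inside that module might use it together with the Walk entry point shown later in this diff; the GOPATH-derived root is an assumption for illustration.

// Hypothetical snippet as it might appear inside x/tools itself
// (gopathwalk is an internal package, so outside callers cannot import it).
package example

import (
	"log"
	"os"
	"path/filepath"

	"golang.org/x/tools/internal/gopathwalk"
)

func scanGOPATH() {
	roots := []gopathwalk.Root{{
		Path: filepath.Join(os.Getenv("GOPATH"), "src"),
		Type: gopathwalk.RootGOPATH,
	}}
	found := func(root gopathwalk.Root, dir string) {
		log.Printf("package directory: %s (under %s)", dir, root.Path)
	}
	gopathwalk.Walk(roots, found, gopathwalk.Options{
		Logf:           log.Printf,
		ModulesEnabled: false,
		Concurrency:    4, // 0 would mean GOMAXPROCS, per the new field above
	})
}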
// RootType indicates the type of a Root.
|
||||
@@ -43,19 +50,28 @@ type Root struct {
|
||||
Type RootType
|
||||
}
|
||||
|
||||
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
//
|
||||
// For each package found, add will be called with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
//
|
||||
// Unlike filepath.WalkDir, Walk follows symbolic links
|
||||
// (while guarding against cycles).
|
||||
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
|
||||
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
|
||||
}
|
||||
|
||||
// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
|
||||
// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to
|
||||
// find packages.
|
||||
//
|
||||
// For each package found, add will be called with the absolute
|
||||
// paths of the containing source directory and the package directory.
|
||||
// For each directory that will be scanned, skip will be called
|
||||
// with the absolute paths of the containing source directory and the directory.
|
||||
// If skip returns false on a directory it will be processed.
|
||||
//
|
||||
// Unlike filepath.WalkDir, WalkSkip follows symbolic links
|
||||
// (while guarding against cycles).
|
||||
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
|
||||
for _, root := range roots {
|
||||
walkDir(root, add, skip, opts)
|
||||
@@ -64,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
|
||||
|
||||
// walkDir creates a walker and starts fastwalk with this walker.
|
||||
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
|
||||
if opts.Logf == nil {
|
||||
opts.Logf = func(format string, args ...interface{}) {}
|
||||
}
|
||||
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("skipping nonexistent directory: %v", root.Path)
|
||||
}
|
||||
opts.Logf("skipping nonexistent directory: %v", root.Path)
|
||||
return
|
||||
}
|
||||
start := time.Now()
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("scanning %s", root.Path)
|
||||
}
|
||||
opts.Logf("scanning %s", root.Path)
|
||||
|
||||
concurrency := opts.Concurrency
|
||||
if concurrency == 0 {
|
||||
// The walk can be either CPU-bound or I/O-bound, depending on what the
|
||||
// caller-supplied add function does and the details of the user's platform
|
||||
// and machine. Rather than trying to fine-tune the concurrency level for a
|
||||
// specific environment, we default to GOMAXPROCS: it is likely to be a good
|
||||
// choice for a CPU-bound add function, and if it is instead I/O-bound, then
|
||||
// dealing with I/O saturation is arguably the job of the kernel and/or
|
||||
// runtime. (Oversaturating I/O seems unlikely to harm performance as badly
|
||||
// as failing to saturate would.)
|
||||
concurrency = runtime.GOMAXPROCS(0)
|
||||
}
|
||||
w := &walker{
|
||||
root: root,
|
||||
add: add,
|
||||
skip: skip,
|
||||
opts: opts,
|
||||
added: make(map[string]bool),
|
||||
root: root,
|
||||
add: add,
|
||||
skip: skip,
|
||||
opts: opts,
|
||||
sem: make(chan struct{}, concurrency),
|
||||
}
|
||||
w.init()
|
||||
|
||||
// Add a trailing path separator to cause filepath.WalkDir to traverse symlinks.
|
||||
w.sem <- struct{}{}
|
||||
path := root.Path
|
||||
if len(path) == 0 {
|
||||
path = "." + string(filepath.Separator)
|
||||
} else if !os.IsPathSeparator(path[len(path)-1]) {
|
||||
path = path + string(filepath.Separator)
|
||||
if path == "" {
|
||||
path = "."
|
||||
}
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
w.walk(path, nil, fs.FileInfoToDirEntry(fi))
|
||||
} else {
|
||||
w.opts.Logf("scanning directory %v: %v", root.Path, err)
|
||||
}
|
||||
<-w.sem
|
||||
w.walking.Wait()
|
||||
|
||||
if err := filepath.WalkDir(path, w.walk); err != nil {
|
||||
logf := opts.Logf
|
||||
if logf == nil {
|
||||
logf = log.Printf
|
||||
}
|
||||
logf("scanning directory %v: %v", root.Path, err)
|
||||
}
|
||||
|
||||
if opts.Logf != nil {
|
||||
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
|
||||
}
|
||||
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
|
||||
}
|
||||
|
||||
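walkDir above bounds concurrency with a buffered channel used as a counting semaphore (send to acquire, receive to release) plus a WaitGroup, falling back to serial work when no token is free. A self-contained sketch of the same pattern with dummy work items:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	concurrency := runtime.GOMAXPROCS(0)
	sem := make(chan struct{}, concurrency) // send to acquire, receive to release
	var walking sync.WaitGroup

	process := func(item int) { fmt.Println("processed", item) }

	for item := 0; item < 32; item++ {
		item := item
		select {
		case sem <- struct{}{}:
			// Token available: handle this item on its own goroutine.
			walking.Add(1)
			go func() {
				defer func() {
					<-sem
					walking.Done()
				}()
				process(item)
			}()
		default:
			// No token available: process serially, exactly as the walker
			// does for directory entries.
			process(item)
		}
	}
	walking.Wait()
}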
// walker is the callback for fastwalk.Walk.
|
||||
@@ -112,10 +134,18 @@ type walker struct {
|
||||
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
|
||||
opts Options // Options passed to Walk by the user.
|
||||
|
||||
pathSymlinks []os.FileInfo
|
||||
ignoredDirs []string
|
||||
walking sync.WaitGroup
|
||||
sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
|
||||
ignoredDirs []string
|
||||
|
||||
added map[string]bool
|
||||
added sync.Map // map[string]bool
|
||||
}
|
||||
|
||||
// A symlinkList is a linked list of os.FileInfos for parent directories
|
||||
// reached via symlinks.
|
||||
type symlinkList struct {
|
||||
info os.FileInfo
|
||||
prev *symlinkList
|
||||
}
|
||||
|
||||
// init initializes the walker based on its Options
|
||||
@@ -132,9 +162,7 @@ func (w *walker) init() {
|
||||
for _, p := range ignoredPaths {
|
||||
full := filepath.Join(w.root.Path, p)
|
||||
w.ignoredDirs = append(w.ignoredDirs, full)
|
||||
if w.opts.Logf != nil {
|
||||
w.opts.Logf("Directory added to ignore list: %s", full)
|
||||
}
|
||||
w.opts.Logf("Directory added to ignore list: %s", full)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,12 +172,10 @@ func (w *walker) init() {
|
||||
func (w *walker) getIgnoredDirs(path string) []string {
|
||||
file := filepath.Join(path, ".goimportsignore")
|
||||
slurp, err := os.ReadFile(file)
|
||||
if w.opts.Logf != nil {
|
||||
if err != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
} else {
|
||||
w.opts.Logf("Read %s", file)
|
||||
}
|
||||
if err != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
} else {
|
||||
w.opts.Logf("Read %s", file)
|
||||
}
|
||||
if err != nil {
|
||||
return nil
|
||||
@@ -183,63 +209,22 @@ func (w *walker) shouldSkipDir(dir string) bool {
|
||||
|
||||
// walk walks through the given path.
|
||||
//
|
||||
// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored:
|
||||
// walk returns only nil or fs.SkipDir.
|
||||
func (w *walker) walk(path string, d fs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
// We have no way to report errors back through Walk or WalkSkip,
|
||||
// so just log and ignore them.
|
||||
if w.opts.Logf != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
}
|
||||
if d == nil {
|
||||
// Nothing more to do: the error prevents us from knowing
|
||||
// what path even represents.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if d.Type().IsRegular() {
|
||||
if !strings.HasSuffix(path, ".go") {
|
||||
return nil
|
||||
}
|
||||
|
||||
dir := filepath.Dir(path)
|
||||
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
|
||||
// Doesn't make sense to have regular files
|
||||
// directly in your $GOPATH/src or $GOROOT/src.
|
||||
return nil
|
||||
}
|
||||
|
||||
if !w.added[dir] {
|
||||
w.add(w.root, dir)
|
||||
w.added[dir] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.IsDir() {
|
||||
base := filepath.Base(path)
|
||||
if base == "" || base[0] == '.' || base[0] == '_' ||
|
||||
base == "testdata" ||
|
||||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
|
||||
(!w.opts.ModulesEnabled && base == "node_modules") {
|
||||
return fs.SkipDir
|
||||
}
|
||||
if w.shouldSkipDir(path) {
|
||||
return fs.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
|
||||
func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
|
||||
if d.Type()&os.ModeSymlink != 0 {
|
||||
// Walk the symlink's target rather than the symlink itself.
|
||||
//
|
||||
// (Note that os.Stat, unlike the lower-lever os.Readlink,
|
||||
// follows arbitrarily many layers of symlinks, so it will eventually
|
||||
// reach either a non-symlink or a nonexistent target.)
|
||||
//
|
||||
// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
|
||||
// and GOPATH/src. Do we really need to traverse them here? If so, why?
|
||||
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil || !fi.IsDir() {
|
||||
// Not a directory. Just walk the file (or broken link) and be done.
|
||||
return w.walk(path, fs.FileInfoToDirEntry(fi), err)
|
||||
if err != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Avoid walking symlink cycles: if we have already followed a symlink to
|
||||
@@ -249,83 +234,104 @@ func (w *walker) walk(path string, d fs.DirEntry, err error) error {
|
||||
// the number of extra stat calls we make if we *don't* encounter a cycle.
|
||||
// Since we don't actually expect to encounter symlink cycles in practice,
|
||||
// this seems like the right tradeoff.
|
||||
for _, parent := range w.pathSymlinks {
|
||||
if os.SameFile(fi, parent) {
|
||||
return nil
|
||||
for parent := pathSymlinks; parent != nil; parent = parent.prev {
|
||||
if os.SameFile(fi, parent.info) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
w.pathSymlinks = append(w.pathSymlinks, fi)
|
||||
defer func() {
|
||||
w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1]
|
||||
}()
|
||||
pathSymlinks = &symlinkList{
|
||||
info: fi,
|
||||
prev: pathSymlinks,
|
||||
}
|
||||
d = fs.FileInfoToDirEntry(fi)
|
||||
}
|
||||
|
||||
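The symlinkList plus os.SameFile loop above is how the walker avoids revisiting a directory reached through a symlink cycle. A minimal standalone sketch of that check (the stat of "." is only for demonstration):

package main

import (
	"fmt"
	"os"
)

// visited is a linked list of FileInfos for directories reached via
// symlinks, mirroring symlinkList above.
type visited struct {
	info os.FileInfo
	prev *visited
}

// seen reports whether fi identifies the same file as any ancestor in the
// chain, which is how the walker breaks symlink cycles.
func seen(fi os.FileInfo, chain *visited) bool {
	for p := chain; p != nil; p = p.prev {
		if os.SameFile(fi, p.info) {
			return true
		}
	}
	return false
}

func main() {
	fi, err := os.Stat(".") // follows symlinks, like the walker does
	if err != nil {
		fmt.Println(err)
		return
	}
	chain := &visited{info: fi}
	fmt.Println(seen(fi, chain)) // true: revisiting "." would be a cycle
}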
// On some platforms the OS (or the Go os package) sometimes fails to
|
||||
// resolve directory symlinks before a trailing slash
|
||||
// (even though POSIX requires it to do so).
|
||||
//
|
||||
// On macOS that failure may be caused by a known libc/kernel bug;
|
||||
// see https://go.dev/issue/59586.
|
||||
//
|
||||
// On Windows before Go 1.21, it may be caused by a bug in
|
||||
// os.Lstat (fixed in https://go.dev/cl/463177).
|
||||
//
|
||||
// Since we need to handle this explicitly on broken platforms anyway,
|
||||
// it is simplest to just always do that and not rely on POSIX pathname
|
||||
// resolution to walk the directory (such as by calling WalkDir with
|
||||
// a trailing slash appended to the path).
|
||||
//
|
||||
// Instead, we make a sequence of walk calls — directly and through
|
||||
// recursive calls to filepath.WalkDir — simulating what WalkDir would do
|
||||
// if the symlink were a regular directory.
|
||||
|
||||
// First we call walk on the path as a directory
|
||||
// (instead of a symlink).
|
||||
err = w.walk(path, fs.FileInfoToDirEntry(fi), nil)
|
||||
if err == fs.SkipDir {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
// This should be impossible, but handle it anyway in case
|
||||
// walk is changed to return other errors.
|
||||
return err
|
||||
if d.Type().IsRegular() {
|
||||
if !strings.HasSuffix(path, ".go") {
|
||||
return
|
||||
}
|
||||
|
||||
// Now read the directory and walk its entries.
|
||||
ents, err := os.ReadDir(path)
|
||||
dir := filepath.Dir(path)
|
||||
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
|
||||
// Doesn't make sense to have regular files
|
||||
// directly in your $GOPATH/src or $GOROOT/src.
|
||||
//
|
||||
// TODO(bcmills): there are many levels of directory within
|
||||
// RootModuleCache where this also wouldn't make sense.
|
||||
// Can we generalize this to any directory without a corresponding
|
||||
// import path?
|
||||
return
|
||||
}
|
||||
|
||||
if _, dup := w.added.LoadOrStore(dir, true); !dup {
|
||||
w.add(w.root, dir)
|
||||
}
|
||||
}
|
||||
|
||||
if !d.IsDir() {
|
||||
return
|
||||
}
|
||||
|
||||
base := filepath.Base(path)
|
||||
if base == "" || base[0] == '.' || base[0] == '_' ||
|
||||
base == "testdata" ||
|
||||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
|
||||
(!w.opts.ModulesEnabled && base == "node_modules") ||
|
||||
w.shouldSkipDir(path) {
|
||||
return
|
||||
}
|
||||
|
||||
// Read the directory and walk its entries.
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
for {
|
||||
// We impose an arbitrary limit on the number of ReadDir results per
|
||||
// directory to limit the amount of memory consumed for stale or upcoming
|
||||
// directory entries. The limit trades off CPU (number of syscalls to read
|
||||
// the whole directory) against RAM (reachable directory entries other than
|
||||
// the one currently being processed).
|
||||
//
|
||||
// Since we process the directories recursively, we will end up maintaining
|
||||
// a slice of entries for each level of the directory tree.
|
||||
// (Compare https://go.dev/issue/36197.)
|
||||
ents, err := f.ReadDir(1024)
|
||||
if err != nil {
|
||||
// Report the ReadDir error, as filepath.WalkDir would do.
|
||||
err = w.walk(path, fs.FileInfoToDirEntry(fi), err)
|
||||
if err == fs.SkipDir {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err // Again, should be impossible.
|
||||
if err != io.EOF {
|
||||
w.opts.Logf("%v", err)
|
||||
}
|
||||
// Fall through and iterate over whatever entries we did manage to get.
|
||||
break
|
||||
}
|
||||
|
||||
for _, d := range ents {
|
||||
nextPath := filepath.Join(path, d.Name())
|
||||
if d.IsDir() {
|
||||
// We want to walk the whole directory tree rooted at nextPath,
|
||||
// not just the single entry for the directory.
|
||||
err := filepath.WalkDir(nextPath, w.walk)
|
||||
if err != nil && w.opts.Logf != nil {
|
||||
w.opts.Logf("%v", err)
|
||||
}
|
||||
} else {
|
||||
err := w.walk(nextPath, d, nil)
|
||||
if err == fs.SkipDir {
|
||||
// Skip the rest of the entries in the parent directory of nextPath
|
||||
// (that is, path itself).
|
||||
break
|
||||
} else if err != nil {
|
||||
return err // Again, should be impossible.
|
||||
select {
|
||||
case w.sem <- struct{}{}:
|
||||
// Got a new semaphore token, so we can traverse the directory concurrently.
|
||||
d := d
|
||||
w.walking.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
<-w.sem
|
||||
w.walking.Done()
|
||||
}()
|
||||
w.walk(nextPath, pathSymlinks, d)
|
||||
}()
|
||||
continue
|
||||
|
||||
default:
|
||||
// No tokens available, so traverse serially.
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Not a file, regular directory, or symlink; skip.
|
||||
return nil
|
||||
w.walk(nextPath, pathSymlinks, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
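The rewritten walk reads each directory in fixed-size batches via (*os.File).ReadDir so that only one batch of entries per tree level stays in memory. A standalone sketch of the batching loop, with an arbitrary batch size of 64:

package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	f, err := os.Open(".")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		// ReadDir with n > 0 returns at most n entries per call and io.EOF
		// once the directory is exhausted, so stale entries are not kept
		// in memory for the whole walk.
		ents, err := f.ReadDir(64)
		for _, e := range ents {
			fmt.Println(e.Name(), e.IsDir())
		}
		if err != nil {
			if err != io.EOF {
				log.Printf("reading directory: %v", err)
			}
			break
		}
	}
}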
134 vendor/golang.org/x/tools/internal/imports/fix.go generated vendored
@@ -13,6 +13,7 @@ import (
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -700,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func PrimeCache(ctx context.Context, env *ProcessEnv) error {
|
||||
func PrimeCache(ctx context.Context, resolver Resolver) error {
|
||||
// Fully scan the disk for directories, but don't actually read any Go files.
|
||||
callback := &scanCallback{
|
||||
rootFound: func(gopathwalk.Root) bool {
|
||||
return true
|
||||
rootFound: func(root gopathwalk.Root) bool {
|
||||
// See getCandidatePkgs: walking GOROOT is apparently expensive and
|
||||
// unnecessary.
|
||||
return root.Type != gopathwalk.RootGOROOT
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
return false
|
||||
},
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
return false
|
||||
},
|
||||
// packageNameLoaded and exportsLoaded must never be called.
|
||||
}
|
||||
return getCandidatePkgs(ctx, callback, "", "", env)
|
||||
|
||||
return resolver.scan(ctx, callback)
|
||||
}
|
||||
|
||||
func candidateImportName(pkg *pkg) string {
|
||||
@@ -827,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
|
||||
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
|
||||
}
|
||||
|
||||
var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
|
||||
// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
|
||||
// imports when doing cross-platform development.
|
||||
var requiredGoEnvVars = []string{
|
||||
"GO111MODULE",
|
||||
"GOFLAGS",
|
||||
"GOINSECURE",
|
||||
"GOMOD",
|
||||
"GOMODCACHE",
|
||||
"GONOPROXY",
|
||||
"GONOSUMDB",
|
||||
"GOPATH",
|
||||
"GOPROXY",
|
||||
"GOROOT",
|
||||
"GOSUMDB",
|
||||
"GOWORK",
|
||||
}
|
||||
|
||||
// ProcessEnv contains environment variables and settings that affect the use of
|
||||
// the go command, the go/build package, etc.
|
||||
//
|
||||
// ...a ProcessEnv *also* overwrites its Env along with derived state in the
|
||||
// form of the resolver. And because it is lazily initialized, an env may just
|
||||
// be broken and unusable, but there is no way for the caller to detect that:
|
||||
// all queries will just fail.
|
||||
//
|
||||
// TODO(rfindley): refactor this package so that this type (perhaps renamed to
|
||||
// just Env or Config) is an immutable configuration struct, to be exchanged
|
||||
// for an initialized object via a constructor that returns an error. Perhaps
|
||||
// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
|
||||
// resolver is a concrete type used for resolving imports. Via this
|
||||
// refactoring, we can avoid the need to call ProcessEnv.init and
|
||||
// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where this
|
||||
// these are misused. Also, we'd delegate the caller the decision of how to
|
||||
// handle a broken environment.
|
||||
type ProcessEnv struct {
|
||||
GocmdRunner *gocommand.Runner
|
||||
|
||||
BuildFlags []string
|
||||
ModFlag string
|
||||
ModFile string
|
||||
|
||||
// SkipPathInScan returns true if the path should be skipped from scans of
|
||||
// the RootCurrentModule root type. The function argument is a clean,
|
||||
@@ -846,7 +877,7 @@ type ProcessEnv struct {
|
||||
// Env overrides the OS environment, and can be used to specify
|
||||
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
|
||||
// exec.Command will not honor it.
|
||||
// Specifying all of RequiredGoEnvVars avoids a call to `go env`.
|
||||
// Specifying all of requiredGoEnvVars avoids a call to `go env`.
|
||||
Env map[string]string
|
||||
|
||||
WorkingDir string
|
||||
@@ -854,9 +885,17 @@ type ProcessEnv struct {
|
||||
// If Logf is non-nil, debug logging is enabled through this function.
|
||||
Logf func(format string, args ...interface{})
|
||||
|
||||
initialized bool
|
||||
// If set, ModCache holds a shared cache of directory info to use across
|
||||
// multiple ProcessEnvs.
|
||||
ModCache *DirInfoCache
|
||||
|
||||
resolver Resolver
|
||||
initialized bool // see TODO above
|
||||
|
||||
// resolver and resolverErr are lazily evaluated (see GetResolver).
|
||||
// This is unclean, but see the big TODO in the docstring for ProcessEnv
|
||||
// above: for now, we can't be sure that the ProcessEnv is fully initialized.
|
||||
resolver Resolver
|
||||
resolverErr error
|
||||
}
|
||||
|
||||
func (e *ProcessEnv) goEnv() (map[string]string, error) {
|
||||
@@ -936,20 +975,31 @@ func (e *ProcessEnv) env() []string {
|
||||
}
|
||||
|
||||
func (e *ProcessEnv) GetResolver() (Resolver, error) {
|
||||
if e.resolver != nil {
|
||||
return e.resolver, nil
|
||||
}
|
||||
if err := e.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
|
||||
e.resolver = newGopathResolver(e)
|
||||
return e.resolver, nil
|
||||
|
||||
if e.resolver == nil && e.resolverErr == nil {
|
||||
// TODO(rfindley): we should only use a gopathResolver here if the working
|
||||
// directory is actually *in* GOPATH. (I seem to recall an open gopls issue
|
||||
// for this behavior, but I can't find it).
|
||||
//
|
||||
// For gopls, we can optionally explicitly choose a resolver type, since we
|
||||
// already know the view type.
|
||||
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
|
||||
e.resolver = newGopathResolver(e)
|
||||
} else {
|
||||
e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache)
|
||||
}
|
||||
}
|
||||
e.resolver = newModuleResolver(e)
|
||||
return e.resolver, nil
|
||||
|
||||
return e.resolver, e.resolverErr
|
||||
}
|
||||
|
||||
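GetResolver now memoizes both the constructed resolver and the construction error, so a broken environment fails fast on later calls instead of being silently retried. A generic sketch of that caching shape, with invented names (like the original, it relies on external synchronization rather than being concurrency-safe itself):

package main

import (
	"errors"
	"fmt"
)

type conn struct{ addr string }

type client struct {
	addr string

	// conn and connErr are lazily evaluated, mirroring
	// ProcessEnv.resolver and ProcessEnv.resolverErr above.
	conn    *conn
	connErr error
}

func dial(addr string) (*conn, error) {
	if addr == "" {
		return nil, errors.New("no address configured")
	}
	return &conn{addr: addr}, nil
}

// get constructs the connection on first use and then returns the cached
// value (or the cached error) on every later call.
func (c *client) get() (*conn, error) {
	if c.conn == nil && c.connErr == nil {
		c.conn, c.connErr = dial(c.addr)
	}
	return c.conn, c.connErr
}

func main() {
	broken := &client{}
	_, err := broken.get()
	fmt.Println(err) // "no address configured", without re-dialing next time
}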
// buildContext returns the build.Context to use for matching files.
|
||||
//
|
||||
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
|
||||
// development.
|
||||
func (e *ProcessEnv) buildContext() (*build.Context, error) {
|
||||
ctx := build.Default
|
||||
goenv, err := e.goEnv()
|
||||
@@ -1029,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error {
|
||||
type Resolver interface {
|
||||
// loadPackageNames loads the package names in importPaths.
|
||||
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
|
||||
|
||||
// scan works with callback to search for packages. See scanCallback for details.
|
||||
scan(ctx context.Context, callback *scanCallback) error
|
||||
|
||||
// loadExports returns the set of exported symbols in the package at dir.
|
||||
// loadExports may be called concurrently.
|
||||
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
|
||||
|
||||
// scoreImportPath returns the relevance for an import path.
|
||||
scoreImportPath(ctx context.Context, path string) float64
|
||||
|
||||
ClearForNewScan()
|
||||
// ClearForNewScan returns a new Resolver based on the receiver that has
|
||||
// cleared its internal caches of directory contents.
|
||||
//
|
||||
// The new resolver should be primed and then set via
|
||||
// [ProcessEnv.UpdateResolver].
|
||||
ClearForNewScan() Resolver
|
||||
}
|
||||
|
||||
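ClearForNewScan now returns a fresh Resolver that the caller is expected to prime and then install via ProcessEnv.UpdateResolver. The sketch below shows that flow as it might look inside x/tools itself (internal/imports cannot be imported from other modules; error handling is abbreviated):

// Hypothetical caller inside x/tools.
package example

import (
	"context"

	"golang.org/x/tools/internal/imports"
)

// refreshResolver discards cached directory contents after the filesystem
// has changed, primes the fresh resolver, and swaps it into the env.
func refreshResolver(ctx context.Context, env *imports.ProcessEnv) error {
	old, err := env.GetResolver()
	if err != nil {
		return err
	}
	fresh := old.ClearForNewScan() // keeps module cache contents, per mod.go below
	if err := imports.PrimeCache(ctx, fresh); err != nil {
		return err
	}
	env.UpdateResolver(fresh)
	return nil
}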
// A scanCallback controls a call to scan and receives its results.
|
||||
@@ -1120,7 +1178,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
|
||||
go func(pkgName string, symbols map[string]bool) {
|
||||
defer wg.Done()
|
||||
|
||||
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename)
|
||||
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
|
||||
|
||||
if err != nil {
|
||||
firstErrOnce.Do(func() {
|
||||
@@ -1151,6 +1209,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
|
||||
}()
|
||||
|
||||
for result := range results {
|
||||
// Don't offer completions that would shadow predeclared
|
||||
// names, such as github.com/coreos/etcd/error.
|
||||
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
|
||||
// Ideally we would skip this candidate only
|
||||
// if the predeclared name is actually
|
||||
// referenced by the file, but that's a lot
|
||||
// trickier to compute and would still create
|
||||
// an import that is likely to surprise the
|
||||
// user before long.
|
||||
continue
|
||||
}
|
||||
pass.addCandidate(result.imp, result.pkg)
|
||||
}
|
||||
return firstErr
|
||||
@@ -1193,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string {
|
||||
type gopathResolver struct {
|
||||
env *ProcessEnv
|
||||
walked bool
|
||||
cache *dirInfoCache
|
||||
cache *DirInfoCache
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans.
|
||||
}
|
||||
|
||||
func newGopathResolver(env *ProcessEnv) *gopathResolver {
|
||||
r := &gopathResolver{
|
||||
env: env,
|
||||
cache: &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
},
|
||||
env: env,
|
||||
cache: NewDirInfoCache(),
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *gopathResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.cache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.walked = false
|
||||
r.scanSema <- struct{}{}
|
||||
func (r *gopathResolver) ClearForNewScan() Resolver {
|
||||
return newGopathResolver(r.env)
|
||||
}
|
||||
|
||||
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
@@ -1538,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
|
||||
|
||||
// findImport searches for a package with the given symbols.
|
||||
// If no package is found, findImport returns ("", false, nil)
|
||||
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
|
||||
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
|
||||
// Sort the candidates by their import package length,
|
||||
// assuming that shorter package names are better than long
|
||||
// ones. Note that this sorts by the de-vendored name, so
|
||||
|
||||
2 vendor/golang.org/x/tools/internal/imports/imports.go generated vendored
@@ -236,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
|
||||
src = src[:len(src)-len("}\n")]
|
||||
// Gofmt has also indented the function body one level.
|
||||
// Remove that indent.
|
||||
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
|
||||
src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
|
||||
283 vendor/golang.org/x/tools/internal/imports/mod.go generated vendored
@@ -23,49 +23,88 @@ import (
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
|
||||
// as fast as possible, which is desirable for a call to goimports from the
|
||||
// command line, but it doesn't work as well for gopls, where it suffers from
|
||||
// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
|
||||
// both caused by populating the cache, albeit in slightly different ways.
|
||||
//
|
||||
// A high level list of TODOs:
|
||||
// - Optimize the scan itself, as there is some redundancy statting and
|
||||
// reading go.mod files.
|
||||
// - Invert the relationship between ProcessEnv and Resolver (see the
|
||||
// docstring of ProcessEnv).
|
||||
// - Make it easier to use an external resolver implementation.
|
||||
//
|
||||
// Smaller TODOs are annotated in the code below.
|
||||
|
||||
// ModuleResolver implements the Resolver interface for a workspace using
|
||||
// modules.
|
||||
//
|
||||
// A goal of the ModuleResolver is to invoke the Go command as little as
|
||||
// possible. To this end, it runs the Go command only for listing module
|
||||
// information (i.e. `go list -m -e -json ...`). Package scanning, the process
|
||||
// of loading package information for the modules, is implemented internally
|
||||
// via the scan method.
|
||||
//
|
||||
// It has two types of state: the state derived from the go command, which
|
||||
// is populated by init, and the state derived from scans, which is populated
|
||||
// via scan. A root is considered scanned if it has been walked to discover
|
||||
// directories. However, if the scan did not require additional information
|
||||
// from the directory (such as package name or exports), the directory
|
||||
// information itself may be partially populated. It will be lazily filled in
|
||||
// as needed by scans, using the scanCallback.
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
roots []gopathwalk.Root
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
|
||||
scannedRoots map[gopathwalk.Root]bool
|
||||
env *ProcessEnv
|
||||
|
||||
initialized bool
|
||||
mains []*gocommand.ModuleJSON
|
||||
mainByDir map[string]*gocommand.ModuleJSON
|
||||
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir.
|
||||
// Module state, populated during construction
|
||||
dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory
|
||||
moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset
|
||||
roots []gopathwalk.Root // roots to scan, in approximate order of importance
|
||||
mains []*gocommand.ModuleJSON // main modules
|
||||
mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots
|
||||
modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path
|
||||
modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
// Scanning state, populated by scan
|
||||
|
||||
// scanSema prevents concurrent scans, and guards scannedRoots and the cache
|
||||
// fields below (though the caches themselves are concurrency safe).
|
||||
// Receive to acquire, send to release.
|
||||
scanSema chan struct{}
|
||||
scannedRoots map[gopathwalk.Root]bool // if true, root has been walked
|
||||
|
||||
// Caches of directory info, populated by scans and scan callbacks
|
||||
//
|
||||
// moduleCacheCache stores cached information about roots in the module
|
||||
// cache, which are immutable and therefore do not need to be invalidated.
|
||||
//
|
||||
// otherCache stores information about all other roots (even GOROOT), which
|
||||
// may change.
|
||||
moduleCacheCache *DirInfoCache
|
||||
otherCache *DirInfoCache
|
||||
}
|
||||
|
||||
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
|
||||
// newModuleResolver returns a new module-aware goimports resolver.
|
||||
//
|
||||
// Note: use caution when modifying this constructor: changes must also be
|
||||
// reflected in ModuleResolver.ClearForNewScan.
|
||||
func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) {
|
||||
r := &ModuleResolver{
|
||||
env: e,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
r.scanSema <- struct{}{} // release
|
||||
|
||||
goenv, err := r.env.goEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(rfindley): can we refactor to share logic with r.env.invokeGo?
|
||||
inv := gocommand.Invocation{
|
||||
BuildFlags: r.env.BuildFlags,
|
||||
ModFlag: r.env.ModFlag,
|
||||
ModFile: r.env.ModFile,
|
||||
Env: r.env.env(),
|
||||
Logf: r.env.Logf,
|
||||
WorkingDir: r.env.WorkingDir,
|
||||
@@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error {
|
||||
// Module vendor directories are ignored in workspace mode:
|
||||
// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
|
||||
if len(r.env.Env["GOWORK"]) == 0 {
|
||||
// TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but
|
||||
// they should be available from the ProcessEnv. Can we avoid the redundant
|
||||
// invocation?
|
||||
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error {
|
||||
// GO111MODULE=on. Other errors are fatal.
|
||||
if err != nil {
|
||||
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
|
||||
r.moduleCacheDir = gmc
|
||||
} else {
|
||||
gopaths := filepath.SplitList(goenv["GOPATH"])
|
||||
if len(gopaths) == 0 {
|
||||
return fmt.Errorf("empty GOPATH")
|
||||
}
|
||||
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
|
||||
r.moduleCacheDir = gomodcacheForEnv(goenv)
|
||||
if r.moduleCacheDir == "" {
|
||||
return nil, fmt.Errorf("cannot resolve GOMODCACHE")
|
||||
}
|
||||
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
@@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error {
|
||||
} else {
|
||||
addDep := func(mod *gocommand.ModuleJSON) {
|
||||
if mod.Replace == nil {
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough.
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough
|
||||
// when we encounter it in the module cache scan.
|
||||
//
|
||||
// Including it at a lower index in r.roots than the module cache dir
|
||||
// helps prioritize matches from within existing dependencies.
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
|
||||
@@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
// If provided, share the moduleCacheCache.
|
||||
//
|
||||
// TODO(rfindley): The module cache is immutable. However, the loaded
|
||||
// exports do depend on GOOS and GOARCH. Fortunately, the
|
||||
// ProcessEnv.buildContext does not adjust these from build.DefaultContext
|
||||
// (even though it should). So for now, this is OK to share, but we need to
|
||||
// add logic for handling GOOS/GOARCH.
|
||||
r.moduleCacheCache = moduleCacheCache
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.moduleCacheCache = NewDirInfoCache()
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.otherCache = NewDirInfoCache()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
// map, which must have GOMODCACHE and GOPATH populated.
//
// TODO(rfindley): this is defensive refactoring.
// 1. Is this even relevant anymore? Can't we just read GOMODCACHE?
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
return gmc
}
r.initialized = true
return nil
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return ""
}
return filepath.Join(gopaths[0], "/pkg/mod")
}
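gomodcacheForEnv prefers GOMODCACHE and otherwise derives the directory from the first GOPATH entry. A tiny standalone re-implementation of the same logic, useful for seeing the expected values (the paths in the comments assume a Unix-style GOPATH):

package main

import (
	"fmt"
	"path/filepath"
)

// modCacheDir mirrors the gomodcacheForEnv logic above: prefer GOMODCACHE,
// otherwise fall back to <first GOPATH entry>/pkg/mod.
func modCacheDir(goenv map[string]string) string {
	if gmc := goenv["GOMODCACHE"]; gmc != "" {
		return gmc
	}
	gopaths := filepath.SplitList(goenv["GOPATH"])
	if len(gopaths) == 0 {
		return ""
	}
	return filepath.Join(gopaths[0], "pkg", "mod")
}

func main() {
	fmt.Println(modCacheDir(map[string]string{"GOMODCACHE": "/cache"}))    // /cache
	fmt.Println(modCacheDir(map[string]string{"GOPATH": "/home/user/go"})) // /home/user/go/pkg/mod
	fmt.Println(modCacheDir(map[string]string{}))                          // empty: cannot resolve GOMODCACHE
}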
func (r *ModuleResolver) initAllMods() error {
|
||||
@@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
// ClearForNewScan invalidates the last scan.
|
||||
//
|
||||
// It preserves the set of roots, but forgets about the set of directories.
|
||||
// Though it forgets the set of module cache directories, it remembers their
|
||||
// contents, since they are assumed to be immutable.
|
||||
func (r *ModuleResolver) ClearForNewScan() Resolver {
|
||||
<-r.scanSema // acquire r, to guard scannedRoots
|
||||
r2 := &ModuleResolver{
|
||||
env: r.env,
|
||||
dummyVendorMod: r.dummyVendorMod,
|
||||
moduleCacheDir: r.moduleCacheDir,
|
||||
roots: r.roots,
|
||||
mains: r.mains,
|
||||
mainByDir: r.mainByDir,
|
||||
modsByModPath: r.modsByModPath,
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
<-r.scanSema
|
||||
*r = ModuleResolver{
|
||||
env: r.env,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
scannedRoots: make(map[gopathwalk.Root]bool),
|
||||
otherCache: NewDirInfoCache(),
|
||||
moduleCacheCache: r.moduleCacheCache,
|
||||
otherCache: r.otherCache,
|
||||
scanSema: r.scanSema,
|
||||
}
|
||||
r.init()
|
||||
r.scanSema <- struct{}{}
|
||||
r2.scanSema <- struct{}{} // r2 must start released
|
||||
// Invalidate root scans. We don't need to invalidate module cache roots,
|
||||
// because they are immutable.
|
||||
// (We don't support a use case where GOMODCACHE is cleaned in the middle of
|
||||
// e.g. a gopls session: the user must restart gopls to get accurate
|
||||
// imports.)
|
||||
//
|
||||
// Scanning for new directories in GOMODCACHE should be handled elsewhere,
|
||||
// via a call to ScanModuleCache.
|
||||
for _, root := range r.roots {
|
||||
if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
|
||||
r2.scannedRoots[root] = true
|
||||
}
|
||||
}
|
||||
r.scanSema <- struct{}{} // release r
|
||||
return r2
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
// ClearModuleInfo invalidates resolver state that depends on go.mod file
|
||||
// contents (essentially, the output of go list -m -json ...).
|
||||
//
|
||||
// Notably, it does not forget directory contents, which are reset
|
||||
// asynchronously via ClearForNewScan.
|
||||
//
|
||||
// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op.
|
||||
//
|
||||
// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods.
|
||||
func (e *ProcessEnv) ClearModuleInfo() {
|
||||
if r, ok := e.resolver.(*ModuleResolver); ok {
|
||||
resolver, resolverErr := newModuleResolver(e, e.ModCache)
|
||||
if resolverErr == nil {
|
||||
<-r.scanSema // acquire (guards caches)
|
||||
resolver.moduleCacheCache = r.moduleCacheCache
|
||||
resolver.otherCache = r.otherCache
|
||||
r.scanSema <- struct{}{} // release
|
||||
}
|
||||
e.resolver = resolver
|
||||
e.resolverErr = resolverErr
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateResolver sets the resolver for the ProcessEnv to use in imports
|
||||
// operations. Only for use with the result of [Resolver.ClearForNewScan].
|
||||
//
|
||||
// TODO(rfindley): this awkward API is a result of the (arguably) inverted
|
||||
// relationship between configuration and state described in the doc comment
|
||||
// for [ProcessEnv].
|
||||
func (e *ProcessEnv) UpdateResolver(r Resolver) {
|
||||
e.resolver = r
|
||||
e.resolverErr = nil
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory from within the main modules
|
||||
// and their dependencies that contains the package at the given import path,
|
||||
// or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
@@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
@@ -367,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
func readModName(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) {
|
||||
if r.dirInModuleCache(dir) {
|
||||
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
@@ -409,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
// TODO(rfindley): shouldn't this use the dirInfoCache?
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
@@ -431,10 +534,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
|
||||
defer done()
|
||||
|
||||
if err := r.init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
@@ -444,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -494,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
@@ -509,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
case <-r.scanSema: // acquire
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
defer func() { r.scanSema <- struct{}{} }() // release
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
@@ -613,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
|
||||
116 vendor/golang.org/x/tools/internal/imports/mod_cache.go generated vendored
@@ -7,8 +7,12 @@ package imports
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
@@ -39,6 +43,8 @@ const (
|
||||
exportsLoaded
|
||||
)
|
||||
|
||||
// directoryPackageInfo holds (possibly incomplete) information about packages
|
||||
// contained in a given directory.
|
||||
type directoryPackageInfo struct {
|
||||
// status indicates the extent to which this struct has been filled in.
|
||||
status directoryPackageStatus
|
||||
@@ -63,7 +69,10 @@ type directoryPackageInfo struct {
|
||||
packageName string // the package name, as declared in the source.
|
||||
|
||||
// Set when status >= exportsLoaded.
|
||||
|
||||
// TODO(rfindley): it's hard to see this, but exports depend implicitly on
|
||||
// the default build context GOOS and GOARCH.
|
||||
//
|
||||
// We can make this explicit, and key exports by GOOS, GOARCH.
|
||||
exports []string
|
||||
}
|
||||
|
||||
@@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// dirInfoCache is a concurrency safe map for storing information about
|
||||
// DirInfoCache is a concurrency-safe map for storing information about
|
||||
// directories that may contain packages.
|
||||
//
|
||||
// The information in this cache is built incrementally. Entries are initialized in scan.
|
||||
@@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
|
||||
// The information in the cache is not expected to change for the cache's
|
||||
// lifetime, so there is no protection against competing writes. Users should
|
||||
// take care not to hold the cache across changes to the underlying files.
|
||||
//
|
||||
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
|
||||
type dirInfoCache struct {
type DirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}

func NewDirInfoCache() *DirInfoCache {
return &DirInfoCache{
dirs: make(map[string]*directoryPackageInfo),
listeners: make(map[*int]cacheListener),
}
}
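With the cache type exported, a single module-cache scan can be shared across ProcessEnvs through the new ModCache field. A sketch of that wiring as it might appear inside x/tools (internal package; the ProcessEnv literal is deliberately minimal and omits the fields a real caller would set):

// Hypothetical snippet inside x/tools. It pre-populates a shared
// DirInfoCache from GOMODCACHE and hands it to a ProcessEnv via ModCache.
package example

import (
	"log"

	"golang.org/x/tools/internal/imports"
)

func sharedModuleCache(gomodcache string) *imports.DirInfoCache {
	cache := imports.NewDirInfoCache()
	// ScanModuleCache walks GOMODCACHE once; the results are immutable and
	// can be reused by every resolver that shares this cache.
	imports.ScanModuleCache(gomodcache, cache, log.Printf)
	return cache
}

func newEnv(gomodcache string) *imports.ProcessEnv {
	return &imports.ProcessEnv{
		ModCache: sharedModuleCache(gomodcache),
	}
}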
type cacheListener func(directoryPackageInfo)
|
||||
|
||||
// ScanAndListen calls listener on all the items in the cache, and on anything
|
||||
// newly added. The returned stop function waits for all in-flight callbacks to
|
||||
// finish and blocks new ones.
|
||||
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Flushing out all the callbacks is tricky without knowing how many there
|
||||
@@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener
|
||||
}
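As a rough sketch of how the ScanAndListen API above might be driven (this fragment would only be meaningful inside this internal package, with a *DirInfoCache named cache, a context.Context named ctx, and the unexported types in scope; names here are illustrative only):

	// Fragment; assumes cache (*DirInfoCache) and ctx (context.Context) are in scope.
	var mu sync.Mutex
	var dirs []string

	stop := cache.ScanAndListen(ctx, func(info directoryPackageInfo) {
		mu.Lock()
		dirs = append(dirs, info.dir)
		mu.Unlock()
	})
	// ... a concurrent scan stores entries via cache.Store ...
	stop() // per the doc comment: waits for in-flight callbacks and blocks new ones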
|
||||
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
|
||||
// That seems incorrect as the cache should be idempotent.
|
||||
_, old := d.dirs[dir]
|
||||
d.dirs[dir] = &info
|
||||
var listeners []cacheListener
|
||||
@@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
info, ok := d.dirs[dir]
|
||||
@@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
}
|
||||
|
||||
// Keys returns the keys currently present in d.
|
||||
func (d *dirInfoCache) Keys() (keys []string) {
|
||||
func (d *DirInfoCache) Keys() (keys []string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for key := range d.dirs {
|
||||
@@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) {
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info.packageName, err
|
||||
}
|
||||
@@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro
|
||||
return info.packageName, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if reached, _ := info.reachedStatus(exportsLoaded); reached {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
@@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
|
||||
// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
|
||||
// for directory package information, storing the results in cache.
|
||||
func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
|
||||
// Note(rfindley): it's hard to see, but this function attempts to implement
|
||||
// just the side effects on cache of calling PrimeCache with a ProcessEnv
|
||||
// that has the given dir as its GOMODCACHE.
|
||||
//
|
||||
// Teasing out the control flow, we see that we can avoid any handling of
|
||||
// vendor/ and can infer module info entirely from the path, simplifying the
|
||||
// logic here.
|
||||
|
||||
root := gopathwalk.Root{
|
||||
Path: filepath.Clean(dir),
|
||||
Type: gopathwalk.RootModuleCache,
|
||||
}
|
||||
|
||||
directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
// This is a copy of ModuleResolver.scanDirForPackage, trimmed down to
|
||||
// logic that applies to a module cache directory.
|
||||
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if logf != nil {
|
||||
logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
}
|
||||
}
|
||||
importPath := path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
|
||||
modName := readModName(filepath.Join(modDir, "go.mod"))
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
}
|
||||
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
info := directoryInfo(root, dir)
|
||||
cache.Store(info.dir, info)
|
||||
}
|
||||
|
||||
skip := func(_ gopathwalk.Root, dir string) bool {
|
||||
// Skip directories that have already been scanned.
|
||||
//
|
||||
// Note that gopathwalk only adds "package" directories, which must contain
|
||||
// a .go file, and all such package directories in the module cache are
|
||||
// immutable. So if we can load a dir, it can be skipped.
|
||||
info, ok := cache.Load(dir)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
packageScanned, _ := info.reachedStatus(directoryScanned)
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true})
|
||||
}
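ScanModuleCache above derives everything from the on-disk GOMODCACHE layout (module@version/subdir). As a hedged sketch of how a cache might be primed and inspected with it (a fragment that would only compile inside this internal imports package; the gomodcache value is a placeholder for `go env GOMODCACHE`):

	// Fragment; assumes it lives inside golang.org/x/tools/internal/imports.
	cache := NewDirInfoCache()
	gomodcache := "/home/user/go/pkg/mod" // placeholder for `go env GOMODCACHE`

	// Walk the module cache once; results are keyed by absolute directory, e.g.
	//   /home/user/go/pkg/mod/golang.org/x/mod@v0.14.0/module
	// from which the module path (golang.org/x/mod), version (v0.14.0) and
	// non-canonical import path (golang.org/x/mod/module) are recovered.
	ScanModuleCache(gomodcache, cache, func(string, ...any) {})

	for _, dir := range cache.Keys() {
		if info, ok := cache.Load(dir); ok && info.err == nil {
			_ = info.moduleName // module path as written in that module's go.mod
		}
	}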
|
||||
|
||||
61 vendor/golang.org/x/tools/internal/imports/zstdlib.go generated vendored
@@ -151,6 +151,7 @@ var stdlib = map[string][]string{
|
||||
"cmp": {
|
||||
"Compare",
|
||||
"Less",
|
||||
"Or",
|
||||
"Ordered",
|
||||
},
|
||||
"compress/bzip2": {
|
||||
@@ -632,6 +633,8 @@ var stdlib = map[string][]string{
|
||||
"NameMismatch",
|
||||
"NewCertPool",
|
||||
"NotAuthorizedToSign",
|
||||
"OID",
|
||||
"OIDFromInts",
|
||||
"PEMCipher",
|
||||
"PEMCipher3DES",
|
||||
"PEMCipherAES128",
|
||||
@@ -706,6 +709,7 @@ var stdlib = map[string][]string{
|
||||
"LevelWriteCommitted",
|
||||
"Named",
|
||||
"NamedArg",
|
||||
"Null",
|
||||
"NullBool",
|
||||
"NullByte",
|
||||
"NullFloat64",
|
||||
@@ -1921,6 +1925,7 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_32",
|
||||
"R_LARCH_32_PCREL",
|
||||
"R_LARCH_64",
|
||||
"R_LARCH_64_PCREL",
|
||||
"R_LARCH_ABS64_HI12",
|
||||
"R_LARCH_ABS64_LO20",
|
||||
"R_LARCH_ABS_HI20",
|
||||
@@ -1928,12 +1933,17 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_ADD16",
|
||||
"R_LARCH_ADD24",
|
||||
"R_LARCH_ADD32",
|
||||
"R_LARCH_ADD6",
|
||||
"R_LARCH_ADD64",
|
||||
"R_LARCH_ADD8",
|
||||
"R_LARCH_ADD_ULEB128",
|
||||
"R_LARCH_ALIGN",
|
||||
"R_LARCH_B16",
|
||||
"R_LARCH_B21",
|
||||
"R_LARCH_B26",
|
||||
"R_LARCH_CFA",
|
||||
"R_LARCH_COPY",
|
||||
"R_LARCH_DELETE",
|
||||
"R_LARCH_GNU_VTENTRY",
|
||||
"R_LARCH_GNU_VTINHERIT",
|
||||
"R_LARCH_GOT64_HI12",
|
||||
@@ -1953,6 +1963,7 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_PCALA64_LO20",
|
||||
"R_LARCH_PCALA_HI20",
|
||||
"R_LARCH_PCALA_LO12",
|
||||
"R_LARCH_PCREL20_S2",
|
||||
"R_LARCH_RELATIVE",
|
||||
"R_LARCH_RELAX",
|
||||
"R_LARCH_SOP_ADD",
|
||||
@@ -1983,8 +1994,10 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_SUB16",
|
||||
"R_LARCH_SUB24",
|
||||
"R_LARCH_SUB32",
|
||||
"R_LARCH_SUB6",
|
||||
"R_LARCH_SUB64",
|
||||
"R_LARCH_SUB8",
|
||||
"R_LARCH_SUB_ULEB128",
|
||||
"R_LARCH_TLS_DTPMOD32",
|
||||
"R_LARCH_TLS_DTPMOD64",
|
||||
"R_LARCH_TLS_DTPREL32",
|
||||
@@ -2035,6 +2048,7 @@ var stdlib = map[string][]string{
|
||||
"R_MIPS_LO16",
|
||||
"R_MIPS_NONE",
|
||||
"R_MIPS_PC16",
|
||||
"R_MIPS_PC32",
|
||||
"R_MIPS_PJUMP",
|
||||
"R_MIPS_REL16",
|
||||
"R_MIPS_REL32",
|
||||
@@ -2952,6 +2966,8 @@ var stdlib = map[string][]string{
|
||||
"RegisterName",
|
||||
},
|
||||
"encoding/hex": {
|
||||
"AppendDecode",
|
||||
"AppendEncode",
|
||||
"Decode",
|
||||
"DecodeString",
|
||||
"DecodedLen",
|
||||
@@ -3233,6 +3249,7 @@ var stdlib = map[string][]string{
|
||||
"TypeSpec",
|
||||
"TypeSwitchStmt",
|
||||
"UnaryExpr",
|
||||
"Unparen",
|
||||
"ValueSpec",
|
||||
"Var",
|
||||
"Visitor",
|
||||
@@ -3492,6 +3509,7 @@ var stdlib = map[string][]string{
|
||||
"XOR_ASSIGN",
|
||||
},
|
||||
"go/types": {
|
||||
"Alias",
|
||||
"ArgumentError",
|
||||
"Array",
|
||||
"AssertableTo",
|
||||
@@ -3559,6 +3577,7 @@ var stdlib = map[string][]string{
|
||||
"MethodVal",
|
||||
"MissingMethod",
|
||||
"Named",
|
||||
"NewAlias",
|
||||
"NewArray",
|
||||
"NewChan",
|
||||
"NewChecker",
|
||||
@@ -3627,6 +3646,7 @@ var stdlib = map[string][]string{
|
||||
"Uint64",
|
||||
"Uint8",
|
||||
"Uintptr",
|
||||
"Unalias",
|
||||
"Union",
|
||||
"Universe",
|
||||
"Unsafe",
|
||||
@@ -3643,6 +3663,11 @@ var stdlib = map[string][]string{
|
||||
"WriteSignature",
|
||||
"WriteType",
|
||||
},
|
||||
"go/version": {
|
||||
"Compare",
|
||||
"IsValid",
|
||||
"Lang",
|
||||
},
|
||||
"hash": {
|
||||
"Hash",
|
||||
"Hash32",
|
||||
@@ -4078,6 +4103,7 @@ var stdlib = map[string][]string{
|
||||
"NewTextHandler",
|
||||
"Record",
|
||||
"SetDefault",
|
||||
"SetLogLoggerLevel",
|
||||
"Source",
|
||||
"SourceKey",
|
||||
"String",
|
||||
@@ -4367,6 +4393,35 @@ var stdlib = map[string][]string{
|
||||
"Uint64",
|
||||
"Zipf",
|
||||
},
|
||||
"math/rand/v2": {
|
||||
"ChaCha8",
|
||||
"ExpFloat64",
|
||||
"Float32",
|
||||
"Float64",
|
||||
"Int",
|
||||
"Int32",
|
||||
"Int32N",
|
||||
"Int64",
|
||||
"Int64N",
|
||||
"IntN",
|
||||
"N",
|
||||
"New",
|
||||
"NewChaCha8",
|
||||
"NewPCG",
|
||||
"NewZipf",
|
||||
"NormFloat64",
|
||||
"PCG",
|
||||
"Perm",
|
||||
"Rand",
|
||||
"Shuffle",
|
||||
"Source",
|
||||
"Uint32",
|
||||
"Uint32N",
|
||||
"Uint64",
|
||||
"Uint64N",
|
||||
"UintN",
|
||||
"Zipf",
|
||||
},
|
||||
"mime": {
|
||||
"AddExtensionType",
|
||||
"BEncoding",
|
||||
@@ -4540,6 +4595,7 @@ var stdlib = map[string][]string{
|
||||
"FS",
|
||||
"File",
|
||||
"FileServer",
|
||||
"FileServerFS",
|
||||
"FileSystem",
|
||||
"Flusher",
|
||||
"Get",
|
||||
@@ -4566,6 +4622,7 @@ var stdlib = map[string][]string{
|
||||
"MethodPut",
|
||||
"MethodTrace",
|
||||
"NewFileTransport",
|
||||
"NewFileTransportFS",
|
||||
"NewRequest",
|
||||
"NewRequestWithContext",
|
||||
"NewResponseController",
|
||||
@@ -4599,6 +4656,7 @@ var stdlib = map[string][]string{
|
||||
"Serve",
|
||||
"ServeContent",
|
||||
"ServeFile",
|
||||
"ServeFileFS",
|
||||
"ServeMux",
|
||||
"ServeTLS",
|
||||
"Server",
|
||||
@@ -5106,6 +5164,7 @@ var stdlib = map[string][]string{
|
||||
"StructTag",
|
||||
"Swapper",
|
||||
"Type",
|
||||
"TypeFor",
|
||||
"TypeOf",
|
||||
"Uint",
|
||||
"Uint16",
|
||||
@@ -5342,6 +5401,7 @@ var stdlib = map[string][]string{
|
||||
"CompactFunc",
|
||||
"Compare",
|
||||
"CompareFunc",
|
||||
"Concat",
|
||||
"Contains",
|
||||
"ContainsFunc",
|
||||
"Delete",
|
||||
@@ -10824,6 +10884,7 @@ var stdlib = map[string][]string{
|
||||
"Value",
|
||||
},
|
||||
"testing/slogtest": {
|
||||
"Run",
|
||||
"TestHandler",
|
||||
},
|
||||
"text/scanner": {
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.22
|
||||
// +build !go1.22
|
||||
|
||||
package versions
|
||||
|
||||
// Note: If we use build tags to use go/versions when go >=1.22,
|
||||
// we run into go.dev/issue/53737. Under some operations users would see an
|
||||
// import of "go/versions" even if they would not compile the file.
|
||||
// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
|
||||
// For this reason, this library is just a clone of go/versions for the moment.
|
||||
|
||||
// Lang returns the Go language version for version x.
|
||||
// If x is not a valid version, Lang returns the empty string.
|
||||
// For example:
|
||||
38 vendor/golang.org/x/tools/internal/versions/versions_go122.go generated vendored
@@ -1,38 +0,0 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.22
|
||||
// +build go1.22
|
||||
|
||||
package versions
|
||||
|
||||
import (
|
||||
"go/version"
|
||||
)
|
||||
|
||||
// Lang returns the Go language version for version x.
|
||||
// If x is not a valid version, Lang returns the empty string.
|
||||
// For example:
|
||||
//
|
||||
// Lang("go1.21rc2") = "go1.21"
|
||||
// Lang("go1.21.2") = "go1.21"
|
||||
// Lang("go1.21") = "go1.21"
|
||||
// Lang("go1") = "go1"
|
||||
// Lang("bad") = ""
|
||||
// Lang("1.21") = ""
|
||||
func Lang(x string) string { return version.Lang(x) }
|
||||
|
||||
// Compare returns -1, 0, or +1 depending on whether
|
||||
// x < y, x == y, or x > y, interpreted as Go versions.
|
||||
// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
|
||||
// Invalid versions, including the empty string, compare less than
|
||||
// valid versions and equal to each other.
|
||||
// The language version "go1.21" compares less than the
|
||||
// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
|
||||
// Custom toolchain suffixes are ignored during comparison:
|
||||
// "go1.21.0" and "go1.21.0-bigcorp" are equal.
|
||||
func Compare(x, y string) int { return version.Compare(x, y) }
|
||||
|
||||
// IsValid reports whether the version x is valid.
|
||||
func IsValid(x string) bool { return version.IsValid(x) }
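Since this fallback file is removed and the package now delegates to the standard library on Go 1.22+, here is a small self-contained example of the go/version semantics described in the comments above (the printed values simply restate those doc examples):

	package main

	import (
		"fmt"
		"go/version"
	)

	func main() {
		// Lang strips the release/pre-release suffix down to the language version.
		fmt.Println(version.Lang("go1.21rc2")) // "go1.21"
		fmt.Println(version.Lang("1.21"))      // "" (must start with "go")

		// The language version compares less than its releases.
		fmt.Println(version.Compare("go1.21", "go1.21.0")) // -1
		// Custom toolchain suffixes are ignored during comparison.
		fmt.Println(version.Compare("go1.21.0-bigcorp", "go1.21.0")) // 0

		fmt.Println(version.IsValid("go1.21.2")) // true
		fmt.Println(version.IsValid("bad"))      // false
	}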
|
||||
1 vendor/k8s.io/api generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/api
1 vendor/k8s.io/apiextensions-apiserver generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/apiextensions-apiserver
1 vendor/k8s.io/apimachinery generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/apimachinery
1 vendor/k8s.io/apiserver generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/apiserver
1 vendor/k8s.io/cli-runtime generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/cli-runtime
1 vendor/k8s.io/client-go generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/client-go
1 vendor/k8s.io/cloud-provider generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/cloud-provider
1 vendor/k8s.io/cluster-bootstrap generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/cluster-bootstrap
1 vendor/k8s.io/code-generator generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/code-generator
1 vendor/k8s.io/component-base generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/component-base
1 vendor/k8s.io/component-helpers generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/component-helpers
1 vendor/k8s.io/controller-manager generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/controller-manager
1 vendor/k8s.io/cri-api generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/cri-api
1 vendor/k8s.io/csi-translation-lib generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/csi-translation-lib
1 vendor/k8s.io/dynamic-resource-allocation generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/dynamic-resource-allocation
1 vendor/k8s.io/endpointslice generated vendored
@@ -1 +0,0 @@
../../staging/src/k8s.io/endpointslice
199 vendor/k8s.io/gengo/args/args.go generated vendored
@@ -1,199 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package args has common command-line flags for generation programs.
|
||||
package args
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
goflag "flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/parser"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Default returns a defaulted GeneratorArgs. You may change the defaults
|
||||
// before calling AddFlags.
|
||||
func Default() *GeneratorArgs {
|
||||
return &GeneratorArgs{
|
||||
OutputBase: DefaultSourceTree(),
|
||||
GoHeaderFilePath: filepath.Join(DefaultSourceTree(), "k8s.io/gengo/boilerplate/boilerplate.go.txt"),
|
||||
GeneratedBuildTag: "ignore_autogenerated",
|
||||
GeneratedByCommentTemplate: "// Code generated by GENERATOR_NAME. DO NOT EDIT.",
|
||||
defaultCommandLineFlags: true,
|
||||
}
|
||||
}
|
||||
|
||||
// GeneratorArgs has arguments that are passed to generators.
|
||||
type GeneratorArgs struct {
|
||||
// Which directories to parse.
|
||||
InputDirs []string
|
||||
|
||||
// Source tree to write results to.
|
||||
OutputBase string
|
||||
|
||||
// Package path within the source tree.
|
||||
OutputPackagePath string
|
||||
|
||||
// Output file name.
|
||||
OutputFileBaseName string
|
||||
|
||||
// Where to get copyright header text.
|
||||
GoHeaderFilePath string
|
||||
|
||||
// If GeneratedByCommentTemplate is set, generate a "Code generated by" comment
|
||||
// below the boilerplate, of the format defined by this string.
|
||||
// Any instances of "GENERATOR_NAME" will be replaced with the name of the code generator.
|
||||
GeneratedByCommentTemplate string
|
||||
|
||||
// If true, only verify, don't write anything.
|
||||
VerifyOnly bool
|
||||
|
||||
// If true, include *_test.go files
|
||||
IncludeTestFiles bool
|
||||
|
||||
// GeneratedBuildTag is the tag used to identify code generated by execution
|
||||
// of this type. Each generator should use a different tag, and different
|
||||
// groups of generators (external API that depends on Kube generations) should
|
||||
// keep tags distinct as well.
|
||||
GeneratedBuildTag string
|
||||
|
||||
// Any custom arguments go here
|
||||
CustomArgs interface{}
|
||||
|
||||
// If specified, trim the prefix from OutputPackagePath before writing files.
|
||||
TrimPathPrefix string
|
||||
|
||||
// Whether to use default command line flags
|
||||
defaultCommandLineFlags bool
|
||||
}
|
||||
|
||||
// WithoutDefaultFlagParsing disables implicit addition of command line flags and parsing.
|
||||
func (g *GeneratorArgs) WithoutDefaultFlagParsing() *GeneratorArgs {
|
||||
g.defaultCommandLineFlags = false
|
||||
return g
|
||||
}
|
||||
|
||||
func (g *GeneratorArgs) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.StringSliceVarP(&g.InputDirs, "input-dirs", "i", g.InputDirs, "Comma-separated list of import paths to get input types from.")
|
||||
fs.StringVarP(&g.OutputBase, "output-base", "o", g.OutputBase, "Output base; defaults to $GOPATH/src/ or ./ if $GOPATH is not set.")
|
||||
fs.StringVarP(&g.OutputPackagePath, "output-package", "p", g.OutputPackagePath, "Base package path.")
|
||||
fs.StringVarP(&g.OutputFileBaseName, "output-file-base", "O", g.OutputFileBaseName, "Base name (without .go suffix) for output files.")
|
||||
fs.StringVarP(&g.GoHeaderFilePath, "go-header-file", "h", g.GoHeaderFilePath, "File containing boilerplate header text. The string YEAR will be replaced with the current 4-digit year.")
|
||||
fs.BoolVar(&g.VerifyOnly, "verify-only", g.VerifyOnly, "If true, only verify existing output, do not write anything.")
|
||||
fs.StringVar(&g.GeneratedBuildTag, "build-tag", g.GeneratedBuildTag, "A Go build tag to use to identify files generated by this command. Should be unique.")
|
||||
fs.StringVar(&g.TrimPathPrefix, "trim-path-prefix", g.TrimPathPrefix, "If set, trim the specified prefix from --output-package when generating files.")
|
||||
}
|
||||
|
||||
// LoadGoBoilerplate loads the boilerplate file passed to --go-header-file.
|
||||
func (g *GeneratorArgs) LoadGoBoilerplate() ([]byte, error) {
|
||||
b, err := ioutil.ReadFile(g.GoHeaderFilePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = bytes.Replace(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year())), -1)
|
||||
|
||||
if g.GeneratedByCommentTemplate != "" {
|
||||
if len(b) != 0 {
|
||||
b = append(b, byte('\n'))
|
||||
}
|
||||
generatorName := path.Base(os.Args[0])
|
||||
generatedByComment := strings.Replace(g.GeneratedByCommentTemplate, "GENERATOR_NAME", generatorName, -1)
|
||||
s := fmt.Sprintf("%s\n\n", generatedByComment)
|
||||
b = append(b, []byte(s)...)
|
||||
}
|
||||
return b, nil
|
||||
}
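To make the two substitutions performed by LoadGoBoilerplate concrete, here is a small self-contained sketch of the same transformation (the sample boilerplate and generator name are invented; this is not the gengo code itself):

	package main

	import (
		"fmt"
		"strconv"
		"strings"
		"time"
	)

	func main() {
		boilerplate := "/*\nCopyright YEAR The Example Authors.\n*/\n"
		template := "// Code generated by GENERATOR_NAME. DO NOT EDIT."

		// YEAR in the boilerplate becomes the current UTC year.
		year := strconv.Itoa(time.Now().UTC().Year())
		header := strings.ReplaceAll(boilerplate, "YEAR", year)
		// GENERATOR_NAME in the comment template becomes the binary name.
		header += "\n" + strings.ReplaceAll(template, "GENERATOR_NAME", "deepcopy-gen") + "\n\n"

		fmt.Print(header)
	}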
|
||||
|
||||
// NewBuilder makes a new parser.Builder and populates it with the input
|
||||
// directories.
|
||||
func (g *GeneratorArgs) NewBuilder() (*parser.Builder, error) {
|
||||
b := parser.New()
|
||||
|
||||
// flag for including *_test.go
|
||||
b.IncludeTestFiles = g.IncludeTestFiles
|
||||
|
||||
// Ignore all auto-generated files.
|
||||
b.AddBuildTags(g.GeneratedBuildTag)
|
||||
|
||||
for _, d := range g.InputDirs {
|
||||
var err error
|
||||
if strings.HasSuffix(d, "/...") {
|
||||
err = b.AddDirRecursive(strings.TrimSuffix(d, "/..."))
|
||||
} else {
|
||||
err = b.AddDir(d)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to add directory %q: %v", d, err)
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// DefaultSourceTree returns the /src directory of the first entry in $GOPATH.
|
||||
// If $GOPATH is empty, it returns "./". Useful as a default output location.
|
||||
func DefaultSourceTree() string {
|
||||
paths := strings.Split(os.Getenv("GOPATH"), string(filepath.ListSeparator))
|
||||
if len(paths) > 0 && len(paths[0]) > 0 {
|
||||
return filepath.Join(paths[0], "src")
|
||||
}
|
||||
return "./"
|
||||
}
|
||||
|
||||
// Execute implements main().
|
||||
// If you don't need any non-default behavior, use as:
|
||||
// args.Default().Execute(...)
|
||||
func (g *GeneratorArgs) Execute(nameSystems namer.NameSystems, defaultSystem string, pkgs func(*generator.Context, *GeneratorArgs) generator.Packages) error {
|
||||
if g.defaultCommandLineFlags {
|
||||
g.AddFlags(pflag.CommandLine)
|
||||
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
|
||||
pflag.Parse()
|
||||
}
|
||||
|
||||
b, err := g.NewBuilder()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed making a parser: %v", err)
|
||||
}
|
||||
|
||||
// pass through the flag on whether to include *_test.go files
|
||||
b.IncludeTestFiles = g.IncludeTestFiles
|
||||
|
||||
c, err := generator.NewContext(b, nameSystems, defaultSystem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed making a context: %v", err)
|
||||
}
|
||||
|
||||
c.TrimPathPrefix = g.TrimPathPrefix
|
||||
|
||||
c.Verify = g.VerifyOnly
|
||||
packages := pkgs(c, g)
|
||||
if err := c.ExecutePackages(g.OutputBase, packages); err != nil {
|
||||
return fmt.Errorf("Failed executing generator: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
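Before this package was dropped from vendor/, a generator binary typically wired it up roughly as below. The sketch combines the Execute signature above with the deepcopy-gen generators package shown later in this diff; it is an approximation of such a main(), not the exact Kubernetes tooling:

	package main

	import (
		"k8s.io/gengo/args"
		"k8s.io/gengo/examples/deepcopy-gen/generators"
		"k8s.io/klog/v2"
	)

	func main() {
		arguments := args.Default()
		arguments.OutputFileBaseName = "zz_generated.deepcopy"

		// NameSystems/DefaultNameSystem/Packages come from the generators
		// package; Execute parses flags, builds the parser.Builder and runs
		// the generators over the requested input directories.
		if err := arguments.Execute(
			generators.NameSystems(),
			generators.DefaultNameSystem(),
			generators.Packages,
		); err != nil {
			klog.Fatalf("Error: %v", err)
		}
		klog.V(2).Info("Completed successfully.")
	}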
|
||||
935 vendor/k8s.io/gengo/examples/deepcopy-gen/generators/deepcopy.go generated vendored
@@ -1,935 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/args"
|
||||
"k8s.io/gengo/examples/set-gen/sets"
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// CustomArgs is used by the go2idl framework to pass args specific to this
|
||||
// generator.
|
||||
type CustomArgs struct {
|
||||
BoundingDirs []string // Only deal with types rooted under these dirs.
|
||||
}
|
||||
|
||||
// This is the comment tag that carries parameters for deep-copy generation.
|
||||
const (
|
||||
tagEnabledName = "k8s:deepcopy-gen"
|
||||
interfacesTagName = tagEnabledName + ":interfaces"
|
||||
interfacesNonPointerTagName = tagEnabledName + ":nonpointer-interfaces" // attach the DeepCopy<Interface> methods to the
|
||||
)
|
||||
|
||||
// Known values for the comment tag.
|
||||
const tagValuePackage = "package"
|
||||
|
||||
// enabledTagValue holds parameters from a tagName tag.
|
||||
type enabledTagValue struct {
|
||||
value string
|
||||
register bool
|
||||
}
|
||||
|
||||
func extractEnabledTypeTag(t *types.Type) *enabledTagValue {
|
||||
comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...)
|
||||
return extractEnabledTag(comments)
|
||||
}
|
||||
|
||||
func extractEnabledTag(comments []string) *enabledTagValue {
|
||||
tagVals := types.ExtractCommentTags("+", comments)[tagEnabledName]
|
||||
if tagVals == nil {
|
||||
// No match for the tag.
|
||||
return nil
|
||||
}
|
||||
// If there are multiple values, abort.
|
||||
if len(tagVals) > 1 {
|
||||
klog.Fatalf("Found %d %s tags: %q", len(tagVals), tagEnabledName, tagVals)
|
||||
}
|
||||
|
||||
// If we got here we are returning something.
|
||||
tag := &enabledTagValue{}
|
||||
|
||||
// Get the primary value.
|
||||
parts := strings.Split(tagVals[0], ",")
|
||||
if len(parts) >= 1 {
|
||||
tag.value = parts[0]
|
||||
}
|
||||
|
||||
// Parse extra arguments.
|
||||
parts = parts[1:]
|
||||
for i := range parts {
|
||||
kv := strings.SplitN(parts[i], "=", 2)
|
||||
k := kv[0]
|
||||
v := ""
|
||||
if len(kv) == 2 {
|
||||
v = kv[1]
|
||||
}
|
||||
switch k {
|
||||
case "register":
|
||||
if v != "false" {
|
||||
tag.register = true
|
||||
}
|
||||
default:
|
||||
klog.Fatalf("Unsupported %s param: %q", tagEnabledName, parts[i])
|
||||
}
|
||||
}
|
||||
return tag
|
||||
}
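As a tiny self-contained sketch of the comment-tag grammar extractEnabledTag parses, the snippet below mirrors its splitting logic on a sample tag value (it is an illustration, not the gengo implementation):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// A package doc comment line such as
		//   // +k8s:deepcopy-gen=package,register
		// yields the primary value "package" plus the extra parameter register=true.
		tagVal := "package,register"

		parts := strings.Split(tagVal, ",")
		value := parts[0]
		register := false
		for _, p := range parts[1:] {
			kv := strings.SplitN(p, "=", 2)
			if kv[0] == "register" && (len(kv) == 1 || kv[1] != "false") {
				register = true
			}
		}
		fmt.Println(value, register) // package true
	}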
|
||||
|
||||
// TODO: This is created only to reduce number of changes in a single PR.
|
||||
// Remove it and use PublicNamer instead.
|
||||
func deepCopyNamer() *namer.NameStrategy {
|
||||
return &namer.NameStrategy{
|
||||
Join: func(pre string, in []string, post string) string {
|
||||
return strings.Join(in, "_")
|
||||
},
|
||||
PrependPackageNames: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// NameSystems returns the name system used by the generators in this package.
|
||||
func NameSystems() namer.NameSystems {
|
||||
return namer.NameSystems{
|
||||
"public": deepCopyNamer(),
|
||||
"raw": namer.NewRawNamer("", nil),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultNameSystem returns the default name system for ordering the types to be
|
||||
// processed by the generators in this package.
|
||||
func DefaultNameSystem() string {
|
||||
return "public"
|
||||
}
|
||||
|
||||
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
|
||||
boilerplate, err := arguments.LoadGoBoilerplate()
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed loading boilerplate: %v", err)
|
||||
}
|
||||
|
||||
inputs := sets.NewString(context.Inputs...)
|
||||
packages := generator.Packages{}
|
||||
header := append([]byte(fmt.Sprintf("//go:build !%s\n// +build !%s\n\n", arguments.GeneratedBuildTag, arguments.GeneratedBuildTag)), boilerplate...)
|
||||
|
||||
boundingDirs := []string{}
|
||||
if customArgs, ok := arguments.CustomArgs.(*CustomArgs); ok {
|
||||
if customArgs.BoundingDirs == nil {
|
||||
customArgs.BoundingDirs = context.Inputs
|
||||
}
|
||||
for i := range customArgs.BoundingDirs {
|
||||
// Strip any trailing slashes - they are not exactly "correct" but
|
||||
// this is friendlier.
|
||||
boundingDirs = append(boundingDirs, strings.TrimRight(customArgs.BoundingDirs[i], "/"))
|
||||
}
|
||||
}
|
||||
|
||||
for i := range inputs {
|
||||
klog.V(5).Infof("Considering pkg %q", i)
|
||||
pkg := context.Universe[i]
|
||||
if pkg == nil {
|
||||
// If the input had no Go files, for example.
|
||||
continue
|
||||
}
|
||||
|
||||
ptag := extractEnabledTag(pkg.Comments)
|
||||
ptagValue := ""
|
||||
ptagRegister := false
|
||||
if ptag != nil {
|
||||
ptagValue = ptag.value
|
||||
if ptagValue != tagValuePackage {
|
||||
klog.Fatalf("Package %v: unsupported %s value: %q", i, tagEnabledName, ptagValue)
|
||||
}
|
||||
ptagRegister = ptag.register
|
||||
klog.V(5).Infof(" tag.value: %q, tag.register: %t", ptagValue, ptagRegister)
|
||||
} else {
|
||||
klog.V(5).Infof(" no tag")
|
||||
}
|
||||
|
||||
// If the pkg-scoped tag says to generate, we can skip scanning types.
|
||||
pkgNeedsGeneration := (ptagValue == tagValuePackage)
|
||||
if !pkgNeedsGeneration {
|
||||
// If the pkg-scoped tag did not exist, scan all types for one that
|
||||
// explicitly wants generation. Ensure all types that want generation
|
||||
// can be copied.
|
||||
var uncopyable []string
|
||||
for _, t := range pkg.Types {
|
||||
klog.V(5).Infof(" considering type %q", t.Name.String())
|
||||
ttag := extractEnabledTypeTag(t)
|
||||
if ttag != nil && ttag.value == "true" {
|
||||
klog.V(5).Infof(" tag=true")
|
||||
if !copyableType(t) {
|
||||
uncopyable = append(uncopyable, fmt.Sprintf("%v", t))
|
||||
} else {
|
||||
pkgNeedsGeneration = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(uncopyable) > 0 {
|
||||
klog.Fatalf("Types requested deepcopy generation but are not copyable: %s",
|
||||
strings.Join(uncopyable, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
if pkgNeedsGeneration {
|
||||
klog.V(3).Infof("Package %q needs generation", i)
|
||||
path := pkg.Path
|
||||
// if the source path is within a /vendor/ directory (for example,
|
||||
// k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1), allow
|
||||
// generation to output to the proper relative path (under vendor).
|
||||
// Otherwise, the generator will create the file in the wrong location
|
||||
// in the output directory.
|
||||
// TODO: build a more fundamental concept in gengo for dealing with modifications
|
||||
// to vendored packages.
|
||||
if strings.HasPrefix(pkg.SourcePath, arguments.OutputBase) {
|
||||
expandedPath := strings.TrimPrefix(pkg.SourcePath, arguments.OutputBase)
|
||||
if strings.Contains(expandedPath, "/vendor/") {
|
||||
path = expandedPath
|
||||
}
|
||||
}
|
||||
packages = append(packages,
|
||||
&generator.DefaultPackage{
|
||||
PackageName: strings.Split(filepath.Base(pkg.Path), ".")[0],
|
||||
PackagePath: path,
|
||||
HeaderText: header,
|
||||
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
|
||||
return []generator.Generator{
|
||||
NewGenDeepCopy(arguments.OutputFileBaseName, pkg.Path, boundingDirs, (ptagValue == tagValuePackage), ptagRegister),
|
||||
}
|
||||
},
|
||||
FilterFunc: func(c *generator.Context, t *types.Type) bool {
|
||||
return t.Name.Package == pkg.Path
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return packages
|
||||
}
|
||||
|
||||
// genDeepCopy produces a file with autogenerated deep-copy functions.
|
||||
type genDeepCopy struct {
|
||||
generator.DefaultGen
|
||||
targetPackage string
|
||||
boundingDirs []string
|
||||
allTypes bool
|
||||
registerTypes bool
|
||||
imports namer.ImportTracker
|
||||
typesForInit []*types.Type
|
||||
}
|
||||
|
||||
func NewGenDeepCopy(sanitizedName, targetPackage string, boundingDirs []string, allTypes, registerTypes bool) generator.Generator {
|
||||
return &genDeepCopy{
|
||||
DefaultGen: generator.DefaultGen{
|
||||
OptionalName: sanitizedName,
|
||||
},
|
||||
targetPackage: targetPackage,
|
||||
boundingDirs: boundingDirs,
|
||||
allTypes: allTypes,
|
||||
registerTypes: registerTypes,
|
||||
imports: generator.NewImportTracker(),
|
||||
typesForInit: make([]*types.Type, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) Namers(c *generator.Context) namer.NameSystems {
|
||||
// Have the raw namer for this file track what it imports.
|
||||
return namer.NameSystems{
|
||||
"raw": namer.NewRawNamer(g.targetPackage, g.imports),
|
||||
}
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) Filter(c *generator.Context, t *types.Type) bool {
|
||||
// Filter out types not being processed or not copyable within the package.
|
||||
enabled := g.allTypes
|
||||
if !enabled {
|
||||
ttag := extractEnabledTypeTag(t)
|
||||
if ttag != nil && ttag.value == "true" {
|
||||
enabled = true
|
||||
}
|
||||
}
|
||||
if !enabled {
|
||||
return false
|
||||
}
|
||||
if !copyableType(t) {
|
||||
klog.V(2).Infof("Type %v is not copyable", t)
|
||||
return false
|
||||
}
|
||||
klog.V(4).Infof("Type %v is copyable", t)
|
||||
g.typesForInit = append(g.typesForInit, t)
|
||||
return true
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) copyableAndInBounds(t *types.Type) bool {
|
||||
if !copyableType(t) {
|
||||
return false
|
||||
}
|
||||
// Only packages within the restricted range can be processed.
|
||||
if !isRootedUnder(t.Name.Package, g.boundingDirs) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deepCopyMethod returns the signature of a DeepCopy() method, nil or an error
|
||||
// if the type does not match. This allows more efficient deep copy
|
||||
// implementations to be defined by the type's author. The correct signature
|
||||
// for a type T is:
|
||||
// func (t T) DeepCopy() T
|
||||
// or:
|
||||
// func (t *T) DeepCopy() *T
|
||||
func deepCopyMethod(t *types.Type) (*types.Signature, error) {
|
||||
f, found := t.Methods["DeepCopy"]
|
||||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
if len(f.Signature.Parameters) != 0 {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no parameters", t)
|
||||
}
|
||||
if len(f.Signature.Results) != 1 {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one result", t)
|
||||
}
|
||||
|
||||
ptrResult := f.Signature.Results[0].Kind == types.Pointer && f.Signature.Results[0].Elem.Name == t.Name
|
||||
nonPtrResult := f.Signature.Results[0].Name == t.Name
|
||||
|
||||
if !ptrResult && !nonPtrResult {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected to return %s or *%s", t, t.Name.Name, t.Name.Name)
|
||||
}
|
||||
|
||||
ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name
|
||||
nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name
|
||||
|
||||
if ptrRcvr && !ptrResult {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a *%s result for a *%s receiver", t, t.Name.Name, t.Name.Name)
|
||||
}
|
||||
if nonPtrRcvr && !nonPtrResult {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a %s result for a %s receiver", t, t.Name.Name, t.Name.Name)
|
||||
}
|
||||
|
||||
return f.Signature, nil
|
||||
}
|
||||
|
||||
// deepCopyMethodOrDie returns the signature of a DeepCopy method, nil or calls klog.Fatalf
|
||||
// if the type does not match.
|
||||
func deepCopyMethodOrDie(t *types.Type) *types.Signature {
|
||||
ret, err := deepCopyMethod(t)
|
||||
if err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// deepCopyIntoMethod returns the signature of a DeepCopyInto() method, nil or an error
|
||||
// if the type is wrong. DeepCopyInto allows more efficient deep copy
|
||||
// implementations to be defined by the type's author. The correct signature
|
||||
// for a type T is:
|
||||
// func (t T) DeepCopyInto(t *T)
|
||||
// or:
|
||||
// func (t *T) DeepCopyInto(t *T)
|
||||
func deepCopyIntoMethod(t *types.Type) (*types.Signature, error) {
|
||||
f, found := t.Methods["DeepCopyInto"]
|
||||
if !found {
|
||||
return nil, nil
|
||||
}
|
||||
if len(f.Signature.Parameters) != 1 {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected exactly one parameter", t)
|
||||
}
|
||||
if len(f.Signature.Results) != 0 {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected no result type", t)
|
||||
}
|
||||
|
||||
ptrParam := f.Signature.Parameters[0].Kind == types.Pointer && f.Signature.Parameters[0].Elem.Name == t.Name
|
||||
|
||||
if !ptrParam {
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected parameter of type *%s", t, t.Name.Name)
|
||||
}
|
||||
|
||||
ptrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Kind == types.Pointer && f.Signature.Receiver.Elem.Name == t.Name
|
||||
nonPtrRcvr := f.Signature.Receiver != nil && f.Signature.Receiver.Name == t.Name
|
||||
|
||||
if !ptrRcvr && !nonPtrRcvr {
|
||||
// this should never happen
|
||||
return nil, fmt.Errorf("type %v: invalid DeepCopy signature, expected a receiver of type %s or *%s", t, t.Name.Name, t.Name.Name)
|
||||
}
|
||||
|
||||
return f.Signature, nil
|
||||
}
|
||||
|
||||
// deepCopyIntoMethodOrDie returns the signature of a DeepCopyInto() method, nil or calls klog.Fatalf
|
||||
// if the type is wrong.
|
||||
func deepCopyIntoMethodOrDie(t *types.Type) *types.Signature {
|
||||
ret, err := deepCopyIntoMethod(t)
|
||||
if err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
return ret
|
||||
}
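The two helpers above only accept a narrow set of hand-written method shapes. A minimal self-contained illustration of a type whose custom methods satisfy both checks (the Widget type and its field are invented for the example):

	package main

	import "fmt"

	// Widget carries a reference-typed field, so a hand-written deep copy is useful.
	type Widget struct {
		Labels map[string]string
	}

	// DeepCopyInto matches the accepted shape: one *Widget parameter, no results.
	func (w *Widget) DeepCopyInto(out *Widget) {
		*out = *w
		if w.Labels != nil {
			out.Labels = make(map[string]string, len(w.Labels))
			for k, v := range w.Labels {
				out.Labels[k] = v
			}
		}
	}

	// DeepCopy matches the accepted shape: no parameters, a single *Widget result
	// for a pointer receiver.
	func (w *Widget) DeepCopy() *Widget {
		if w == nil {
			return nil
		}
		out := new(Widget)
		w.DeepCopyInto(out)
		return out
	}

	func main() {
		a := &Widget{Labels: map[string]string{"app": "demo"}}
		b := a.DeepCopy()
		b.Labels["app"] = "changed"
		fmt.Println(a.Labels["app"], b.Labels["app"]) // demo changed
	}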
|
||||
|
||||
func isRootedUnder(pkg string, roots []string) bool {
|
||||
// Add trailing / to avoid false matches, e.g. foo/bar vs foo/barn. This
|
||||
// assumes that bounding dirs do not have trailing slashes.
|
||||
pkg = pkg + "/"
|
||||
for _, root := range roots {
|
||||
if strings.HasPrefix(pkg, root+"/") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
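The trailing-slash trick above is easy to get wrong, so here is a self-contained check of the two cases the comment calls out (foo/bar vs foo/barn); rootedUnder simply reproduces the comparison for illustration:

	package main

	import (
		"fmt"
		"strings"
	)

	// rootedUnder reproduces the comparison used above: both sides get a
	// trailing slash so "foo/barn" is not treated as being under "foo/bar".
	func rootedUnder(pkg string, roots []string) bool {
		pkg = pkg + "/"
		for _, root := range roots {
			if strings.HasPrefix(pkg, root+"/") {
				return true
			}
		}
		return false
	}

	func main() {
		roots := []string{"foo/bar"}
		fmt.Println(rootedUnder("foo/bar/baz", roots)) // true
		fmt.Println(rootedUnder("foo/barn", roots))    // false
	}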
|
||||
|
||||
func copyableType(t *types.Type) bool {
|
||||
// If the type opts out of copy-generation, stop.
|
||||
ttag := extractEnabledTypeTag(t)
|
||||
if ttag != nil && ttag.value == "false" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Filter out private types.
|
||||
if namer.IsPrivateGoName(t.Name.Name) {
|
||||
return false
|
||||
}
|
||||
|
||||
if t.Kind == types.Alias {
|
||||
// if the underlying built-in is not deepcopy-able, deepcopy is opt-in through definition of custom methods.
|
||||
// Note that aliases of builtins, maps, slices can have deepcopy methods.
|
||||
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
|
||||
return true
|
||||
} else {
|
||||
return t.Underlying.Kind != types.Builtin || copyableType(t.Underlying)
|
||||
}
|
||||
}
|
||||
|
||||
if t.Kind != types.Struct {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func underlyingType(t *types.Type) *types.Type {
|
||||
for t.Kind == types.Alias {
|
||||
t = t.Underlying
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) isOtherPackage(pkg string) bool {
|
||||
if pkg == g.targetPackage {
|
||||
return false
|
||||
}
|
||||
if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) Imports(c *generator.Context) (imports []string) {
|
||||
importLines := []string{}
|
||||
for _, singleImport := range g.imports.ImportLines() {
|
||||
if g.isOtherPackage(singleImport) {
|
||||
importLines = append(importLines, singleImport)
|
||||
}
|
||||
}
|
||||
return importLines
|
||||
}
|
||||
|
||||
func argsFromType(ts ...*types.Type) generator.Args {
|
||||
a := generator.Args{
|
||||
"type": ts[0],
|
||||
}
|
||||
for i, t := range ts {
|
||||
a[fmt.Sprintf("type%d", i+1)] = t
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) Init(c *generator.Context, w io.Writer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) needsGeneration(t *types.Type) bool {
|
||||
tag := extractEnabledTypeTag(t)
|
||||
tv := ""
|
||||
if tag != nil {
|
||||
tv = tag.value
|
||||
if tv != "true" && tv != "false" {
|
||||
klog.Fatalf("Type %v: unsupported %s value: %q", t, tagEnabledName, tag.value)
|
||||
}
|
||||
}
|
||||
if g.allTypes && tv == "false" {
|
||||
// The whole package is being generated, but this type has opted out.
|
||||
klog.V(5).Infof("Not generating for type %v because type opted out", t)
|
||||
return false
|
||||
}
|
||||
if !g.allTypes && tv != "true" {
|
||||
// The whole package is NOT being generated, and this type has NOT opted in.
|
||||
klog.V(5).Infof("Not generating for type %v because type did not opt in", t)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func extractInterfacesTag(t *types.Type) []string {
|
||||
var result []string
|
||||
comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...)
|
||||
values := types.ExtractCommentTags("+", comments)[interfacesTagName]
|
||||
for _, v := range values {
|
||||
if len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
intfs := strings.Split(v, ",")
|
||||
for _, intf := range intfs {
|
||||
if intf == "" {
|
||||
continue
|
||||
}
|
||||
result = append(result, intf)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func extractNonPointerInterfaces(t *types.Type) (bool, error) {
|
||||
comments := append(append([]string{}, t.SecondClosestCommentLines...), t.CommentLines...)
|
||||
values := types.ExtractCommentTags("+", comments)[interfacesNonPointerTagName]
|
||||
if len(values) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
result := values[0] == "true"
|
||||
for _, v := range values {
|
||||
if v == "true" != result {
|
||||
return false, fmt.Errorf("contradicting %v value %q found to previous value %v", interfacesNonPointerTagName, v, result)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *genDeepCopy) deepCopyableInterfacesInner(c *generator.Context, t *types.Type) ([]*types.Type, error) {
|
||||
if t.Kind != types.Struct {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
intfs := extractInterfacesTag(t)
|
||||
|
||||
var ts []*types.Type
|
||||
for _, intf := range intfs {
|
||||
t := types.ParseFullyQualifiedName(intf)
|
||||
err := c.AddDir(t.Package)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
intfT := c.Universe.Type(t)
|
||||
if intfT == nil {
|
||||
return nil, fmt.Errorf("unknown type %q in %s tag of type %s", intf, interfacesTagName, intfT)
|
||||
}
|
||||
if intfT.Kind != types.Interface {
|
||||
return nil, fmt.Errorf("type %q in %s tag of type %s is not an interface, but: %q", intf, interfacesTagName, t, intfT.Kind)
|
||||
}
|
||||
g.imports.AddType(intfT)
|
||||
ts = append(ts, intfT)
|
||||
}
|
||||
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
// deepCopyableInterfaces returns the interface types to implement and whether they apply to a non-pointer receiver.
|
||||
func (g *genDeepCopy) deepCopyableInterfaces(c *generator.Context, t *types.Type) ([]*types.Type, bool, error) {
|
||||
ts, err := g.deepCopyableInterfacesInner(c, t)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
set := map[string]*types.Type{}
|
||||
for _, t := range ts {
|
||||
set[t.String()] = t
|
||||
}
|
||||
|
||||
result := []*types.Type{}
|
||||
for _, t := range set {
|
||||
result = append(result, t)
|
||||
}
|
||||
|
||||
TypeSlice(result).Sort() // we need a stable sorting because it determines the order in generation
|
||||
|
||||
nonPointerReceiver, err := extractNonPointerInterfaces(t)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
return result, nonPointerReceiver, nil
|
||||
}
|
||||
|
||||
type TypeSlice []*types.Type
|
||||
|
||||
func (s TypeSlice) Len() int { return len(s) }
|
||||
func (s TypeSlice) Less(i, j int) bool { return s[i].String() < s[j].String() }
|
||||
func (s TypeSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s TypeSlice) Sort() { sort.Sort(s) }
|
||||
|
||||
func (g *genDeepCopy) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
|
||||
if !g.needsGeneration(t) {
|
||||
return nil
|
||||
}
|
||||
klog.V(5).Infof("Generating deepcopy function for type %v", t)
|
||||
|
||||
sw := generator.NewSnippetWriter(w, c, "$", "$")
|
||||
args := argsFromType(t)
|
||||
|
||||
if deepCopyIntoMethodOrDie(t) == nil {
|
||||
sw.Do("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.\n", args)
|
||||
if isReference(t) {
|
||||
sw.Do("func (in $.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args)
|
||||
sw.Do("{in:=&in\n", nil)
|
||||
} else {
|
||||
sw.Do("func (in *$.type|raw$) DeepCopyInto(out *$.type|raw$) {\n", args)
|
||||
}
|
||||
if deepCopyMethodOrDie(t) != nil {
|
||||
if t.Methods["DeepCopy"].Signature.Receiver.Kind == types.Pointer {
|
||||
sw.Do("clone := in.DeepCopy()\n", nil)
|
||||
sw.Do("*out = *clone\n", nil)
|
||||
} else {
|
||||
sw.Do("*out = in.DeepCopy()\n", nil)
|
||||
}
|
||||
sw.Do("return\n", nil)
|
||||
} else {
|
||||
g.generateFor(t, sw)
|
||||
sw.Do("return\n", nil)
|
||||
}
|
||||
if isReference(t) {
|
||||
sw.Do("}\n", nil)
|
||||
}
|
||||
sw.Do("}\n\n", nil)
|
||||
}
|
||||
|
||||
if deepCopyMethodOrDie(t) == nil {
|
||||
sw.Do("// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new $.type|raw$.\n", args)
|
||||
if isReference(t) {
|
||||
sw.Do("func (in $.type|raw$) DeepCopy() $.type|raw$ {\n", args)
|
||||
} else {
|
||||
sw.Do("func (in *$.type|raw$) DeepCopy() *$.type|raw$ {\n", args)
|
||||
}
|
||||
sw.Do("if in == nil { return nil }\n", nil)
|
||||
sw.Do("out := new($.type|raw$)\n", args)
|
||||
sw.Do("in.DeepCopyInto(out)\n", nil)
|
||||
if isReference(t) {
|
||||
sw.Do("return *out\n", nil)
|
||||
} else {
|
||||
sw.Do("return out\n", nil)
|
||||
}
|
||||
sw.Do("}\n\n", nil)
|
||||
}
|
||||
|
||||
intfs, nonPointerReceiver, err := g.deepCopyableInterfaces(c, t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, intf := range intfs {
|
||||
sw.Do(fmt.Sprintf("// DeepCopy%s is an autogenerated deepcopy function, copying the receiver, creating a new $.type2|raw$.\n", intf.Name.Name), argsFromType(t, intf))
|
||||
if nonPointerReceiver {
|
||||
sw.Do(fmt.Sprintf("func (in $.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf))
|
||||
sw.Do("return *in.DeepCopy()", nil)
|
||||
sw.Do("}\n\n", nil)
|
||||
} else {
|
||||
sw.Do(fmt.Sprintf("func (in *$.type|raw$) DeepCopy%s() $.type2|raw$ {\n", intf.Name.Name), argsFromType(t, intf))
|
||||
sw.Do("if c := in.DeepCopy(); c != nil {\n", nil)
|
||||
sw.Do("return c\n", nil)
|
||||
sw.Do("}\n", nil)
|
||||
sw.Do("return nil\n", nil)
|
||||
sw.Do("}\n\n", nil)
|
||||
}
|
||||
}
|
||||
|
||||
return sw.Error()
|
||||
}
|
||||
|
||||
// isReference return true for pointer, maps, slices and aliases of those.
|
||||
func isReference(t *types.Type) bool {
|
||||
if t.Kind == types.Pointer || t.Kind == types.Map || t.Kind == types.Slice {
|
||||
return true
|
||||
}
|
||||
return t.Kind == types.Alias && isReference(underlyingType(t))
|
||||
}
|
||||
|
||||
// we use the system of shadowing 'in' and 'out' so that the same code is valid
|
||||
// at any nesting level. This makes the autogenerator easy to understand, and
|
||||
// the compiler shouldn't care.
|
||||
func (g *genDeepCopy) generateFor(t *types.Type, sw *generator.SnippetWriter) {
|
||||
// derive inner types if t is an alias. We call the do* methods below with the alias type.
|
||||
// basic rule: generate according to inner type, but construct objects with the alias type.
|
||||
ut := underlyingType(t)
|
||||
|
||||
var f func(*types.Type, *generator.SnippetWriter)
|
||||
switch ut.Kind {
|
||||
case types.Builtin:
|
||||
f = g.doBuiltin
|
||||
case types.Map:
|
||||
f = g.doMap
|
||||
case types.Slice:
|
||||
f = g.doSlice
|
||||
case types.Struct:
|
||||
f = g.doStruct
|
||||
case types.Pointer:
|
||||
f = g.doPointer
|
||||
case types.Interface:
|
||||
// interfaces are handled in-line in the other cases
|
||||
klog.Fatalf("Hit an interface type %v. This should never happen.", t)
|
||||
case types.Alias:
|
||||
// can never happen because we branch on the underlying type which is never an alias
|
||||
klog.Fatalf("Hit an alias type %v. This should never happen.", t)
|
||||
default:
|
||||
klog.Fatalf("Hit an unsupported type %v.", t)
|
||||
}
|
||||
f(t, sw)
|
||||
}
|
||||
|
||||
// doBuiltin generates code for a builtin or an alias to a builtin. The generated code is
|
||||
// the same for both cases, i.e. it's the code for the underlying type.
|
||||
func (g *genDeepCopy) doBuiltin(t *types.Type, sw *generator.SnippetWriter) {
|
||||
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
|
||||
sw.Do("*out = in.DeepCopy()\n", nil)
|
||||
return
|
||||
}
|
||||
|
||||
sw.Do("*out = *in\n", nil)
|
||||
}
|
||||
|
||||
// doMap generates code for a map or an alias to a map. The generated code is
|
||||
// the same for both cases, i.e. it's the code for the underlying type.
|
||||
func (g *genDeepCopy) doMap(t *types.Type, sw *generator.SnippetWriter) {
|
||||
ut := underlyingType(t)
|
||||
uet := underlyingType(ut.Elem)
|
||||
|
||||
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
|
||||
sw.Do("*out = in.DeepCopy()\n", nil)
|
||||
return
|
||||
}
|
||||
|
||||
if !ut.Key.IsAssignable() {
|
||||
klog.Fatalf("Hit an unsupported type %v for: %v", uet, t)
|
||||
}
|
||||
|
||||
sw.Do("*out = make($.|raw$, len(*in))\n", t)
|
||||
sw.Do("for key, val := range *in {\n", nil)
|
||||
dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem)
|
||||
switch {
|
||||
case dc != nil || dci != nil:
|
||||
// Note: a DeepCopy exists because it is added if DeepCopyInto is manually defined
|
||||
leftPointer := ut.Elem.Kind == types.Pointer
|
||||
rightPointer := !isReference(ut.Elem)
|
||||
if dc != nil {
|
||||
rightPointer = dc.Results[0].Kind == types.Pointer
|
||||
}
|
||||
if leftPointer == rightPointer {
|
||||
sw.Do("(*out)[key] = val.DeepCopy()\n", nil)
|
||||
} else if leftPointer {
|
||||
sw.Do("x := val.DeepCopy()\n", nil)
|
||||
sw.Do("(*out)[key] = &x\n", nil)
|
||||
} else {
|
||||
sw.Do("(*out)[key] = *val.DeepCopy()\n", nil)
|
||||
}
|
||||
case ut.Elem.IsAnonymousStruct(): // not uet here because it needs type cast
|
||||
sw.Do("(*out)[key] = val\n", nil)
|
||||
case uet.IsAssignable():
|
||||
sw.Do("(*out)[key] = val\n", nil)
|
||||
case uet.Kind == types.Interface:
|
||||
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
|
||||
if uet.Name.Name == "interface{}" {
|
||||
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uet.Name.Name)
|
||||
}
|
||||
sw.Do("if val == nil {(*out)[key]=nil} else {\n", nil)
|
||||
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
|
||||
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
|
||||
// parser does not give us the underlying interface name. So we cannot do any better.
|
||||
sw.Do(fmt.Sprintf("(*out)[key] = val.DeepCopy%s()\n", uet.Name.Name), nil)
|
||||
sw.Do("}\n", nil)
|
||||
case uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer:
|
||||
sw.Do("var outVal $.|raw$\n", uet)
|
||||
sw.Do("if val == nil { (*out)[key] = nil } else {\n", nil)
|
||||
sw.Do("in, out := &val, &outVal\n", uet)
|
||||
g.generateFor(ut.Elem, sw)
|
||||
sw.Do("}\n", nil)
|
||||
sw.Do("(*out)[key] = outVal\n", nil)
|
||||
case uet.Kind == types.Struct:
|
||||
sw.Do("(*out)[key] = *val.DeepCopy()\n", uet)
|
||||
default:
|
||||
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
|
||||
}
|
||||
sw.Do("}\n", nil)
|
||||
}
|
||||
|
||||
// doSlice generates code for a slice or an alias to a slice. The generated code is
|
||||
// the same for both cases, i.e. it's the code for the underlying type.
|
||||
func (g *genDeepCopy) doSlice(t *types.Type, sw *generator.SnippetWriter) {
|
||||
ut := underlyingType(t)
|
||||
uet := underlyingType(ut.Elem)
|
||||
|
||||
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
|
||||
sw.Do("*out = in.DeepCopy()\n", nil)
|
||||
return
|
||||
}
|
||||
|
||||
sw.Do("*out = make($.|raw$, len(*in))\n", t)
|
||||
if deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil {
|
||||
sw.Do("for i := range *in {\n", nil)
|
||||
// Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined
|
||||
sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil)
|
||||
sw.Do("}\n", nil)
|
||||
} else if uet.Kind == types.Builtin || uet.IsAssignable() {
|
||||
sw.Do("copy(*out, *in)\n", nil)
|
||||
} else {
|
||||
sw.Do("for i := range *in {\n", nil)
|
||||
if uet.Kind == types.Slice || uet.Kind == types.Map || uet.Kind == types.Pointer || deepCopyMethodOrDie(ut.Elem) != nil || deepCopyIntoMethodOrDie(ut.Elem) != nil {
|
||||
sw.Do("if (*in)[i] != nil {\n", nil)
|
||||
sw.Do("in, out := &(*in)[i], &(*out)[i]\n", nil)
|
||||
g.generateFor(ut.Elem, sw)
|
||||
sw.Do("}\n", nil)
|
||||
} else if uet.Kind == types.Interface {
|
||||
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
|
||||
if uet.Name.Name == "interface{}" {
|
||||
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uet.Name.Name)
|
||||
}
|
||||
sw.Do("if (*in)[i] != nil {\n", nil)
|
||||
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
|
||||
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
|
||||
// parser does not give us the underlying interface name. So we cannot do any better.
|
||||
sw.Do(fmt.Sprintf("(*out)[i] = (*in)[i].DeepCopy%s()\n", uet.Name.Name), nil)
|
||||
sw.Do("}\n", nil)
|
||||
} else if uet.Kind == types.Struct {
|
||||
sw.Do("(*in)[i].DeepCopyInto(&(*out)[i])\n", nil)
|
||||
} else {
|
||||
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
|
||||
}
|
||||
sw.Do("}\n", nil)
|
||||
}
|
||||
}
|
||||
|
||||
// doStruct generates code for a struct or an alias to a struct. The generated code is
|
||||
// the same for both cases, i.e. it's the code for the underlying type.
|
||||
func (g *genDeepCopy) doStruct(t *types.Type, sw *generator.SnippetWriter) {
|
||||
ut := underlyingType(t)
|
||||
|
||||
if deepCopyMethodOrDie(t) != nil || deepCopyIntoMethodOrDie(t) != nil {
|
||||
sw.Do("*out = in.DeepCopy()\n", nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Simple copy covers a lot of cases.
|
||||
sw.Do("*out = *in\n", nil)
|
||||
|
||||
// Now fix-up fields as needed.
|
||||
for _, m := range ut.Members {
|
||||
ft := m.Type
|
||||
uft := underlyingType(ft)
|
||||
|
||||
args := generator.Args{
|
||||
"type": ft,
|
||||
"kind": ft.Kind,
|
||||
"name": m.Name,
|
||||
}
|
||||
dc, dci := deepCopyMethodOrDie(ft), deepCopyIntoMethodOrDie(ft)
|
||||
switch {
|
||||
case dc != nil || dci != nil:
|
||||
// Note: a DeepCopyInto exists because it is added if DeepCopy is manually defined
|
||||
leftPointer := ft.Kind == types.Pointer
|
||||
rightPointer := !isReference(ft)
|
||||
if dc != nil {
|
||||
rightPointer = dc.Results[0].Kind == types.Pointer
|
||||
}
|
||||
if leftPointer == rightPointer {
|
||||
sw.Do("out.$.name$ = in.$.name$.DeepCopy()\n", args)
|
||||
} else if leftPointer {
|
||||
sw.Do("x := in.$.name$.DeepCopy()\n", args)
|
||||
sw.Do("out.$.name$ = = &x\n", args)
|
||||
} else {
|
||||
sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
|
||||
}
|
||||
case uft.Kind == types.Builtin:
|
||||
// the initial *out = *in was enough
|
||||
case uft.Kind == types.Map, uft.Kind == types.Slice, uft.Kind == types.Pointer:
|
||||
// Fixup non-nil reference-semantic types.
|
||||
sw.Do("if in.$.name$ != nil {\n", args)
|
||||
sw.Do("in, out := &in.$.name$, &out.$.name$\n", args)
|
||||
g.generateFor(ft, sw)
|
||||
sw.Do("}\n", nil)
|
||||
case uft.Kind == types.Array:
|
||||
sw.Do("out.$.name$ = in.$.name$\n", args)
|
||||
case uft.Kind == types.Struct:
|
||||
if ft.IsAssignable() {
|
||||
sw.Do("out.$.name$ = in.$.name$\n", args)
|
||||
} else {
|
||||
sw.Do("in.$.name$.DeepCopyInto(&out.$.name$)\n", args)
|
||||
}
|
||||
case uft.Kind == types.Interface:
|
||||
// Note: do not generate code that won't compile as `DeepCopyinterface{}()` is not a valid function
|
||||
if uft.Name.Name == "interface{}" {
|
||||
klog.Fatalf("DeepCopy of %q is unsupported. Instead, use named interfaces with DeepCopy<named-interface> as one of the methods.", uft.Name.Name)
|
||||
}
|
||||
sw.Do("if in.$.name$ != nil {\n", args)
|
||||
// Note: if t.Elem has been an alias "J" of an interface "I" in Go, we will see it
|
||||
// as kind Interface of name "J" here, i.e. generate val.DeepCopyJ(). The golang
|
||||
// parser does not give us the underlying interface name. So we cannot do any better.
|
||||
sw.Do(fmt.Sprintf("out.$.name$ = in.$.name$.DeepCopy%s()\n", uft.Name.Name), args)
|
||||
sw.Do("}\n", nil)
|
||||
default:
|
||||
klog.Fatalf("Hit an unsupported type %v for %v, from %v", uft, ft, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// doPointer generates code for a pointer or an alias to a pointer. The generated code
// is the same for both cases, i.e. it's the code for the underlying type.
|
||||
func (g *genDeepCopy) doPointer(t *types.Type, sw *generator.SnippetWriter) {
|
||||
ut := underlyingType(t)
|
||||
uet := underlyingType(ut.Elem)
|
||||
|
||||
dc, dci := deepCopyMethodOrDie(ut.Elem), deepCopyIntoMethodOrDie(ut.Elem)
|
||||
switch {
|
||||
case dc != nil || dci != nil:
|
||||
rightPointer := !isReference(ut.Elem)
|
||||
if dc != nil {
|
||||
rightPointer = dc.Results[0].Kind == types.Pointer
|
||||
}
|
||||
if rightPointer {
|
||||
sw.Do("*out = (*in).DeepCopy()\n", nil)
|
||||
} else {
|
||||
sw.Do("x := (*in).DeepCopy()\n", nil)
|
||||
sw.Do("*out = &x\n", nil)
|
||||
}
|
||||
case uet.IsAssignable():
|
||||
sw.Do("*out = new($.Elem|raw$)\n", ut)
|
||||
sw.Do("**out = **in", nil)
|
||||
case uet.Kind == types.Map, uet.Kind == types.Slice, uet.Kind == types.Pointer:
|
||||
sw.Do("*out = new($.Elem|raw$)\n", ut)
|
||||
sw.Do("if **in != nil {\n", nil)
|
||||
sw.Do("in, out := *in, *out\n", nil)
|
||||
g.generateFor(uet, sw)
|
||||
sw.Do("}\n", nil)
|
||||
case uet.Kind == types.Struct:
|
||||
sw.Do("*out = new($.Elem|raw$)\n", ut)
|
||||
sw.Do("(*in).DeepCopyInto(*out)\n", nil)
|
||||
default:
|
||||
klog.Fatalf("Hit an unsupported type %v for %v", uet, t)
|
||||
}
|
||||
}
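For orientation, the branches above (doMap, doSlice, doStruct, doPointer) emit DeepCopyInto/DeepCopy bodies along the following lines. This is a hand-written sketch for a hypothetical type Widget, not output taken from this diff; the type and field names are invented.

package example

// Widget is a hypothetical input type (illustrative only).
type Widget struct {
	Name   string
	Labels map[string]string
	Ports  []int
	Parent *Widget
}

// DeepCopyInto is roughly what deepcopy-gen emits for Widget, following the
// struct, map, slice, and pointer branches above.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in // the simple copy covers Name and the other builtins
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]int, len(*in))
		copy(*out, *in)
	}
	if in.Parent != nil {
		in, out := &in.Parent, &out.Parent
		*out = new(Widget)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy wraps DeepCopyInto, as the generator also emits.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}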
|
||||
1260
vendor/k8s.io/gengo/examples/defaulter-gen/generators/defaulter.go
generated
vendored
File diff suppressed because it is too large
443
vendor/k8s.io/gengo/examples/import-boss/generators/import_restrict.go
generated
vendored
@@ -1,443 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package generators has the generators for the import-boss utility.
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/args"
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
"sigs.k8s.io/yaml"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
|
||||
goModFile = "go.mod"
|
||||
importBossFileType = "import-boss"
|
||||
)
|
||||
|
||||
// NameSystems returns the name system used by the generators in this package.
|
||||
func NameSystems() namer.NameSystems {
|
||||
return namer.NameSystems{
|
||||
"raw": namer.NewRawNamer("", nil),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultNameSystem returns the default name system for ordering the types to be
|
||||
// processed by the generators in this package.
|
||||
func DefaultNameSystem() string {
|
||||
return "raw"
|
||||
}
|
||||
|
||||
// Packages makes the import-boss package definition.
|
||||
func Packages(c *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
|
||||
pkgs := generator.Packages{}
|
||||
c.FileTypes = map[string]generator.FileType{
|
||||
importBossFileType: importRuleFile{c},
|
||||
}
|
||||
|
||||
for _, p := range c.Universe {
|
||||
if !inputIncludes(arguments.InputDirs, p) {
|
||||
// Don't run on e.g. third party dependencies.
|
||||
continue
|
||||
}
|
||||
savedPackage := p
|
||||
pkgs = append(pkgs, &generator.DefaultPackage{
|
||||
PackageName: p.Name,
|
||||
PackagePath: p.Path,
|
||||
Source: p.SourcePath,
|
||||
// GeneratorFunc returns a list of generators. Each generator makes a
|
||||
// single file.
|
||||
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
|
||||
return []generator.Generator{&importRules{
|
||||
myPackage: savedPackage,
|
||||
}}
|
||||
},
|
||||
FilterFunc: func(c *generator.Context, t *types.Type) bool {
|
||||
return false
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return pkgs
|
||||
}
|
||||
|
||||
// inputIncludes returns true if the given package is a (sub) package of one of
|
||||
// the InputDirs.
|
||||
func inputIncludes(inputs []string, p *types.Package) bool {
|
||||
// TODO: This does not handle conversion of local paths (./foo) into
|
||||
// canonical packages (github.com/example/project/foo).
|
||||
for _, input := range inputs {
|
||||
// Normalize paths
|
||||
input := strings.TrimSuffix(input, "/")
|
||||
input = strings.TrimPrefix(input, "./vendor/")
|
||||
seek := strings.TrimSuffix(p.Path, "/")
|
||||
|
||||
if input == seek {
|
||||
return true
|
||||
}
|
||||
if strings.HasSuffix(input, "...") {
|
||||
input = strings.TrimSuffix(input, "...")
|
||||
if strings.HasPrefix(seek+"/", input) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
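To make the path normalization above concrete, here is a minimal test-style sketch, placed in the same generators package because inputIncludes is unexported; the package paths are invented for illustration.

package generators

import (
	"testing"

	"k8s.io/gengo/types"
)

// TestInputIncludesSketch is a hypothetical test; the package paths are examples.
func TestInputIncludesSketch(t *testing.T) {
	pkg := &types.Package{Path: "k8s.io/kubernetes/pkg/kubelet/config"}

	// A leading "./vendor/" is stripped before comparison.
	if !inputIncludes([]string{"./vendor/k8s.io/kubernetes/pkg/kubelet/config"}, pkg) {
		t.Error("expected an exact match after normalization")
	}
	// A trailing "..." matches the package itself and all of its subpackages.
	if !inputIncludes([]string{"k8s.io/kubernetes/pkg/kubelet/..."}, pkg) {
		t.Error("expected the wildcard to match")
	}
	// Unrelated trees do not match.
	if inputIncludes([]string{"k8s.io/kubernetes/pkg/proxy/..."}, pkg) {
		t.Error("did not expect a match")
	}
}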
|
||||
|
||||
// A single import restriction rule.
|
||||
type Rule struct {
|
||||
// All import paths that match this regexp...
|
||||
SelectorRegexp string
|
||||
// ... must have one of these prefixes ...
|
||||
AllowedPrefixes []string
|
||||
// ... and must not have one of these prefixes.
|
||||
ForbiddenPrefixes []string
|
||||
}
|
||||
|
||||
type InverseRule struct {
|
||||
Rule
|
||||
// True if the rule is to be applied to transitive imports.
|
||||
Transitive bool
|
||||
}
|
||||
|
||||
type fileFormat struct {
|
||||
CurrentImports []string
|
||||
|
||||
Rules []Rule
|
||||
InverseRules []InverseRule
|
||||
|
||||
path string
|
||||
}
|
||||
|
||||
func readFile(path string) (*fileFormat, error) {
|
||||
currentBytes, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't read %v: %v", path, err)
|
||||
}
|
||||
|
||||
var current fileFormat
|
||||
err = yaml.Unmarshal(currentBytes, ¤t)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't unmarshal %v: %v", path, err)
|
||||
}
|
||||
current.path = path
|
||||
return ¤t, nil
|
||||
}
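For reference, readFile expects an .import-restrictions document shaped like the fileFormat struct above. The sketch below unmarshals a small, hypothetical example with the same sigs.k8s.io/yaml call; the rule values are invented.

package generators

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// exampleRestrictions is a hypothetical .import-restrictions document; the
// prefixes are invented for illustration.
const exampleRestrictions = `
rules:
  - selectorRegexp: k8s[.]io
    allowedPrefixes:
      - k8s.io/gengo
      - k8s.io/klog
    forbiddenPrefixes:
      - k8s.io/kubernetes
inverseRules:
  - selectorRegexp: k8s[.]io
    allowedPrefixes:
      - k8s.io/code-generator
    transitive: true
`

// exampleReadRules parses the document the same way readFile does.
func exampleReadRules() (*fileFormat, error) {
	var ff fileFormat
	if err := yaml.Unmarshal([]byte(exampleRestrictions), &ff); err != nil {
		return nil, fmt.Errorf("couldn't unmarshal example: %v", err)
	}
	return &ff, nil // ff.Rules holds one rule, ff.InverseRules one transitive rule
}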
|
||||
|
||||
func writeFile(path string, ff *fileFormat) error {
|
||||
raw, err := json.MarshalIndent(ff, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't format data for file %v.\n%#v", path, ff)
|
||||
}
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't open %v for writing: %v", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write(raw)
|
||||
return err
|
||||
}
|
||||
|
||||
// This does the actual checking, since it knows the literal destination file.
|
||||
type importRuleFile struct {
|
||||
context *generator.Context
|
||||
}
|
||||
|
||||
func (irf importRuleFile) AssembleFile(f *generator.File, path string) error {
|
||||
return irf.VerifyFile(f, path)
|
||||
}
|
||||
|
||||
// TODO: make a flag to enable this, or expose this information in some other way.
|
||||
func (importRuleFile) listEntireImportTree(f *generator.File, path string) error {
|
||||
// If the file exists, populate its current imports. This is mostly to help
|
||||
// humans figure out what they need to fix.
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
// Ignore packages which haven't opted in by adding an .import-restrictions file.
|
||||
return nil
|
||||
}
|
||||
|
||||
current, err := readFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
current.CurrentImports = []string{}
|
||||
for v := range f.Imports {
|
||||
current.CurrentImports = append(current.CurrentImports, v)
|
||||
}
|
||||
sort.Strings(current.CurrentImports)
|
||||
|
||||
return writeFile(path, current)
|
||||
}
|
||||
|
||||
// removeLastDir removes the last directory, but leaves the file name
|
||||
// unchanged. It returns the new path and the removed directory. So:
|
||||
// "a/b/c/file" -> ("a/b/file", "c")
|
||||
func removeLastDir(path string) (newPath, removedDir string) {
|
||||
dir, file := filepath.Split(path)
|
||||
dir = strings.TrimSuffix(dir, string(filepath.Separator))
|
||||
return filepath.Join(filepath.Dir(dir), file), filepath.Base(dir)
|
||||
}
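A quick test-style sketch of the documented behavior, assuming Unix-style path separators; the file name is arbitrary.

package generators

import "testing"

// TestRemoveLastDirSketch is a hypothetical test of the documented behavior;
// path separators assume a Unix-like platform.
func TestRemoveLastDirSketch(t *testing.T) {
	newPath, removed := removeLastDir("a/b/c/.import-restrictions")
	if newPath != "a/b/.import-restrictions" || removed != "c" {
		t.Errorf("got (%q, %q)", newPath, removed)
	}
}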
|
||||
|
||||
// isGoModRoot checks if a directory is the root directory for a package
|
||||
// by checking for the existence of a 'go.mod' file in that directory.
|
||||
func isGoModRoot(path string) bool {
|
||||
_, err := os.Stat(filepath.Join(filepath.Dir(path), goModFile))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// recursiveRead collects all '.import-restriction' files, between the current directory,
|
||||
// and the package root when Go modules are enabled, or $GOPATH/src when they are not.
|
||||
func recursiveRead(path string) ([]*fileFormat, error) {
|
||||
restrictionFiles := make([]*fileFormat, 0)
|
||||
|
||||
for {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
rules, err := readFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
restrictionFiles = append(restrictionFiles, rules)
|
||||
}
|
||||
|
||||
nextPath, removedDir := removeLastDir(path)
|
||||
if nextPath == path || isGoModRoot(path) || removedDir == "src" {
|
||||
break
|
||||
}
|
||||
|
||||
path = nextPath
|
||||
}
|
||||
|
||||
return restrictionFiles, nil
|
||||
}
|
||||
|
||||
func (irf importRuleFile) VerifyFile(f *generator.File, path string) error {
|
||||
restrictionFiles, err := recursiveRead(filepath.Join(f.PackageSourcePath, f.Name))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding rules file: %v", err)
|
||||
}
|
||||
|
||||
if err := irf.verifyRules(restrictionFiles, f); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return irf.verifyInverseRules(restrictionFiles, f)
|
||||
}
|
||||
|
||||
func (irf importRuleFile) verifyRules(restrictionFiles []*fileFormat, f *generator.File) error {
|
||||
selectors := make([][]*regexp.Regexp, len(restrictionFiles))
|
||||
for i, restrictionFile := range restrictionFiles {
|
||||
for _, r := range restrictionFile.Rules {
|
||||
re, err := regexp.Compile(r.SelectorRegexp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err)
|
||||
}
|
||||
|
||||
selectors[i] = append(selectors[i], re)
|
||||
}
|
||||
}
|
||||
|
||||
forbiddenImports := map[string]string{}
|
||||
allowedMismatchedImports := []string{}
|
||||
|
||||
for v := range f.Imports {
|
||||
explicitlyAllowed := false
|
||||
|
||||
NextRestrictionFiles:
|
||||
for i, rules := range restrictionFiles {
|
||||
for j, r := range rules.Rules {
|
||||
matching := selectors[i][j].MatchString(v)
|
||||
klog.V(5).Infof("Checking %v matches %v: %v\n", r.SelectorRegexp, v, matching)
|
||||
if !matching {
|
||||
continue
|
||||
}
|
||||
for _, forbidden := range r.ForbiddenPrefixes {
|
||||
klog.V(4).Infof("Checking %v against %v\n", v, forbidden)
|
||||
if strings.HasPrefix(v, forbidden) {
|
||||
forbiddenImports[v] = forbidden
|
||||
}
|
||||
}
|
||||
for _, allowed := range r.AllowedPrefixes {
|
||||
klog.V(4).Infof("Checking %v against %v\n", v, allowed)
|
||||
if strings.HasPrefix(v, allowed) {
|
||||
explicitlyAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !explicitlyAllowed {
|
||||
allowedMismatchedImports = append(allowedMismatchedImports, v)
|
||||
} else {
|
||||
klog.V(2).Infof("%v importing %v allowed by %v\n", f.PackagePath, v, restrictionFiles[i].path)
|
||||
break NextRestrictionFiles
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 {
|
||||
var errorBuilder strings.Builder
|
||||
for i, f := range forbiddenImports {
|
||||
fmt.Fprintf(&errorBuilder, "import %v has forbidden prefix %v\n", i, f)
|
||||
}
|
||||
if len(allowedMismatchedImports) > 0 {
|
||||
sort.Sort(sort.StringSlice(allowedMismatchedImports))
|
||||
fmt.Fprintf(&errorBuilder, "the following imports did not match any allowed prefix:\n")
|
||||
for _, i := range allowedMismatchedImports {
|
||||
fmt.Fprintf(&errorBuilder, " %v\n", i)
|
||||
}
|
||||
}
|
||||
return errors.New(errorBuilder.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyInverseRules checks that all packages that import a package are allowed to import it.
|
||||
func (irf importRuleFile) verifyInverseRules(restrictionFiles []*fileFormat, f *generator.File) error {
|
||||
// compile all Selector regex in all restriction files
|
||||
selectors := make([][]*regexp.Regexp, len(restrictionFiles))
|
||||
for i, restrictionFile := range restrictionFiles {
|
||||
for _, r := range restrictionFile.InverseRules {
|
||||
re, err := regexp.Compile(r.SelectorRegexp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("regexp `%s` in file %q doesn't compile: %v", r.SelectorRegexp, restrictionFile.path, err)
|
||||
}
|
||||
|
||||
selectors[i] = append(selectors[i], re)
|
||||
}
|
||||
}
|
||||
|
||||
directImport := map[string]bool{}
|
||||
for _, imp := range irf.context.IncomingImports()[f.PackagePath] {
|
||||
directImport[imp] = true
|
||||
}
|
||||
|
||||
forbiddenImports := map[string]string{}
|
||||
allowedMismatchedImports := []string{}
|
||||
|
||||
for _, v := range irf.context.TransitiveIncomingImports()[f.PackagePath] {
|
||||
explicitlyAllowed := false
|
||||
|
||||
NextRestrictionFiles:
|
||||
for i, rules := range restrictionFiles {
|
||||
for j, r := range rules.InverseRules {
|
||||
if !r.Transitive && !directImport[v] {
|
||||
continue
|
||||
}
|
||||
|
||||
re := selectors[i][j]
|
||||
matching := re.MatchString(v)
|
||||
klog.V(4).Infof("Checking %v matches %v (importing %v: %v\n", r.SelectorRegexp, v, f.PackagePath, matching)
|
||||
if !matching {
|
||||
continue
|
||||
}
|
||||
for _, forbidden := range r.ForbiddenPrefixes {
|
||||
klog.V(4).Infof("Checking %v against %v\n", v, forbidden)
|
||||
if strings.HasPrefix(v, forbidden) {
|
||||
forbiddenImports[v] = forbidden
|
||||
}
|
||||
}
|
||||
for _, allowed := range r.AllowedPrefixes {
|
||||
klog.V(4).Infof("Checking %v against %v\n", v, allowed)
|
||||
if strings.HasPrefix(v, allowed) {
|
||||
explicitlyAllowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !explicitlyAllowed {
|
||||
allowedMismatchedImports = append(allowedMismatchedImports, v)
|
||||
} else {
|
||||
klog.V(2).Infof("%v importing %v allowed by %v\n", v, f.PackagePath, restrictionFiles[i].path)
|
||||
break NextRestrictionFiles
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(forbiddenImports) > 0 || len(allowedMismatchedImports) > 0 {
|
||||
var errorBuilder strings.Builder
|
||||
for i, f := range forbiddenImports {
|
||||
fmt.Fprintf(&errorBuilder, "(inverse): import %v has forbidden prefix %v\n", i, f)
|
||||
}
|
||||
if len(allowedMismatchedImports) > 0 {
|
||||
sort.Sort(sort.StringSlice(allowedMismatchedImports))
|
||||
fmt.Fprintf(&errorBuilder, "(inverse): the following imports did not match any allowed prefix:\n")
|
||||
for _, i := range allowedMismatchedImports {
|
||||
fmt.Fprintf(&errorBuilder, " %v\n", i)
|
||||
}
|
||||
}
|
||||
return errors.New(errorBuilder.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// importRules produces the .import-restrictions output for a single package.
|
||||
type importRules struct {
|
||||
myPackage *types.Package
|
||||
imports namer.ImportTracker
|
||||
}
|
||||
|
||||
var (
|
||||
_ = generator.Generator(&importRules{})
|
||||
_ = generator.FileType(importRuleFile{})
|
||||
)
|
||||
|
||||
func (r *importRules) Name() string { return "import rules" }
|
||||
func (r *importRules) Filter(*generator.Context, *types.Type) bool { return false }
|
||||
func (r *importRules) Namers(*generator.Context) namer.NameSystems { return nil }
|
||||
func (r *importRules) PackageVars(*generator.Context) []string { return []string{} }
|
||||
func (r *importRules) PackageConsts(*generator.Context) []string { return []string{} }
|
||||
func (r *importRules) GenerateType(*generator.Context, *types.Type, io.Writer) error { return nil }
|
||||
func (r *importRules) Filename() string { return ".import-restrictions" }
|
||||
func (r *importRules) FileType() string { return importBossFileType }
|
||||
func (r *importRules) Init(c *generator.Context, w io.Writer) error { return nil }
|
||||
func (r *importRules) Finalize(*generator.Context, io.Writer) error { return nil }
|
||||
|
||||
func dfsImports(dest *[]string, seen map[string]bool, p *types.Package) {
|
||||
for _, p2 := range p.Imports {
|
||||
if seen[p2.Path] {
|
||||
continue
|
||||
}
|
||||
seen[p2.Path] = true
|
||||
dfsImports(dest, seen, p2)
|
||||
*dest = append(*dest, p2.Path)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *importRules) Imports(*generator.Context) []string {
|
||||
all := []string{}
|
||||
dfsImports(&all, map[string]bool{}, r.myPackage)
|
||||
return all
|
||||
}
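The Imports method walks the package graph depth-first via dfsImports, so dependencies are listed before their importers. Below is a minimal sketch on a hand-built graph, assuming types.Package.Imports is a map keyed by import path (as the range in dfsImports suggests); the package paths are invented.

package generators

import (
	"fmt"

	"k8s.io/gengo/types"
)

// exampleDFSImports walks a tiny, hand-built graph a -> b -> c. The Imports
// field is assumed to be a map keyed by import path; the paths are invented.
func exampleDFSImports() {
	c := &types.Package{Path: "example.com/c"}
	b := &types.Package{Path: "example.com/b", Imports: map[string]*types.Package{"example.com/c": c}}
	a := &types.Package{Path: "example.com/a", Imports: map[string]*types.Package{"example.com/b": b}}

	all := []string{}
	dfsImports(&all, map[string]bool{}, a)
	fmt.Println(all) // [example.com/c example.com/b]: dependencies come before their importers
}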
|
||||
378
vendor/k8s.io/gengo/examples/set-gen/generators/sets.go
generated
vendored
@@ -1,378 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package generators has the generators for the set-gen utility.
|
||||
package generators
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"k8s.io/gengo/args"
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// NameSystems returns the name system used by the generators in this package.
|
||||
func NameSystems() namer.NameSystems {
|
||||
return namer.NameSystems{
|
||||
"public": namer.NewPublicNamer(0),
|
||||
"private": namer.NewPrivateNamer(0),
|
||||
"raw": namer.NewRawNamer("", nil),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultNameSystem returns the default name system for ordering the types to be
|
||||
// processed by the generators in this package.
|
||||
func DefaultNameSystem() string {
|
||||
return "public"
|
||||
}
|
||||
|
||||
// Packages makes the sets package definition.
|
||||
func Packages(_ *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
|
||||
boilerplate, err := arguments.LoadGoBoilerplate()
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed loading boilerplate: %v", err)
|
||||
}
|
||||
|
||||
return generator.Packages{&generator.DefaultPackage{
|
||||
PackageName: "sets",
|
||||
PackagePath: arguments.OutputPackagePath,
|
||||
HeaderText: boilerplate,
|
||||
PackageDocumentation: []byte(
|
||||
`// Package sets has auto-generated set types.
|
||||
`),
|
||||
// GeneratorFunc returns a list of generators. Each generator makes a
|
||||
// single file.
|
||||
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
|
||||
generators = []generator.Generator{
|
||||
// Always generate a "doc.go" file.
|
||||
generator.DefaultGen{OptionalName: "doc"},
|
||||
// Make a separate file for the Empty type, since it's shared by every type.
|
||||
generator.DefaultGen{
|
||||
OptionalName: "empty",
|
||||
OptionalBody: []byte(emptyTypeDecl),
|
||||
},
|
||||
}
|
||||
// Since we want a file per type that we generate a set for, we
|
||||
// have to provide a function for this.
|
||||
for _, t := range c.Order {
|
||||
generators = append(generators, &genSet{
|
||||
DefaultGen: generator.DefaultGen{
|
||||
// Use the privatized version of the
|
||||
// type name as the file name.
|
||||
//
|
||||
// TODO: make a namer that converts
|
||||
// camelCase to '-' separation for file
|
||||
// names?
|
||||
OptionalName: c.Namers["private"].Name(t),
|
||||
},
|
||||
outputPackage: arguments.OutputPackagePath,
|
||||
typeToMatch: t,
|
||||
imports: generator.NewImportTracker(),
|
||||
})
|
||||
}
|
||||
return generators
|
||||
},
|
||||
FilterFunc: func(c *generator.Context, t *types.Type) bool {
|
||||
// It would be reasonable to filter by the type's package here.
|
||||
// It might be necessary if your input directory has a big
|
||||
// import graph.
|
||||
switch t.Kind {
|
||||
case types.Map, types.Slice, types.Pointer:
|
||||
// These types can't be keys in a map.
|
||||
return false
|
||||
case types.Builtin:
|
||||
return true
|
||||
case types.Struct:
|
||||
// Only some structs can be keys in a map. This is triggered by the line
|
||||
// // +genset
|
||||
// or
|
||||
// // +genset=true
|
||||
return extractBoolTagOrDie("genset", t.CommentLines) == true
|
||||
}
|
||||
return false
|
||||
},
|
||||
}}
|
||||
}
|
||||
|
||||
// genSet produces a file with a set for a single type.
|
||||
type genSet struct {
|
||||
generator.DefaultGen
|
||||
outputPackage string
|
||||
typeToMatch *types.Type
|
||||
imports namer.ImportTracker
|
||||
}
|
||||
|
||||
// Filter ignores all but one type because we're making a single file per type.
|
||||
func (g *genSet) Filter(c *generator.Context, t *types.Type) bool { return t == g.typeToMatch }
|
||||
|
||||
func (g *genSet) Namers(c *generator.Context) namer.NameSystems {
|
||||
return namer.NameSystems{
|
||||
"raw": namer.NewRawNamer(g.outputPackage, g.imports),
|
||||
}
|
||||
}
|
||||
|
||||
func (g *genSet) Imports(c *generator.Context) (imports []string) {
|
||||
return append(g.imports.ImportLines(), "reflect", "sort")
|
||||
}
|
||||
|
||||
// args constructs arguments for templates. Usage:
|
||||
// g.args(t, "key1", value1, "key2", value2, ...)
|
||||
//
|
||||
// 't' is loaded with the key 'type'.
|
||||
//
|
||||
// We could use t directly as the argument, but doing it this way makes it easy
|
||||
// to mix in additional parameters. This feature is not used in this set
|
||||
// generator, but is present as an example.
|
||||
func (g *genSet) args(t *types.Type, kv ...interface{}) interface{} {
|
||||
m := map[interface{}]interface{}{"type": t}
|
||||
for i := 0; i < len(kv)/2; i++ {
|
||||
m[kv[i*2]] = kv[i*2+1]
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// GenerateType makes the body of a file implementing a set for type t.
|
||||
func (g *genSet) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
|
||||
sw := generator.NewSnippetWriter(w, c, "$", "$")
|
||||
sw.Do(setCode, g.args(t))
|
||||
sw.Do("func less$.type|public$(lhs, rhs $.type|raw$) bool {\n", g.args(t))
|
||||
g.lessBody(sw, t)
|
||||
sw.Do("}\n", g.args(t))
|
||||
return sw.Error()
|
||||
}
|
||||
|
||||
func (g *genSet) lessBody(sw *generator.SnippetWriter, t *types.Type) {
|
||||
// TODO: make this recursive, handle pointers and multiple nested structs...
|
||||
switch t.Kind {
|
||||
case types.Struct:
|
||||
for _, m := range types.FlattenMembers(t.Members) {
|
||||
sw.Do("if lhs.$.Name$ < rhs.$.Name$ { return true }\n", m)
|
||||
sw.Do("if lhs.$.Name$ > rhs.$.Name$ { return false }\n", m)
|
||||
}
|
||||
sw.Do("return false\n", nil)
|
||||
default:
|
||||
sw.Do("return lhs < rhs\n", nil)
|
||||
}
|
||||
}
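To make lessBody concrete: for a hypothetical struct key type with two comparable fields, the comparator it writes into the generated file reads roughly as follows (hand-expanded sketch, not actual generator output).

package sets

// endpoint is a hypothetical key type (illustrative only).
type endpoint struct {
	Host string
	Port int
}

// lessEndpoint is roughly the comparator GenerateType/lessBody emit for a
// struct: compare the flattened members field by field.
func lessEndpoint(lhs, rhs endpoint) bool {
	if lhs.Host < rhs.Host {
		return true
	}
	if lhs.Host > rhs.Host {
		return false
	}
	if lhs.Port < rhs.Port {
		return true
	}
	if lhs.Port > rhs.Port {
		return false
	}
	return false
}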
|
||||
|
||||
// written to the "empty.go" file.
|
||||
var emptyTypeDecl = `
|
||||
// Empty is public since it is used by some internal API objects for conversions between external
|
||||
// string arrays and internal sets, and conversion logic requires public types today.
|
||||
type Empty struct{}
|
||||
`
|
||||
|
||||
// Written for every type. If you've never used text/template before:
|
||||
// $.type$ refers to the source type; |public means to
|
||||
// call the function giving the public name, |raw the raw type name.
|
||||
var setCode = `// sets.$.type|public$ is a set of $.type|raw$s, implemented via map[$.type|raw$]struct{} for minimal memory consumption.
|
||||
type $.type|public$ map[$.type|raw$]Empty
|
||||
|
||||
// New$.type|public$ creates a $.type|public$ from a list of values.
|
||||
func New$.type|public$(items ...$.type|raw$) $.type|public$ {
|
||||
ss := make($.type|public$, len(items))
|
||||
ss.Insert(items...)
|
||||
return ss
|
||||
}
|
||||
|
||||
// $.type|public$KeySet creates a $.type|public$ from the keys of a map[$.type|raw$](? extends interface{}).
|
||||
// If the value passed in is not actually a map, this will panic.
|
||||
func $.type|public$KeySet(theMap interface{}) $.type|public$ {
|
||||
v := reflect.ValueOf(theMap)
|
||||
ret := $.type|public${}
|
||||
|
||||
for _, keyValue := range v.MapKeys() {
|
||||
ret.Insert(keyValue.Interface().($.type|raw$))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s $.type|public$) Insert(items ...$.type|raw$) $.type|public$ {
|
||||
for _, item := range items {
|
||||
s[item] = Empty{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s $.type|public$) Delete(items ...$.type|raw$) $.type|public$ {
|
||||
for _, item := range items {
|
||||
delete(s, item)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s $.type|public$) Has(item $.type|raw$) bool {
|
||||
_, contained := s[item]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s $.type|public$) HasAll(items ...$.type|raw$) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasAny returns true if any items are contained in the set.
|
||||
func (s $.type|public$) HasAny(items ...$.type|raw$) bool {
|
||||
for _, item := range items {
|
||||
if s.Has(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a new set which is a copy of the current set.
|
||||
func (s $.type|public$) Clone() $.type|public$ {
|
||||
result := make($.type|public$, len(s))
|
||||
for key := range s {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s1 $.type|public$) Difference(s2 $.type|public$) $.type|public$ {
|
||||
result := New$.type|public$()
|
||||
for key := range s1 {
|
||||
if !s2.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.SymmetricDifference(s2) = {a3, a4, a5}
|
||||
// s2.SymmetricDifference(s1) = {a3, a4, a5}
|
||||
func (s1 $.type|public$) SymmetricDifference(s2 $.type|public$) $.type|public$ {
|
||||
return s1.Difference(s2).Union(s2.Difference(s1))
|
||||
}
|
||||
|
||||
// Union returns a new set which includes items in either s1 or s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a3, a4}
|
||||
// s1.Union(s2) = {a1, a2, a3, a4}
|
||||
// s2.Union(s1) = {a1, a2, a3, a4}
|
||||
func (s1 $.type|public$) Union(s2 $.type|public$) $.type|public$ {
|
||||
result := s1.Clone()
|
||||
for key := range s2 {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Intersection returns a new set which includes the item in BOTH s1 and s2
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a2, a3}
|
||||
// s1.Intersection(s2) = {a2}
|
||||
func (s1 $.type|public$) Intersection(s2 $.type|public$) $.type|public$ {
|
||||
var walk, other $.type|public$
|
||||
result := New$.type|public$()
|
||||
if s1.Len() < s2.Len() {
|
||||
walk = s1
|
||||
other = s2
|
||||
} else {
|
||||
walk = s2
|
||||
other = s1
|
||||
}
|
||||
for key := range walk {
|
||||
if other.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 $.type|public$) IsSuperset(s2 $.type|public$) bool {
|
||||
for item := range s2 {
|
||||
if !s1.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 $.type|public$) Equal(s2 $.type|public$) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
type sortableSliceOf$.type|public$ []$.type|raw$
|
||||
|
||||
func (s sortableSliceOf$.type|public$) Len() int { return len(s) }
|
||||
func (s sortableSliceOf$.type|public$) Less(i, j int) bool { return less$.type|public$(s[i], s[j]) }
|
||||
func (s sortableSliceOf$.type|public$) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// List returns the contents as a sorted $.type|raw$ slice.
|
||||
func (s $.type|public$) List() []$.type|raw$ {
|
||||
res := make(sortableSliceOf$.type|public$, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
sort.Sort(res)
|
||||
return []$.type|raw$(res)
|
||||
}
|
||||
|
||||
// UnsortedList returns the slice with contents in random order.
|
||||
func (s $.type|public$) UnsortedList() []$.type|raw$ {
|
||||
res := make([]$.type|raw$, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns a single element from the set.
|
||||
func (s $.type|public$) PopAny() ($.type|raw$, bool) {
|
||||
for key := range s {
|
||||
s.Delete(key)
|
||||
return key, true
|
||||
}
|
||||
var zeroValue $.type|raw$
|
||||
return zeroValue, false
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s $.type|public$) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
`
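For reference, the API this template expands to reads like the following in use; a minimal sketch against the generated sets.String type vendored below, with expected results in comments.

package main

import (
	"fmt"

	"k8s.io/gengo/examples/set-gen/sets"
)

func main() {
	s1 := sets.NewString("a", "b", "c")
	s2 := sets.NewString("b", "c", "d")

	fmt.Println(s1.Has("a"))                       // true
	fmt.Println(s1.Intersection(s2).List())        // [b c]
	fmt.Println(s1.Difference(s2).List())          // [a]
	fmt.Println(s1.Union(s2).List())               // [a b c d]
	fmt.Println(s1.SymmetricDifference(s2).List()) // [a d]
}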
|
||||
33
vendor/k8s.io/gengo/examples/set-gen/generators/tags.go
generated
vendored
@@ -1,33 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generators
|
||||
|
||||
import (
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
|
||||
// it exists, the value is boolean. If the tag did not exist, it returns
|
||||
// false.
|
||||
func extractBoolTagOrDie(key string, lines []string) bool {
|
||||
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
|
||||
if err != nil {
|
||||
klog.Fatalf(err.Error())
|
||||
}
|
||||
return val
|
||||
}
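A small sketch of the comment-tag convention this helper reads, in the same generators package because extractBoolTagOrDie is unexported; the comment lines are hypothetical.

package generators

import "fmt"

// exampleGensetTag shows the comment-tag form extractBoolTagOrDie reads.
// A type opts in to set generation with a comment such as "+genset" or
// "+genset=true" in its doc comment lines (hypothetical lines below).
func exampleGensetTag() {
	commentLines := []string{
		"Endpoint is a host/port pair.",
		"+genset=true",
	}
	fmt.Println(extractBoolTagOrDie("genset", commentLines)) // true
}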
|
||||
221
vendor/k8s.io/gengo/examples/set-gen/sets/byte.go
generated
vendored
@@ -1,221 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
package sets
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption.
|
||||
type Byte map[byte]Empty
|
||||
|
||||
// NewByte creates a Byte from a list of values.
|
||||
func NewByte(items ...byte) Byte {
|
||||
ss := make(Byte, len(items))
|
||||
ss.Insert(items...)
|
||||
return ss
|
||||
}
|
||||
|
||||
// ByteKeySet creates a Byte from the keys of a map[byte](? extends interface{}).
|
||||
// If the value passed in is not actually a map, this will panic.
|
||||
func ByteKeySet(theMap interface{}) Byte {
|
||||
v := reflect.ValueOf(theMap)
|
||||
ret := Byte{}
|
||||
|
||||
for _, keyValue := range v.MapKeys() {
|
||||
ret.Insert(keyValue.Interface().(byte))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s Byte) Insert(items ...byte) Byte {
|
||||
for _, item := range items {
|
||||
s[item] = Empty{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s Byte) Delete(items ...byte) Byte {
|
||||
for _, item := range items {
|
||||
delete(s, item)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s Byte) Has(item byte) bool {
|
||||
_, contained := s[item]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s Byte) HasAll(items ...byte) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasAny returns true if any items are contained in the set.
|
||||
func (s Byte) HasAny(items ...byte) bool {
|
||||
for _, item := range items {
|
||||
if s.Has(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a new set which is a copy of the current set.
|
||||
func (s Byte) Clone() Byte {
|
||||
result := make(Byte, len(s))
|
||||
for key := range s {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s1 Byte) Difference(s2 Byte) Byte {
|
||||
result := NewByte()
|
||||
for key := range s1 {
|
||||
if !s2.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.SymmetricDifference(s2) = {a3, a4, a5}
|
||||
// s2.SymmetricDifference(s1) = {a3, a4, a5}
|
||||
func (s1 Byte) SymmetricDifference(s2 Byte) Byte {
|
||||
return s1.Difference(s2).Union(s2.Difference(s1))
|
||||
}
|
||||
|
||||
// Union returns a new set which includes items in either s1 or s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a3, a4}
|
||||
// s1.Union(s2) = {a1, a2, a3, a4}
|
||||
// s2.Union(s1) = {a1, a2, a3, a4}
|
||||
func (s1 Byte) Union(s2 Byte) Byte {
|
||||
result := s1.Clone()
|
||||
for key := range s2 {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Intersection returns a new set which includes the item in BOTH s1 and s2
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a2, a3}
|
||||
// s1.Intersection(s2) = {a2}
|
||||
func (s1 Byte) Intersection(s2 Byte) Byte {
|
||||
var walk, other Byte
|
||||
result := NewByte()
|
||||
if s1.Len() < s2.Len() {
|
||||
walk = s1
|
||||
other = s2
|
||||
} else {
|
||||
walk = s2
|
||||
other = s1
|
||||
}
|
||||
for key := range walk {
|
||||
if other.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 Byte) IsSuperset(s2 Byte) bool {
|
||||
for item := range s2 {
|
||||
if !s1.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 Byte) Equal(s2 Byte) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
type sortableSliceOfByte []byte
|
||||
|
||||
func (s sortableSliceOfByte) Len() int { return len(s) }
|
||||
func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) }
|
||||
func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// List returns the contents as a sorted byte slice.
|
||||
func (s Byte) List() []byte {
|
||||
res := make(sortableSliceOfByte, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
sort.Sort(res)
|
||||
return []byte(res)
|
||||
}
|
||||
|
||||
// UnsortedList returns the slice with contents in random order.
|
||||
func (s Byte) UnsortedList() []byte {
|
||||
res := make([]byte, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns a single element from the set.
|
||||
func (s Byte) PopAny() (byte, bool) {
|
||||
for key := range s {
|
||||
s.Delete(key)
|
||||
return key, true
|
||||
}
|
||||
var zeroValue byte
|
||||
return zeroValue, false
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s Byte) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func lessByte(lhs, rhs byte) bool {
|
||||
return lhs < rhs
|
||||
}
|
||||
20
vendor/k8s.io/gengo/examples/set-gen/sets/doc.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
// Package sets has auto-generated set types.
|
||||
package sets
|
||||
23
vendor/k8s.io/gengo/examples/set-gen/sets/empty.go
generated
vendored
@@ -1,23 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
package sets
|
||||
|
||||
// Empty is public since it is used by some internal API objects for conversions between external
|
||||
// string arrays and internal sets, and conversion logic requires public types today.
|
||||
type Empty struct{}
|
||||
221
vendor/k8s.io/gengo/examples/set-gen/sets/int.go
generated
vendored
@@ -1,221 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
package sets
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption.
|
||||
type Int map[int]Empty
|
||||
|
||||
// NewInt creates an Int from a list of values.
|
||||
func NewInt(items ...int) Int {
|
||||
ss := make(Int, len(items))
|
||||
ss.Insert(items...)
|
||||
return ss
|
||||
}
|
||||
|
||||
// IntKeySet creates an Int from the keys of a map[int](? extends interface{}).
|
||||
// If the value passed in is not actually a map, this will panic.
|
||||
func IntKeySet(theMap interface{}) Int {
|
||||
v := reflect.ValueOf(theMap)
|
||||
ret := Int{}
|
||||
|
||||
for _, keyValue := range v.MapKeys() {
|
||||
ret.Insert(keyValue.Interface().(int))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s Int) Insert(items ...int) Int {
|
||||
for _, item := range items {
|
||||
s[item] = Empty{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s Int) Delete(items ...int) Int {
|
||||
for _, item := range items {
|
||||
delete(s, item)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s Int) Has(item int) bool {
|
||||
_, contained := s[item]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s Int) HasAll(items ...int) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasAny returns true if any items are contained in the set.
|
||||
func (s Int) HasAny(items ...int) bool {
|
||||
for _, item := range items {
|
||||
if s.Has(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a new set which is a copy of the current set.
|
||||
func (s Int) Clone() Int {
|
||||
result := make(Int, len(s))
|
||||
for key := range s {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s1 Int) Difference(s2 Int) Int {
|
||||
result := NewInt()
|
||||
for key := range s1 {
|
||||
if !s2.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.SymmetricDifference(s2) = {a3, a4, a5}
|
||||
// s2.SymmetricDifference(s1) = {a3, a4, a5}
|
||||
func (s1 Int) SymmetricDifference(s2 Int) Int {
|
||||
return s1.Difference(s2).Union(s2.Difference(s1))
|
||||
}
|
||||
|
||||
// Union returns a new set which includes items in either s1 or s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a3, a4}
|
||||
// s1.Union(s2) = {a1, a2, a3, a4}
|
||||
// s2.Union(s1) = {a1, a2, a3, a4}
|
||||
func (s1 Int) Union(s2 Int) Int {
|
||||
result := s1.Clone()
|
||||
for key := range s2 {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Intersection returns a new set which includes the item in BOTH s1 and s2
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a2, a3}
|
||||
// s1.Intersection(s2) = {a2}
|
||||
func (s1 Int) Intersection(s2 Int) Int {
|
||||
var walk, other Int
|
||||
result := NewInt()
|
||||
if s1.Len() < s2.Len() {
|
||||
walk = s1
|
||||
other = s2
|
||||
} else {
|
||||
walk = s2
|
||||
other = s1
|
||||
}
|
||||
for key := range walk {
|
||||
if other.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 Int) IsSuperset(s2 Int) bool {
|
||||
for item := range s2 {
|
||||
if !s1.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 Int) Equal(s2 Int) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
type sortableSliceOfInt []int
|
||||
|
||||
func (s sortableSliceOfInt) Len() int { return len(s) }
|
||||
func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) }
|
||||
func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// List returns the contents as a sorted int slice.
|
||||
func (s Int) List() []int {
|
||||
res := make(sortableSliceOfInt, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
sort.Sort(res)
|
||||
return []int(res)
|
||||
}
|
||||
|
||||
// UnsortedList returns the slice with contents in random order.
|
||||
func (s Int) UnsortedList() []int {
|
||||
res := make([]int, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns a single element from the set.
|
||||
func (s Int) PopAny() (int, bool) {
|
||||
for key := range s {
|
||||
s.Delete(key)
|
||||
return key, true
|
||||
}
|
||||
var zeroValue int
|
||||
return zeroValue, false
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s Int) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func lessInt(lhs, rhs int) bool {
|
||||
return lhs < rhs
|
||||
}
|
||||
221
vendor/k8s.io/gengo/examples/set-gen/sets/int64.go
generated
vendored
@@ -1,221 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
package sets
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption.
|
||||
type Int64 map[int64]Empty
|
||||
|
||||
// NewInt64 creates an Int64 from a list of values.
|
||||
func NewInt64(items ...int64) Int64 {
|
||||
ss := make(Int64, len(items))
|
||||
ss.Insert(items...)
|
||||
return ss
|
||||
}
|
||||
|
||||
// Int64KeySet creates an Int64 from the keys of a map[int64](? extends interface{}).
|
||||
// If the value passed in is not actually a map, this will panic.
|
||||
func Int64KeySet(theMap interface{}) Int64 {
|
||||
v := reflect.ValueOf(theMap)
|
||||
ret := Int64{}
|
||||
|
||||
for _, keyValue := range v.MapKeys() {
|
||||
ret.Insert(keyValue.Interface().(int64))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s Int64) Insert(items ...int64) Int64 {
|
||||
for _, item := range items {
|
||||
s[item] = Empty{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s Int64) Delete(items ...int64) Int64 {
|
||||
for _, item := range items {
|
||||
delete(s, item)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s Int64) Has(item int64) bool {
|
||||
_, contained := s[item]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s Int64) HasAll(items ...int64) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasAny returns true if any items are contained in the set.
|
||||
func (s Int64) HasAny(items ...int64) bool {
|
||||
for _, item := range items {
|
||||
if s.Has(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a new set which is a copy of the current set.
|
||||
func (s Int64) Clone() Int64 {
|
||||
result := make(Int64, len(s))
|
||||
for key := range s {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s1 Int64) Difference(s2 Int64) Int64 {
|
||||
result := NewInt64()
|
||||
for key := range s1 {
|
||||
if !s2.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.SymmetricDifference(s2) = {a3, a4, a5}
|
||||
// s2.SymmetricDifference(s1) = {a3, a4, a5}
|
||||
func (s1 Int64) SymmetricDifference(s2 Int64) Int64 {
|
||||
return s1.Difference(s2).Union(s2.Difference(s1))
|
||||
}
|
||||
|
||||
// Union returns a new set which includes items in either s1 or s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a3, a4}
|
||||
// s1.Union(s2) = {a1, a2, a3, a4}
|
||||
// s2.Union(s1) = {a1, a2, a3, a4}
|
||||
func (s1 Int64) Union(s2 Int64) Int64 {
|
||||
result := s1.Clone()
|
||||
for key := range s2 {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Intersection returns a new set which includes the item in BOTH s1 and s2
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a2, a3}
|
||||
// s1.Intersection(s2) = {a2}
|
||||
func (s1 Int64) Intersection(s2 Int64) Int64 {
|
||||
var walk, other Int64
|
||||
result := NewInt64()
|
||||
if s1.Len() < s2.Len() {
|
||||
walk = s1
|
||||
other = s2
|
||||
} else {
|
||||
walk = s2
|
||||
other = s1
|
||||
}
|
||||
for key := range walk {
|
||||
if other.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 Int64) IsSuperset(s2 Int64) bool {
|
||||
for item := range s2 {
|
||||
if !s1.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 Int64) Equal(s2 Int64) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
type sortableSliceOfInt64 []int64
|
||||
|
||||
func (s sortableSliceOfInt64) Len() int { return len(s) }
|
||||
func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
|
||||
func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// List returns the contents as a sorted int64 slice.
|
||||
func (s Int64) List() []int64 {
|
||||
res := make(sortableSliceOfInt64, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
sort.Sort(res)
|
||||
return []int64(res)
|
||||
}
|
||||
|
||||
// UnsortedList returns the slice with contents in random order.
|
||||
func (s Int64) UnsortedList() []int64 {
|
||||
res := make([]int64, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns a single element from the set.
|
||||
func (s Int64) PopAny() (int64, bool) {
|
||||
for key := range s {
|
||||
s.Delete(key)
|
||||
return key, true
|
||||
}
|
||||
var zeroValue int64
|
||||
return zeroValue, false
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s Int64) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func lessInt64(lhs, rhs int64) bool {
|
||||
return lhs < rhs
|
||||
}
|
||||
221
vendor/k8s.io/gengo/examples/set-gen/sets/string.go
generated
vendored
@@ -1,221 +0,0 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by set-gen. DO NOT EDIT.
|
||||
|
||||
package sets
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
|
||||
type String map[string]Empty
|
||||
|
||||
// NewString creates a String from a list of values.
|
||||
func NewString(items ...string) String {
|
||||
ss := make(String, len(items))
|
||||
ss.Insert(items...)
|
||||
return ss
|
||||
}
|
||||
|
||||
// StringKeySet creates a String from a keys of a map[string](? extends interface{}).
|
||||
// If the value passed in is not actually a map, this will panic.
|
||||
func StringKeySet(theMap interface{}) String {
|
||||
v := reflect.ValueOf(theMap)
|
||||
ret := String{}
|
||||
|
||||
for _, keyValue := range v.MapKeys() {
|
||||
ret.Insert(keyValue.Interface().(string))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Insert adds items to the set.
|
||||
func (s String) Insert(items ...string) String {
|
||||
for _, item := range items {
|
||||
s[item] = Empty{}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Delete removes all items from the set.
|
||||
func (s String) Delete(items ...string) String {
|
||||
for _, item := range items {
|
||||
delete(s, item)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Has returns true if and only if item is contained in the set.
|
||||
func (s String) Has(item string) bool {
|
||||
_, contained := s[item]
|
||||
return contained
|
||||
}
|
||||
|
||||
// HasAll returns true if and only if all items are contained in the set.
|
||||
func (s String) HasAll(items ...string) bool {
|
||||
for _, item := range items {
|
||||
if !s.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasAny returns true if any items are contained in the set.
|
||||
func (s String) HasAny(items ...string) bool {
|
||||
for _, item := range items {
|
||||
if s.Has(item) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Clone returns a new set which is a copy of the current set.
|
||||
func (s String) Clone() String {
|
||||
result := make(String, len(s))
|
||||
for key := range s {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Difference returns a set of objects that are not in s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.Difference(s2) = {a3}
|
||||
// s2.Difference(s1) = {a4, a5}
|
||||
func (s1 String) Difference(s2 String) String {
|
||||
result := NewString()
|
||||
for key := range s1 {
|
||||
if !s2.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SymmetricDifference returns a set of elements which are in either of the sets, but not in their intersection.
|
||||
// For example:
|
||||
// s1 = {a1, a2, a3}
|
||||
// s2 = {a1, a2, a4, a5}
|
||||
// s1.SymmetricDifference(s2) = {a3, a4, a5}
|
||||
// s2.SymmetricDifference(s1) = {a3, a4, a5}
|
||||
func (s1 String) SymmetricDifference(s2 String) String {
|
||||
return s1.Difference(s2).Union(s2.Difference(s1))
|
||||
}
|
||||
|
||||
// Union returns a new set which includes items in either s1 or s2.
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a3, a4}
|
||||
// s1.Union(s2) = {a1, a2, a3, a4}
|
||||
// s2.Union(s1) = {a1, a2, a3, a4}
|
||||
func (s1 String) Union(s2 String) String {
|
||||
result := s1.Clone()
|
||||
for key := range s2 {
|
||||
result.Insert(key)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Intersection returns a new set which includes the item in BOTH s1 and s2
|
||||
// For example:
|
||||
// s1 = {a1, a2}
|
||||
// s2 = {a2, a3}
|
||||
// s1.Intersection(s2) = {a2}
|
||||
func (s1 String) Intersection(s2 String) String {
|
||||
var walk, other String
|
||||
result := NewString()
|
||||
if s1.Len() < s2.Len() {
|
||||
walk = s1
|
||||
other = s2
|
||||
} else {
|
||||
walk = s2
|
||||
other = s1
|
||||
}
|
||||
for key := range walk {
|
||||
if other.Has(key) {
|
||||
result.Insert(key)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsSuperset returns true if and only if s1 is a superset of s2.
|
||||
func (s1 String) IsSuperset(s2 String) bool {
|
||||
for item := range s2 {
|
||||
if !s1.Has(item) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equal returns true if and only if s1 is equal (as a set) to s2.
|
||||
// Two sets are equal if their membership is identical.
|
||||
// (In practice, this means same elements, order doesn't matter)
|
||||
func (s1 String) Equal(s2 String) bool {
|
||||
return len(s1) == len(s2) && s1.IsSuperset(s2)
|
||||
}
|
||||
|
||||
type sortableSliceOfString []string
|
||||
|
||||
func (s sortableSliceOfString) Len() int { return len(s) }
|
||||
func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }
|
||||
func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// List returns the contents as a sorted string slice.
|
||||
func (s String) List() []string {
|
||||
res := make(sortableSliceOfString, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
sort.Sort(res)
|
||||
return []string(res)
|
||||
}
|
||||
|
||||
// UnsortedList returns the slice with contents in random order.
|
||||
func (s String) UnsortedList() []string {
|
||||
res := make([]string, 0, len(s))
|
||||
for key := range s {
|
||||
res = append(res, key)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Returns a single element from the set.
|
||||
func (s String) PopAny() (string, bool) {
|
||||
for key := range s {
|
||||
s.Delete(key)
|
||||
return key, true
|
||||
}
|
||||
var zeroValue string
|
||||
return zeroValue, false
|
||||
}
|
||||
|
||||
// Len returns the size of the set.
|
||||
func (s String) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func lessString(lhs, rhs string) bool {
|
||||
return lhs < rhs
|
||||
}
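StringKeySet above uses reflection to pull the keys out of any map keyed by strings (and panics otherwise). A small usage sketch against the vendored package being deleted; the sample map is illustrative:

package main

import (
    "fmt"

    "k8s.io/gengo/examples/set-gen/sets"
)

func main() {
    labels := map[string]string{"app": "web", "tier": "frontend"}

    keys := sets.StringKeySet(labels) // reflects over the map's keys
    fmt.Println(keys.List())              // [app tier] (sorted)
    fmt.Println(keys.Has("app"))          // true
    fmt.Println(keys.HasAny("x", "tier")) // true
}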
62 vendor/k8s.io/gengo/generator/default_generator.go generated vendored
@@ -1,62 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generator

import (
    "io"

    "k8s.io/gengo/namer"
    "k8s.io/gengo/types"
)

const (
    GolangFileType = "golang"
)

// DefaultGen implements a do-nothing Generator.
//
// It can be used to implement static content files.
type DefaultGen struct {
    // OptionalName, if present, will be used for the generator's name, and
    // the filename (with ".go" appended).
    OptionalName string

    // OptionalBody, if present, will be used as the return from the "Init"
    // method. This causes it to be static content for the entire file if
    // no other generator touches the file.
    OptionalBody []byte
}

func (d DefaultGen) Name() string                                        { return d.OptionalName }
func (d DefaultGen) Filter(*Context, *types.Type) bool                   { return true }
func (d DefaultGen) Namers(*Context) namer.NameSystems                   { return nil }
func (d DefaultGen) Imports(*Context) []string                           { return []string{} }
func (d DefaultGen) PackageVars(*Context) []string                       { return []string{} }
func (d DefaultGen) PackageConsts(*Context) []string                     { return []string{} }
func (d DefaultGen) GenerateType(*Context, *types.Type, io.Writer) error { return nil }
func (d DefaultGen) Filename() string                                    { return d.OptionalName + ".go" }
func (d DefaultGen) FileType() string                                    { return GolangFileType }
func (d DefaultGen) Finalize(*Context, io.Writer) error                  { return nil }

func (d DefaultGen) Init(c *Context, w io.Writer) error {
    _, err := w.Write(d.OptionalBody)
    return err
}

var (
    _ = Generator(DefaultGen{})
)
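A hedged sketch of the two usual ways the removed v1 DefaultGen was consumed: embedded in a custom generator to inherit its do-nothing defaults, or used directly for a static file. The names and body below are illustrative, not part of the vendored code.

package example

import (
    "k8s.io/gengo/generator" // v1 import path removed by this PR
    "k8s.io/gengo/types"
)

// docGen embeds DefaultGen and only overrides what it needs (illustrative).
type docGen struct {
    generator.DefaultGen
    typesToEmit []*types.Type
}

// A purely static file: OptionalBody is written verbatim by Init when no
// other generator touches the file.
var staticDoc = generator.DefaultGen{
    OptionalName: "doc",                              // emitted as doc.go
    OptionalBody: []byte("// Package example ...\n"), // placeholder content
}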
75 vendor/k8s.io/gengo/generator/default_package.go generated vendored
@@ -1,75 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"k8s.io/gengo/types"
|
||||
)
|
||||
|
||||
// DefaultPackage contains a default implementation of Package.
|
||||
type DefaultPackage struct {
|
||||
// Short name of package, used in the "package xxxx" line.
|
||||
PackageName string
|
||||
// Import path of the package, and the location on disk of the package.
|
||||
PackagePath string
|
||||
// The location of the package on disk.
|
||||
Source string
|
||||
|
||||
// Emitted at the top of every file.
|
||||
HeaderText []byte
|
||||
|
||||
// Emitted only for a "doc.go" file; appended to the HeaderText for
|
||||
// that file.
|
||||
PackageDocumentation []byte
|
||||
|
||||
// If non-nil, will be called on "Generators"; otherwise, the static
|
||||
// list will be used. So you should set only one of these two fields.
|
||||
GeneratorFunc func(*Context) []Generator
|
||||
GeneratorList []Generator
|
||||
|
||||
// Optional; filters the types exposed to the generators.
|
||||
FilterFunc func(*Context, *types.Type) bool
|
||||
}
|
||||
|
||||
func (d *DefaultPackage) Name() string { return d.PackageName }
|
||||
func (d *DefaultPackage) Path() string { return d.PackagePath }
|
||||
func (d *DefaultPackage) SourcePath() string { return d.Source }
|
||||
|
||||
func (d *DefaultPackage) Filter(c *Context, t *types.Type) bool {
|
||||
if d.FilterFunc != nil {
|
||||
return d.FilterFunc(c, t)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *DefaultPackage) Generators(c *Context) []Generator {
|
||||
if d.GeneratorFunc != nil {
|
||||
return d.GeneratorFunc(c)
|
||||
}
|
||||
return d.GeneratorList
|
||||
}
|
||||
|
||||
func (d *DefaultPackage) Header(filename string) []byte {
|
||||
if filename == "doc.go" {
|
||||
return append(d.HeaderText, d.PackageDocumentation...)
|
||||
}
|
||||
return d.HeaderText
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Package(&DefaultPackage{})
|
||||
)
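And a sketch of how the removed v1 DefaultPackage tied those generators to an output package. The paths, filter, and generator list are placeholders; only the field names come from the file above.

package example

import (
    "k8s.io/gengo/generator" // v1 import path removed by this PR
    "k8s.io/gengo/types"
)

func buildPackage(boilerplate []byte) generator.Package {
    return &generator.DefaultPackage{
        PackageName: "example",
        PackagePath: "k8s.io/sample/example", // hypothetical output path
        HeaderText:  boilerplate,
        // Exactly one of GeneratorFunc or GeneratorList should be set.
        GeneratorFunc: func(c *generator.Context) []generator.Generator {
            return []generator.Generator{
                generator.DefaultGen{OptionalName: "zz_generated_example"},
            }
        },
        FilterFunc: func(c *generator.Context, t *types.Type) bool {
            return t.Kind == types.Struct // only offer struct types to the generators
        },
    }
}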
65 vendor/k8s.io/gengo/generator/transitive_closure.go generated vendored
@@ -1,65 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package generator

import "sort"

type edge struct {
    from string
    to   string
}

func transitiveClosure(in map[string][]string) map[string][]string {
    adj := make(map[edge]bool)
    imports := make(map[string]struct{})
    for from, tos := range in {
        for _, to := range tos {
            adj[edge{from, to}] = true
            imports[to] = struct{}{}
        }
    }

    // Warshal's algorithm
    for k := range in {
        for i := range in {
            if !adj[edge{i, k}] {
                continue
            }
            for j := range imports {
                if adj[edge{i, j}] {
                    continue
                }
                if adj[edge{k, j}] {
                    adj[edge{i, j}] = true
                }
            }
        }
    }

    out := make(map[string][]string, len(in))
    for i := range in {
        for j := range imports {
            if adj[edge{i, j}] {
                out[i] = append(out[i], j)
            }
        }

        sort.Strings(out[i])
    }

    return out
}
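transitiveClosure is a Floyd–Warshall-style reachability pass over the import graph. A hypothetical in-package test (not part of the vendored code) showing the expected behaviour on a tiny graph:

package generator

import (
    "reflect"
    "testing"
)

func TestTransitiveClosureSketch(t *testing.T) {
    in := map[string][]string{
        "a": {"b"},
        "b": {"c"},
        "c": {},
    }
    got := transitiveClosure(in)
    want := map[string][]string{
        "a": {"b", "c"}, // "c" becomes reachable via "b"
        "b": {"c"},
        // "c" has no outgoing edges, so it never appears as a key.
    }
    if !reflect.DeepEqual(got, want) {
        t.Fatalf("transitiveClosure(%v) = %v, want %v", in, got, want)
    }
}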
925 vendor/k8s.io/gengo/parser/parse.go generated vendored
@@ -1,925 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package parser
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/constant"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
tc "go/types"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// This clarifies when a pkg path has been canonicalized.
|
||||
type importPathString string
|
||||
|
||||
// Builder lets you add all the go files in all the packages that you care
|
||||
// about, then constructs the type source data.
|
||||
type Builder struct {
|
||||
context *build.Context
|
||||
|
||||
// If true, include *_test.go
|
||||
IncludeTestFiles bool
|
||||
|
||||
// Map of package names to more canonical information about the package.
|
||||
// This might hold the same value for multiple names, e.g. if someone
|
||||
// referenced ./pkg/name or in the case of vendoring, which canonicalizes
|
||||
// differently that what humans would type.
|
||||
//
|
||||
// This must only be accessed via getLoadedBuildPackage and setLoadedBuildPackage
|
||||
buildPackages map[importPathString]*build.Package
|
||||
|
||||
fset *token.FileSet
|
||||
// map of package path to list of parsed files
|
||||
parsed map[importPathString][]parsedFile
|
||||
// map of package path to absolute path (to prevent overlap)
|
||||
absPaths map[importPathString]string
|
||||
|
||||
// Set by typeCheckPackage(), used by importPackage() and friends.
|
||||
typeCheckedPackages map[importPathString]*tc.Package
|
||||
|
||||
// Map of package path to whether the user requested it or it was from
|
||||
// an import.
|
||||
userRequested map[importPathString]bool
|
||||
|
||||
// All comments from everywhere in every parsed file.
|
||||
endLineToCommentGroup map[fileLine]*ast.CommentGroup
|
||||
|
||||
// map of package to list of packages it imports.
|
||||
importGraph map[importPathString]map[string]struct{}
|
||||
}
|
||||
|
||||
// parsedFile is for tracking files with name
|
||||
type parsedFile struct {
|
||||
name string
|
||||
file *ast.File
|
||||
}
|
||||
|
||||
// key type for finding comments.
|
||||
type fileLine struct {
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
// New constructs a new builder.
|
||||
func New() *Builder {
|
||||
c := build.Default
|
||||
if c.GOROOT == "" {
|
||||
if p, err := exec.Command("which", "go").CombinedOutput(); err == nil {
|
||||
// The returned string will have some/path/bin/go, so remove the last two elements.
|
||||
c.GOROOT = filepath.Dir(filepath.Dir(strings.Trim(string(p), "\n")))
|
||||
} else {
|
||||
klog.Warningf("Warning: $GOROOT not set, and unable to run `which go` to find it: %v\n", err)
|
||||
}
|
||||
}
|
||||
// Force this to off, since we don't properly parse CGo. All symbols must
|
||||
// have non-CGo equivalents.
|
||||
c.CgoEnabled = false
|
||||
return &Builder{
|
||||
context: &c,
|
||||
buildPackages: map[importPathString]*build.Package{},
|
||||
typeCheckedPackages: map[importPathString]*tc.Package{},
|
||||
fset: token.NewFileSet(),
|
||||
parsed: map[importPathString][]parsedFile{},
|
||||
absPaths: map[importPathString]string{},
|
||||
userRequested: map[importPathString]bool{},
|
||||
endLineToCommentGroup: map[fileLine]*ast.CommentGroup{},
|
||||
importGraph: map[importPathString]map[string]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// AddBuildTags adds the specified build tags to the parse context.
|
||||
func (b *Builder) AddBuildTags(tags ...string) {
|
||||
b.context.BuildTags = append(b.context.BuildTags, tags...)
|
||||
}
|
||||
|
||||
func (b *Builder) getLoadedBuildPackage(importPath string) (*build.Package, bool) {
|
||||
canonicalized := canonicalizeImportPath(importPath)
|
||||
if string(canonicalized) != importPath {
|
||||
klog.V(5).Infof("getLoadedBuildPackage: %s normalized to %s", importPath, canonicalized)
|
||||
}
|
||||
buildPkg, ok := b.buildPackages[canonicalized]
|
||||
return buildPkg, ok
|
||||
}
|
||||
func (b *Builder) setLoadedBuildPackage(importPath string, buildPkg *build.Package) {
|
||||
canonicalizedImportPath := canonicalizeImportPath(importPath)
|
||||
if string(canonicalizedImportPath) != importPath {
|
||||
klog.V(5).Infof("setLoadedBuildPackage: importPath %s normalized to %s", importPath, canonicalizedImportPath)
|
||||
}
|
||||
|
||||
canonicalizedBuildPkgImportPath := canonicalizeImportPath(buildPkg.ImportPath)
|
||||
if string(canonicalizedBuildPkgImportPath) != buildPkg.ImportPath {
|
||||
klog.V(5).Infof("setLoadedBuildPackage: buildPkg.ImportPath %s normalized to %s", buildPkg.ImportPath, canonicalizedBuildPkgImportPath)
|
||||
}
|
||||
|
||||
if canonicalizedImportPath != canonicalizedBuildPkgImportPath {
|
||||
klog.V(5).Infof("setLoadedBuildPackage: normalized importPath (%s) differs from buildPkg.ImportPath (%s)", canonicalizedImportPath, canonicalizedBuildPkgImportPath)
|
||||
}
|
||||
b.buildPackages[canonicalizedImportPath] = buildPkg
|
||||
b.buildPackages[canonicalizedBuildPkgImportPath] = buildPkg
|
||||
}
|
||||
|
||||
// Get package information from the go/build package. Automatically excludes
|
||||
// e.g. test files and files for other platforms-- there is quite a bit of
|
||||
// logic of that nature in the build package.
|
||||
func (b *Builder) importBuildPackage(dir string) (*build.Package, error) {
|
||||
if buildPkg, ok := b.getLoadedBuildPackage(dir); ok {
|
||||
return buildPkg, nil
|
||||
}
|
||||
// This validates the `package foo // github.com/bar/foo` comments.
|
||||
buildPkg, err := b.importWithMode(dir, build.ImportComment)
|
||||
if err != nil {
|
||||
if _, ok := err.(*build.NoGoError); !ok {
|
||||
return nil, fmt.Errorf("unable to import %q: %v", dir, err)
|
||||
}
|
||||
}
|
||||
if buildPkg == nil {
|
||||
// Might be an empty directory. Try to just find the dir.
|
||||
buildPkg, err = b.importWithMode(dir, build.FindOnly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Remember it under the user-provided name.
|
||||
klog.V(5).Infof("saving buildPackage %s", dir)
|
||||
b.setLoadedBuildPackage(dir, buildPkg)
|
||||
|
||||
return buildPkg, nil
|
||||
}
|
||||
|
||||
// AddFileForTest adds a file to the set, without verifying that the provided
|
||||
// pkg actually exists on disk. The pkg must be of the form "canonical/pkg/path"
|
||||
// and the path must be the absolute path to the file. Because this bypasses
|
||||
// the normal recursive finding of package dependencies (on disk), test should
|
||||
// sort their test files topologically first, so all deps are resolved by the
|
||||
// time we need them.
|
||||
func (b *Builder) AddFileForTest(pkg string, path string, src []byte) error {
|
||||
if err := b.addFile(importPathString(pkg), path, src, true); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := b.typeCheckPackage(importPathString(pkg), true); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addFile adds a file to the set. The pkgPath must be of the form
|
||||
// "canonical/pkg/path" and the path must be the absolute path to the file. A
|
||||
// flag indicates whether this file was user-requested or just from following
|
||||
// the import graph.
|
||||
func (b *Builder) addFile(pkgPath importPathString, path string, src []byte, userRequested bool) error {
|
||||
for _, p := range b.parsed[pkgPath] {
|
||||
if path == p.name {
|
||||
klog.V(5).Infof("addFile %s %s already parsed, skipping", pkgPath, path)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
klog.V(6).Infof("addFile %s %s", pkgPath, path)
|
||||
p, err := parser.ParseFile(b.fset, path, src, parser.DeclarationErrors|parser.ParseComments)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This is redundant with addDir, but some tests call AddFileForTest, which
|
||||
// call into here without calling addDir.
|
||||
b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath]
|
||||
|
||||
b.parsed[pkgPath] = append(b.parsed[pkgPath], parsedFile{path, p})
|
||||
for _, c := range p.Comments {
|
||||
position := b.fset.Position(c.End())
|
||||
b.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c
|
||||
}
|
||||
|
||||
// We have to get the packages from this specific file, in case the
|
||||
// user added individual files instead of entire directories.
|
||||
if b.importGraph[pkgPath] == nil {
|
||||
b.importGraph[pkgPath] = map[string]struct{}{}
|
||||
}
|
||||
for _, im := range p.Imports {
|
||||
importedPath := strings.Trim(im.Path.Value, `"`)
|
||||
b.importGraph[pkgPath][importedPath] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddDir adds an entire directory, scanning it for go files. 'dir' should have
|
||||
// a single go package in it. GOPATH, GOROOT, and the location of your go
|
||||
// binary (`which go`) will all be searched if dir doesn't literally resolve.
|
||||
func (b *Builder) AddDir(dir string) error {
|
||||
_, err := b.importPackage(dir, true)
|
||||
return err
|
||||
}
|
||||
|
||||
// AddDirRecursive is just like AddDir, but it also recursively adds
|
||||
// subdirectories; it returns an error only if the path couldn't be resolved;
|
||||
// any directories recursed into without go source are ignored.
|
||||
func (b *Builder) AddDirRecursive(dir string) error {
|
||||
// Add the root.
|
||||
if _, err := b.importPackage(dir, true); err != nil {
|
||||
klog.Warningf("Ignoring directory %v: %v", dir, err)
|
||||
}
|
||||
|
||||
// filepath.Walk does not follow symlinks. We therefore evaluate symlinks and use that with
|
||||
// filepath.Walk.
|
||||
buildPkg, ok := b.getLoadedBuildPackage(dir)
|
||||
if !ok {
|
||||
return fmt.Errorf("no loaded build package for %s", dir)
|
||||
}
|
||||
realPath, err := filepath.EvalSymlinks(buildPkg.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fn := func(filePath string, info os.FileInfo, err error) error {
|
||||
if info != nil && info.IsDir() {
|
||||
rel := filepath.ToSlash(strings.TrimPrefix(filePath, realPath))
|
||||
if rel != "" {
|
||||
// Make a pkg path.
|
||||
buildPkg, ok := b.getLoadedBuildPackage(dir)
|
||||
if !ok {
|
||||
return fmt.Errorf("no loaded build package for %s", dir)
|
||||
}
|
||||
pkg := path.Join(string(canonicalizeImportPath(buildPkg.ImportPath)), rel)
|
||||
|
||||
// Add it.
|
||||
if _, err := b.importPackage(pkg, true); err != nil {
|
||||
klog.Warningf("Ignoring child directory %v: %v", pkg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := filepath.Walk(realPath, fn); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddDirTo adds an entire directory to a given Universe. Unlike AddDir, this
|
||||
// processes the package immediately, which makes it safe to use from within a
|
||||
// generator (rather than just at init time. 'dir' must be a single go package.
|
||||
// GOPATH, GOROOT, and the location of your go binary (`which go`) will all be
|
||||
// searched if dir doesn't literally resolve.
|
||||
// Deprecated. Please use AddDirectoryTo.
|
||||
func (b *Builder) AddDirTo(dir string, u *types.Universe) error {
|
||||
// We want all types from this package, as if they were directly added
|
||||
// by the user. They WERE added by the user, in effect.
|
||||
if _, err := b.importPackage(dir, true); err != nil {
|
||||
return err
|
||||
}
|
||||
pkg, ok := b.getLoadedBuildPackage(dir)
|
||||
if !ok {
|
||||
return fmt.Errorf("no such package: %q", dir)
|
||||
}
|
||||
return b.findTypesIn(canonicalizeImportPath(pkg.ImportPath), u)
|
||||
}
|
||||
|
||||
// AddDirectoryTo adds an entire directory to a given Universe. Unlike AddDir,
|
||||
// this processes the package immediately, which makes it safe to use from
|
||||
// within a generator (rather than just at init time. 'dir' must be a single go
|
||||
// package. GOPATH, GOROOT, and the location of your go binary (`which go`)
|
||||
// will all be searched if dir doesn't literally resolve.
|
||||
func (b *Builder) AddDirectoryTo(dir string, u *types.Universe) (*types.Package, error) {
|
||||
// We want all types from this package, as if they were directly added
|
||||
// by the user. They WERE added by the user, in effect.
|
||||
if _, err := b.importPackage(dir, true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pkg, ok := b.getLoadedBuildPackage(dir)
|
||||
if !ok || pkg == nil {
|
||||
return nil, fmt.Errorf("no such package: %q", dir)
|
||||
}
|
||||
path := canonicalizeImportPath(pkg.ImportPath)
|
||||
if err := b.findTypesIn(path, u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u.Package(string(path)), nil
|
||||
}
|
||||
|
||||
// The implementation of AddDir. A flag indicates whether this directory was
|
||||
// user-requested or just from following the import graph.
|
||||
func (b *Builder) addDir(dir string, userRequested bool) error {
|
||||
klog.V(5).Infof("addDir %s", dir)
|
||||
buildPkg, err := b.importBuildPackage(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath)
|
||||
pkgPath := canonicalPackage
|
||||
if dir != string(canonicalPackage) {
|
||||
klog.V(5).Infof("addDir %s, canonical path is %s", dir, pkgPath)
|
||||
}
|
||||
|
||||
// Sanity check the pkg dir has not changed.
|
||||
if prev, found := b.absPaths[pkgPath]; found {
|
||||
if buildPkg.Dir != prev {
|
||||
return fmt.Errorf("package %q (%s) previously resolved to %s", pkgPath, buildPkg.Dir, prev)
|
||||
}
|
||||
} else {
|
||||
b.absPaths[pkgPath] = buildPkg.Dir
|
||||
}
|
||||
|
||||
files := []string{}
|
||||
files = append(files, buildPkg.GoFiles...)
|
||||
if b.IncludeTestFiles {
|
||||
files = append(files, buildPkg.TestGoFiles...)
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if !strings.HasSuffix(file, ".go") {
|
||||
continue
|
||||
}
|
||||
absPath := filepath.Join(buildPkg.Dir, file)
|
||||
data, err := ioutil.ReadFile(absPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("while loading %q: %v", absPath, err)
|
||||
}
|
||||
err = b.addFile(pkgPath, absPath, data, userRequested)
|
||||
if err != nil {
|
||||
return fmt.Errorf("while parsing %q: %v", absPath, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// regexErrPackageNotFound helps test the expected error for not finding a package.
|
||||
var regexErrPackageNotFound = regexp.MustCompile(`^unable to import ".*?":.*`)
|
||||
|
||||
func isErrPackageNotFound(err error) bool {
|
||||
return regexErrPackageNotFound.MatchString(err.Error())
|
||||
}
|
||||
|
||||
// importPackage is a function that will be called by the type check package when it
|
||||
// needs to import a go package. 'path' is the import path.
|
||||
func (b *Builder) importPackage(dir string, userRequested bool) (*tc.Package, error) {
|
||||
klog.V(5).Infof("importPackage %s", dir)
|
||||
|
||||
var pkgPath = importPathString(dir)
|
||||
|
||||
// Get the canonical path if we can.
|
||||
if buildPkg, _ := b.getLoadedBuildPackage(dir); buildPkg != nil {
|
||||
canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath)
|
||||
klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage)
|
||||
pkgPath = canonicalPackage
|
||||
}
|
||||
|
||||
// If we have not seen this before, process it now.
|
||||
ignoreError := false
|
||||
if _, found := b.parsed[pkgPath]; !found {
|
||||
// Ignore errors in paths that we're importing solely because
|
||||
// they're referenced by other packages.
|
||||
ignoreError = true
|
||||
|
||||
// Add it.
|
||||
if err := b.addDir(dir, userRequested); err != nil {
|
||||
if isErrPackageNotFound(err) {
|
||||
klog.V(6).Info(err)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the canonical path now that it has been added.
|
||||
if buildPkg, _ := b.getLoadedBuildPackage(dir); buildPkg != nil {
|
||||
canonicalPackage := canonicalizeImportPath(buildPkg.ImportPath)
|
||||
klog.V(5).Infof("importPackage %s, canonical path is %s", dir, canonicalPackage)
|
||||
pkgPath = canonicalPackage
|
||||
}
|
||||
}
|
||||
|
||||
// If it was previously known, just check that the user-requestedness hasn't
|
||||
// changed.
|
||||
b.userRequested[pkgPath] = userRequested || b.userRequested[pkgPath]
|
||||
|
||||
// Run the type checker. We may end up doing this to pkgs that are already
|
||||
// done, or are in the queue to be done later, but it will short-circuit,
|
||||
// and we can't miss pkgs that are only depended on.
|
||||
pkg, err := b.typeCheckPackage(pkgPath, !ignoreError)
|
||||
if err != nil {
|
||||
switch {
|
||||
case ignoreError && pkg != nil:
|
||||
klog.V(4).Infof("type checking encountered some issues in %q, but ignoring.\n", pkgPath)
|
||||
case !ignoreError && pkg != nil:
|
||||
klog.V(3).Infof("type checking encountered some errors in %q\n", pkgPath)
|
||||
return nil, err
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return pkg, nil
|
||||
}
|
||||
|
||||
type importAdapter struct {
|
||||
b *Builder
|
||||
}
|
||||
|
||||
func (a importAdapter) Import(path string) (*tc.Package, error) {
|
||||
return a.b.importPackage(path, false)
|
||||
}
|
||||
|
||||
// typeCheckPackage will attempt to return the package even if there are some
|
||||
// errors, so you may check whether the package is nil or not even if you get
|
||||
// an error.
|
||||
func (b *Builder) typeCheckPackage(pkgPath importPathString, logErr bool) (*tc.Package, error) {
|
||||
klog.V(5).Infof("typeCheckPackage %s", pkgPath)
|
||||
if pkg, ok := b.typeCheckedPackages[pkgPath]; ok {
|
||||
if pkg != nil {
|
||||
klog.V(6).Infof("typeCheckPackage %s already done", pkgPath)
|
||||
return pkg, nil
|
||||
}
|
||||
// We store a nil right before starting work on a package. So
|
||||
// if we get here and it's present and nil, that means there's
|
||||
// another invocation of this function on the call stack
|
||||
// already processing this package.
|
||||
return nil, fmt.Errorf("circular dependency for %q", pkgPath)
|
||||
}
|
||||
parsedFiles, ok := b.parsed[pkgPath]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("No files for pkg %q", pkgPath)
|
||||
}
|
||||
files := make([]*ast.File, len(parsedFiles))
|
||||
for i := range parsedFiles {
|
||||
files[i] = parsedFiles[i].file
|
||||
}
|
||||
b.typeCheckedPackages[pkgPath] = nil
|
||||
c := tc.Config{
|
||||
IgnoreFuncBodies: true,
|
||||
// Note that importAdapter can call b.importPackage which calls this
|
||||
// method. So there can't be cycles in the import graph.
|
||||
Importer: importAdapter{b},
|
||||
Error: func(err error) {
|
||||
if logErr {
|
||||
klog.V(2).Infof("type checker: %v\n", err)
|
||||
} else {
|
||||
klog.V(3).Infof("type checker: %v\n", err)
|
||||
}
|
||||
},
|
||||
}
|
||||
pkg, err := c.Check(string(pkgPath), b.fset, files, nil)
|
||||
b.typeCheckedPackages[pkgPath] = pkg // record the result whether or not there was an error
|
||||
return pkg, err
|
||||
}
|
||||
|
||||
// FindPackages fetches a list of the user-imported packages.
|
||||
// Note that you need to call b.FindTypes() first.
|
||||
func (b *Builder) FindPackages() []string {
|
||||
// Iterate packages in a predictable order.
|
||||
pkgPaths := []string{}
|
||||
for k := range b.typeCheckedPackages {
|
||||
pkgPaths = append(pkgPaths, string(k))
|
||||
}
|
||||
sort.Strings(pkgPaths)
|
||||
|
||||
result := []string{}
|
||||
for _, pkgPath := range pkgPaths {
|
||||
if b.userRequested[importPathString(pkgPath)] {
|
||||
// Since walkType is recursive, all types that are in packages that
|
||||
// were directly mentioned will be included. We don't need to
|
||||
// include all types in all transitive packages, though.
|
||||
result = append(result, pkgPath)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// FindTypes finalizes the package imports, and searches through all the
|
||||
// packages for types.
|
||||
func (b *Builder) FindTypes() (types.Universe, error) {
|
||||
// Take a snapshot of pkgs to iterate, since this will recursively mutate
|
||||
// b.parsed. Iterate in a predictable order.
|
||||
pkgPaths := []string{}
|
||||
for pkgPath := range b.parsed {
|
||||
pkgPaths = append(pkgPaths, string(pkgPath))
|
||||
}
|
||||
sort.Strings(pkgPaths)
|
||||
|
||||
u := types.Universe{}
|
||||
for _, pkgPath := range pkgPaths {
|
||||
if err := b.findTypesIn(importPathString(pkgPath), &u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// addCommentsToType takes any accumulated comment lines prior to obj and
|
||||
// attaches them to the type t.
|
||||
func (b *Builder) addCommentsToType(obj tc.Object, t *types.Type) {
|
||||
c1 := b.priorCommentLines(obj.Pos(), 1)
|
||||
// c1.Text() is safe if c1 is nil
|
||||
t.CommentLines = splitLines(c1.Text())
|
||||
if c1 == nil {
|
||||
t.SecondClosestCommentLines = splitLines(b.priorCommentLines(obj.Pos(), 2).Text())
|
||||
} else {
|
||||
t.SecondClosestCommentLines = splitLines(b.priorCommentLines(c1.List[0].Slash, 2).Text())
|
||||
}
|
||||
}
|
||||
|
||||
// findTypesIn finalizes the package import and searches through the package
|
||||
// for types.
|
||||
func (b *Builder) findTypesIn(pkgPath importPathString, u *types.Universe) error {
|
||||
klog.V(5).Infof("findTypesIn %s", pkgPath)
|
||||
pkg := b.typeCheckedPackages[pkgPath]
|
||||
if pkg == nil {
|
||||
return fmt.Errorf("findTypesIn(%s): package is not known", pkgPath)
|
||||
}
|
||||
if !b.userRequested[pkgPath] {
|
||||
// Since walkType is recursive, all types that the
|
||||
// packages they asked for depend on will be included.
|
||||
// But we don't need to include all types in all
|
||||
// *packages* they depend on.
|
||||
klog.V(5).Infof("findTypesIn %s: package is not user requested", pkgPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// We're keeping this package. This call will create the record.
|
||||
u.Package(string(pkgPath)).Name = pkg.Name()
|
||||
u.Package(string(pkgPath)).Path = pkg.Path()
|
||||
u.Package(string(pkgPath)).SourcePath = b.absPaths[pkgPath]
|
||||
|
||||
for _, f := range b.parsed[pkgPath] {
|
||||
if _, fileName := filepath.Split(f.name); fileName == "doc.go" {
|
||||
tp := u.Package(string(pkgPath))
|
||||
// findTypesIn might be called multiple times. Clean up tp.Comments
|
||||
// to avoid repeatedly fill same comments to it.
|
||||
tp.Comments = []string{}
|
||||
for i := range f.file.Comments {
|
||||
tp.Comments = append(tp.Comments, splitLines(f.file.Comments[i].Text())...)
|
||||
}
|
||||
if f.file.Doc != nil {
|
||||
tp.DocComments = splitLines(f.file.Doc.Text())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s := pkg.Scope()
|
||||
for _, n := range s.Names() {
|
||||
obj := s.Lookup(n)
|
||||
tn, ok := obj.(*tc.TypeName)
|
||||
if ok {
|
||||
t := b.walkType(*u, nil, tn.Type())
|
||||
b.addCommentsToType(obj, t)
|
||||
}
|
||||
tf, ok := obj.(*tc.Func)
|
||||
// We only care about functions, not concrete/abstract methods.
|
||||
if ok && tf.Type() != nil && tf.Type().(*tc.Signature).Recv() == nil {
|
||||
t := b.addFunction(*u, nil, tf)
|
||||
b.addCommentsToType(obj, t)
|
||||
}
|
||||
tv, ok := obj.(*tc.Var)
|
||||
if ok && !tv.IsField() {
|
||||
t := b.addVariable(*u, nil, tv)
|
||||
b.addCommentsToType(obj, t)
|
||||
}
|
||||
tconst, ok := obj.(*tc.Const)
|
||||
if ok {
|
||||
t := b.addConstant(*u, nil, tconst)
|
||||
b.addCommentsToType(obj, t)
|
||||
}
|
||||
}
|
||||
|
||||
importedPkgs := []string{}
|
||||
for k := range b.importGraph[pkgPath] {
|
||||
importedPkgs = append(importedPkgs, string(k))
|
||||
}
|
||||
sort.Strings(importedPkgs)
|
||||
for _, p := range importedPkgs {
|
||||
u.AddImports(string(pkgPath), p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Builder) importWithMode(dir string, mode build.ImportMode) (*build.Package, error) {
|
||||
// This is a bit of a hack. The srcDir argument to Import() should
|
||||
// properly be the dir of the file which depends on the package to be
|
||||
// imported, so that vendoring can work properly and local paths can
|
||||
// resolve. We assume that there is only one level of vendoring, and that
|
||||
// the CWD is inside the GOPATH, so this should be safe. Nobody should be
|
||||
// using local (relative) paths except on the CLI, so CWD is also
|
||||
// sufficient.
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get current directory: %v", err)
|
||||
}
|
||||
|
||||
// normalize to drop /vendor/ if present
|
||||
dir = string(canonicalizeImportPath(dir))
|
||||
|
||||
buildPkg, err := b.context.Import(filepath.ToSlash(dir), cwd, mode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buildPkg, nil
|
||||
}
|
||||
|
||||
// if there's a comment on the line `lines` before pos, return its text, otherwise "".
|
||||
func (b *Builder) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup {
|
||||
position := b.fset.Position(pos)
|
||||
key := fileLine{position.Filename, position.Line - lines}
|
||||
return b.endLineToCommentGroup[key]
|
||||
}
|
||||
|
||||
func splitLines(str string) []string {
|
||||
return strings.Split(strings.TrimRight(str, "\n"), "\n")
|
||||
}
|
||||
|
||||
func tcFuncNameToName(in string) types.Name {
|
||||
name := strings.TrimPrefix(in, "func ")
|
||||
nameParts := strings.Split(name, "(")
|
||||
return tcNameToName(nameParts[0])
|
||||
}
|
||||
|
||||
func tcVarNameToName(in string) types.Name {
|
||||
nameParts := strings.Split(in, " ")
|
||||
// nameParts[0] is "var".
|
||||
// nameParts[2:] is the type of the variable, we ignore it for now.
|
||||
return tcNameToName(nameParts[1])
|
||||
}
|
||||
|
||||
func tcNameToName(in string) types.Name {
|
||||
// Detect anonymous type names. (These may have '.' characters because
|
||||
// embedded types may have packages, so we detect them specially.)
|
||||
if strings.HasPrefix(in, "struct{") ||
|
||||
strings.HasPrefix(in, "<-chan") ||
|
||||
strings.HasPrefix(in, "chan<-") ||
|
||||
strings.HasPrefix(in, "chan ") ||
|
||||
strings.HasPrefix(in, "func(") ||
|
||||
strings.HasPrefix(in, "func (") ||
|
||||
strings.HasPrefix(in, "*") ||
|
||||
strings.HasPrefix(in, "map[") ||
|
||||
strings.HasPrefix(in, "[") {
|
||||
return types.Name{Name: in}
|
||||
}
|
||||
|
||||
// Otherwise, if there are '.' characters present, the name has a
|
||||
// package path in front.
|
||||
nameParts := strings.Split(in, ".")
|
||||
name := types.Name{Name: in}
|
||||
if n := len(nameParts); n >= 2 {
|
||||
// The final "." is the name of the type--previous ones must
|
||||
// have been in the package path.
|
||||
name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (b *Builder) convertSignature(u types.Universe, t *tc.Signature) *types.Signature {
|
||||
signature := &types.Signature{}
|
||||
for i := 0; i < t.Params().Len(); i++ {
|
||||
signature.Parameters = append(signature.Parameters, b.walkType(u, nil, t.Params().At(i).Type()))
|
||||
signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name())
|
||||
}
|
||||
for i := 0; i < t.Results().Len(); i++ {
|
||||
signature.Results = append(signature.Results, b.walkType(u, nil, t.Results().At(i).Type()))
|
||||
signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name())
|
||||
}
|
||||
if r := t.Recv(); r != nil {
|
||||
signature.Receiver = b.walkType(u, nil, r.Type())
|
||||
}
|
||||
signature.Variadic = t.Variadic()
|
||||
return signature
|
||||
}
|
||||
|
||||
// walkType adds the type, and any necessary child types.
|
||||
func (b *Builder) walkType(u types.Universe, useName *types.Name, in tc.Type) *types.Type {
|
||||
// Most of the cases are underlying types of the named type.
|
||||
name := tcNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
|
||||
switch t := in.(type) {
|
||||
case *tc.Struct:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Struct
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
f := t.Field(i)
|
||||
m := types.Member{
|
||||
Name: f.Name(),
|
||||
Embedded: f.Anonymous(),
|
||||
Tags: t.Tag(i),
|
||||
Type: b.walkType(u, nil, f.Type()),
|
||||
CommentLines: splitLines(b.priorCommentLines(f.Pos(), 1).Text()),
|
||||
}
|
||||
out.Members = append(out.Members, m)
|
||||
}
|
||||
return out
|
||||
case *tc.Map:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Map
|
||||
out.Elem = b.walkType(u, nil, t.Elem())
|
||||
out.Key = b.walkType(u, nil, t.Key())
|
||||
return out
|
||||
case *tc.Pointer:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Pointer
|
||||
out.Elem = b.walkType(u, nil, t.Elem())
|
||||
return out
|
||||
case *tc.Slice:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Slice
|
||||
out.Elem = b.walkType(u, nil, t.Elem())
|
||||
return out
|
||||
case *tc.Array:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Array
|
||||
out.Elem = b.walkType(u, nil, t.Elem())
|
||||
out.Len = in.(*tc.Array).Len()
|
||||
return out
|
||||
case *tc.Chan:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Chan
|
||||
out.Elem = b.walkType(u, nil, t.Elem())
|
||||
// TODO: need to store direction, otherwise raw type name
|
||||
// cannot be properly written.
|
||||
return out
|
||||
case *tc.Basic:
|
||||
out := u.Type(types.Name{
|
||||
Package: "",
|
||||
Name: t.Name(),
|
||||
})
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Unsupported
|
||||
return out
|
||||
case *tc.Signature:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Func
|
||||
out.Signature = b.convertSignature(u, t)
|
||||
return out
|
||||
case *tc.Interface:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Interface
|
||||
t.Complete()
|
||||
for i := 0; i < t.NumMethods(); i++ {
|
||||
if out.Methods == nil {
|
||||
out.Methods = map[string]*types.Type{}
|
||||
}
|
||||
method := t.Method(i)
|
||||
name := tcNameToName(method.String())
|
||||
mt := b.walkType(u, &name, method.Type())
|
||||
mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text())
|
||||
out.Methods[method.Name()] = mt
|
||||
}
|
||||
return out
|
||||
case *tc.Named:
|
||||
var out *types.Type
|
||||
switch t.Underlying().(type) {
|
||||
case *tc.Named, *tc.Basic, *tc.Map, *tc.Slice:
|
||||
name := tcNameToName(t.String())
|
||||
out = u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Alias
|
||||
out.Underlying = b.walkType(u, nil, t.Underlying())
|
||||
default:
|
||||
// tc package makes everything "named" with an
|
||||
// underlying anonymous type--we remove that annoying
|
||||
// "feature" for users. This flattens those types
|
||||
// together.
|
||||
name := tcNameToName(t.String())
|
||||
if out := u.Type(name); out.Kind != types.Unknown {
|
||||
return out // short circuit if we've already made this.
|
||||
}
|
||||
out = b.walkType(u, &name, t.Underlying())
|
||||
}
|
||||
// If the underlying type didn't already add methods, add them.
|
||||
// (Interface types will have already added methods.)
|
||||
if len(out.Methods) == 0 {
|
||||
for i := 0; i < t.NumMethods(); i++ {
|
||||
if out.Methods == nil {
|
||||
out.Methods = map[string]*types.Type{}
|
||||
}
|
||||
method := t.Method(i)
|
||||
name := tcNameToName(method.String())
|
||||
mt := b.walkType(u, &name, method.Type())
|
||||
mt.CommentLines = splitLines(b.priorCommentLines(method.Pos(), 1).Text())
|
||||
out.Methods[method.Name()] = mt
|
||||
}
|
||||
}
|
||||
return out
|
||||
default:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Unsupported
|
||||
klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t)
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Builder) addFunction(u types.Universe, useName *types.Name, in *tc.Func) *types.Type {
|
||||
name := tcFuncNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Function(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = b.walkType(u, nil, in.Type())
|
||||
return out
|
||||
}
|
||||
|
||||
func (b *Builder) addVariable(u types.Universe, useName *types.Name, in *tc.Var) *types.Type {
|
||||
name := tcVarNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Variable(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = b.walkType(u, nil, in.Type())
|
||||
return out
|
||||
}
|
||||
|
||||
func (b *Builder) addConstant(u types.Universe, useName *types.Name, in *tc.Const) *types.Type {
|
||||
name := tcVarNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Constant(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = b.walkType(u, nil, in.Type())
|
||||
|
||||
var constval string
|
||||
|
||||
// For strings, we use `StringVal()` to get the un-truncated,
|
||||
// un-quoted string. For other values, `.String()` is preferable to
|
||||
// get something relatively human readable (especially since for
|
||||
// floating point types, `ExactString()` will generate numeric
|
||||
// expressions using `big.(*Float).Text()`.
|
||||
switch in.Val().Kind() {
|
||||
case constant.String:
|
||||
constval = constant.StringVal(in.Val())
|
||||
default:
|
||||
constval = in.Val().String()
|
||||
}
|
||||
|
||||
out.ConstValue = &constval
|
||||
return out
|
||||
}
|
||||
|
||||
// canonicalizeImportPath takes an import path and returns the actual package.
|
||||
// It doesn't support nested vendoring.
|
||||
func canonicalizeImportPath(importPath string) importPathString {
|
||||
if !strings.Contains(importPath, "/vendor/") {
|
||||
return importPathString(importPath)
|
||||
}
|
||||
|
||||
return importPathString(importPath[strings.Index(importPath, "/vendor/")+len("/vendor/"):])
|
||||
}
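For context, the typical call sequence into the removed v1 parser: build a Builder, add directories, then extract the type Universe. The package path below is hypothetical; the method names come from the file above.

package main

import (
    "fmt"

    "k8s.io/gengo/parser" // v1 import path removed by this PR
)

func main() {
    b := parser.New()
    b.AddBuildTags("ignore_autogenerated")

    // Load one package; its imports are pulled in on demand during type checking.
    if err := b.AddDir("k8s.io/sample/pkg/apis/example"); err != nil {
        panic(err)
    }

    u, err := b.FindTypes() // type-checks everything and builds the Universe
    if err != nil {
        panic(err)
    }
    for _, pkgPath := range b.FindPackages() { // user-requested packages only
        fmt.Println(pkgPath, "->", u.Package(pkgPath).Name)
    }
}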
57 vendor/k8s.io/gengo/types/flatten.go generated vendored
@@ -1,57 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package types

// FlattenMembers recursively takes any embedded members and puts them in the
// top level, correctly hiding them if the top level hides them. There must not
// be a cycle-- that implies infinite members.
//
// This is useful for e.g. computing all the valid keys in a json struct,
// properly considering any configuration of embedded structs.
func FlattenMembers(m []Member) []Member {
    embedded := []Member{}
    normal := []Member{}
    type nameInfo struct {
        top bool
        i   int
    }
    names := map[string]nameInfo{}
    for i := range m {
        if m[i].Embedded && m[i].Type.Kind == Struct {
            embedded = append(embedded, m[i])
        } else {
            normal = append(normal, m[i])
            names[m[i].Name] = nameInfo{true, len(normal) - 1}
        }
    }
    for i := range embedded {
        for _, e := range FlattenMembers(embedded[i].Type.Members) {
            if info, found := names[e.Name]; found {
                if info.top {
                    continue
                }
                if n := normal[info.i]; n.Name == e.Name && n.Type == e.Type {
                    continue
                }
                panic("conflicting members")
            }
            normal = append(normal, e)
            names[e.Name] = nameInfo{false, len(normal) - 1}
        }
    }
    return normal
}
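A short illustration of FlattenMembers hoisting embedded struct members to the top level. The member names are made up for the example; the field names on types.Member and types.Type match the vendored v1 package being removed.

package main

import (
    "fmt"

    "k8s.io/gengo/types" // v1 import path removed by this PR
)

func main() {
    meta := &types.Type{
        Kind: types.Struct,
        Members: []types.Member{
            {Name: "Name"},
            {Name: "Namespace"},
        },
    }
    obj := []types.Member{
        {Name: "ObjectMeta", Embedded: true, Type: meta},
        {Name: "Spec"},
    }

    // Embedded struct members are hoisted after the non-embedded ones.
    for _, m := range types.FlattenMembers(obj) {
        fmt.Println(m.Name) // Spec, Name, Namespace
    }
}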
0 vendor/k8s.io/gengo/LICENSE → vendor/k8s.io/gengo/v2/LICENSE generated vendored
53 vendor/k8s.io/gengo/v2/README.md generated vendored Normal file
@@ -0,0 +1,53 @@
[![GoDoc Widget]][GoDoc] [![GoReport]][GoReportStatus]

[GoDoc]: https://godoc.org/k8s.io/gengo
[GoDoc Widget]: https://godoc.org/k8s.io/gengo?status.svg
[GoReport]: https://goreportcard.com/badge/github.com/kubernetes/gengo
[GoReportStatus]: https://goreportcard.com/report/github.com/kubernetes/gengo

# Gengo: a framework for building simple code generators

This repo is used by Kubernetes to build some codegen tooling. It is not
intended to be general-purpose and makes some assumptions that may not hold
outside of Kubernetes.

In the past this repo was partially supported for external use (outside of the
Kubernetes project overall), but that is no longer true. We may change the API
in incompatible ways, without warning.

If you are not building something that is part of Kubernetes, DO NOT DEPEND ON
THIS REPO.

## New usage within Kubernetes

Gengo is a very opinionated framework. It is primarily aimed at generating Go
code derived from types defined in other Go code, but it is possible to use it
for other things (e.g. proto files). Net new tools should consider using
`golang.org/x/tools/go/packages` directly. Gengo can serve as an example of
how to do that.

If you still decide you want to use gengo, see the
[simple examples](./examples) in this repo or the more extensive tools in the
Kubernetes [code-generator](https://github.com/kubernetes/code-generator/)
repo.

## Overview

Gengo is used to build tools (generally a tool is a binary). Each tool
describes some number of `Targets`. A target is a single output package, which
may be the same as the inputs (if the tool generates code alongside the inputs)
or different. Each `Target` describes some number of `Generators`. A
generator is responsible for emitting a single file into the target directory.

Gengo helps the tool to load and process input packages, e.g. extracting type
information and associating comments. Each target will be offered every known
type, and can filter that down to the set of types it cares about. Each
generator will be offered the result of the target's filtering, and can filter
the set of types further. Finally, the generator will be called to emit code
for all of the remaining types.

The `tracer` example in this repo can be used to examine all of the hooks.

## Contributing

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to contribute.
21 vendor/k8s.io/gengo/types/comments.go → vendor/k8s.io/gengo/v2/comments.go generated vendored
@@ -14,9 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package types contains go type information, packaged in a way that makes
|
||||
// auto-generation convenient, whether by template or straight go functions.
|
||||
package types
|
||||
package gengo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -25,7 +23,7 @@ import (
|
||||
|
||||
// ExtractCommentTags parses comments for lines of the form:
|
||||
//
|
||||
// 'marker' + "key=value".
|
||||
// 'marker' + "key=value".
|
||||
//
|
||||
// Values are optional; "" is the default. A tag can be specified more than
|
||||
// one time and all values are returned. If the resulting map has an entry for
|
||||
@@ -33,12 +31,15 @@ import (
|
||||
//
|
||||
// Example: if you pass "+" for 'marker', and the following lines are in
|
||||
// the comments:
|
||||
// +foo=value1
|
||||
// +bar
|
||||
// +foo=value2
|
||||
// +baz="qux"
|
||||
//
|
||||
// +foo=value1
|
||||
// +bar
|
||||
// +foo=value2
|
||||
// +baz="qux"
|
||||
//
|
||||
// Then this function will return:
|
||||
// map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {"qux"}}
|
||||
//
|
||||
// map[string][]string{"foo":{"value1, "value2"}, "bar": {""}, "baz": {"qux"}}
|
||||
func ExtractCommentTags(marker string, lines []string) map[string][]string {
|
||||
out := map[string][]string{}
|
||||
for _, line := range lines {
|
||||
@@ -62,7 +63,7 @@ func ExtractCommentTags(marker string, lines []string) map[string][]string {
|
||||
|
||||
// ExtractSingleBoolCommentTag parses comments for lines of the form:
|
||||
//
|
||||
// 'marker' + "key=value1"
|
||||
// 'marker' + "key=value1"
|
||||
//
|
||||
// If the tag is not found, the default value is returned. Values are asserted
|
||||
// to be boolean ("true" or "false"), and any other value will cause an error
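The doc comment above fully specifies ExtractCommentTags, which after this PR lives in package gengo at k8s.io/gengo/v2. A matching usage sketch:

package main

import (
    "fmt"

    gengo "k8s.io/gengo/v2"
)

func main() {
    lines := []string{
        "+foo=value1",
        "+bar",
        "+foo=value2",
    }
    tags := gengo.ExtractCommentTags("+", lines)
    fmt.Println(tags["foo"]) // [value1 value2]
    fmt.Println(tags["bar"]) // [] (one empty-string element for the value-less tag)
}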
98 vendor/k8s.io/gengo/v2/execute.go generated vendored Normal file
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package gengo is a code-generation framework.
|
||||
package gengo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/parser"
|
||||
)
|
||||
|
||||
// StdBuildTag is a suggested build-tag which tools can use both as an argument
|
||||
// to GoBoilerplate and to Execute.
|
||||
const StdBuildTag = "ignore_autogenerated"
|
||||
|
||||
// StdGeneratedBy is a suggested "generated by" line which tools can use as an
|
||||
// argument to GoBoilerplate.
|
||||
const StdGeneratedBy = "// Code generated by GENERATOR_NAME. DO NOT EDIT."
|
||||
|
||||
// GoBoilerplate returns the Go file header:
|
||||
// - an optional build tag (negative, set it to ignore generated code)
|
||||
// - an optional boilerplate file
|
||||
// - an optional "generated by" comment
|
||||
func GoBoilerplate(headerFile, buildTag, generatedBy string) ([]byte, error) {
|
||||
buf := bytes.Buffer{}
|
||||
|
||||
if buildTag != "" {
|
||||
buf.WriteString(
|
||||
fmt.Sprintf("//go:build !%s\n// +build !%s\n\n", buildTag, buildTag))
|
||||
}
|
||||
|
||||
if headerFile != "" {
|
||||
b, err := os.ReadFile(headerFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = bytes.ReplaceAll(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year())))
|
||||
buf.Write(b)
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
|
||||
if generatedBy != "" {
|
||||
generatorName := filepath.Base(os.Args[0])
|
||||
// Strip the extension from the name to normalize output between *nix and Windows.
|
||||
generatorName = generatorName[:len(generatorName)-len(filepath.Ext(generatorName))]
|
||||
generatedByComment := strings.ReplaceAll(generatedBy, "GENERATOR_NAME", generatorName)
|
||||
buf.WriteString(fmt.Sprintf("%s\n\n", generatedByComment))
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Execute implements most of a tool's main loop.
|
||||
func Execute(nameSystems namer.NameSystems, defaultSystem string, getTargets func(*generator.Context) []generator.Target, buildTag string, patterns []string) error {
|
||||
var buildTags []string
|
||||
if buildTag != "" {
|
||||
buildTags = append(buildTags, buildTag)
|
||||
}
|
||||
|
||||
p := parser.NewWithOptions(parser.Options{BuildTags: buildTags})
|
||||
if err := p.LoadPackages(patterns...); err != nil {
|
||||
return fmt.Errorf("failed making a parser: %v", err)
|
||||
}
|
||||
|
||||
c, err := generator.NewContext(p, nameSystems, defaultSystem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed making a context: %v", err)
|
||||
}
|
||||
|
||||
targets := getTargets(c)
|
||||
if err := c.ExecuteTargets(targets); err != nil {
|
||||
return fmt.Errorf("failed executing generator: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
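A hedged usage sketch for the boilerplate helpers above; `hack/boilerplate.go.txt` is a hypothetical license-header path used only for illustration:

```go
package main

import (
	"fmt"

	"k8s.io/gengo/v2"
)

func main() {
	// Assemble the standard header for generated files: the
	// "//go:build !ignore_autogenerated" tag, the license boilerplate with
	// YEAR substituted, and the "DO NOT EDIT" line naming this binary.
	header, err := gengo.GoBoilerplate("hack/boilerplate.go.txt", gengo.StdBuildTag, gengo.StdGeneratedBy)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", header)
	// A tool would normally pass this as SimpleTarget.HeaderComment.
}
```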
2
vendor/k8s.io/gengo/generator/doc.go → vendor/k8s.io/gengo/v2/generator/doc.go
generated
vendored
@@ -28,4 +28,4 @@ limitations under the License.
|
||||
// package. Additionally, all naming systems in the Context will be added as
|
||||
// functions to the parsed template, so that they can be called directly from
|
||||
// your templates!
|
||||
package generator // import "k8s.io/gengo/generator"
|
||||
package generator // import "k8s.io/gengo/v2/generator"
|
||||
155
vendor/k8s.io/gengo/generator/execute.go → vendor/k8s.io/gengo/v2/generator/execute.go
generated
vendored
@@ -18,42 +18,31 @@ package generator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/imports"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func errs2strings(errors []error) []string {
|
||||
strs := make([]string, len(errors))
|
||||
for i := range errors {
|
||||
strs[i] = errors[i].Error()
|
||||
}
|
||||
return strs
|
||||
}
|
||||
// ExecuteTargets runs the generators for the provided targets.
|
||||
func (c *Context) ExecuteTargets(targets []Target) error {
|
||||
klog.V(5).Infof("ExecuteTargets: %d targets", len(targets))
|
||||
|
||||
// ExecutePackages runs the generators for every package in 'packages'. 'outDir'
|
||||
// is the base directory in which to place all the generated packages; it
|
||||
// should be a physical path on disk, not an import path. e.g.:
|
||||
// /path/to/home/path/to/gopath/src/
|
||||
// Each package has its import path already, this will be appended to 'outDir'.
|
||||
func (c *Context) ExecutePackages(outDir string, packages Packages) error {
|
||||
var errors []error
|
||||
for _, p := range packages {
|
||||
if err := c.ExecutePackage(outDir, p); err != nil {
|
||||
errors = append(errors, err)
|
||||
var errs []error
|
||||
for _, tgt := range targets {
|
||||
if err := c.ExecuteTarget(tgt); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("some packages had errors:\n%v\n", strings.Join(errs2strings(errors), "\n"))
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("some targets had errors: %w", errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -65,6 +54,7 @@ type DefaultFileType struct {
|
||||
|
||||
func (ft DefaultFileType) AssembleFile(f *File, pathname string) error {
|
||||
klog.V(5).Infof("Assembling file %q", pathname)
|
||||
|
||||
destFile, err := os.Create(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -78,7 +68,7 @@ func (ft DefaultFileType) AssembleFile(f *File, pathname string) error {
|
||||
return et.Error()
|
||||
}
|
||||
if formatted, err := ft.Format(b.Bytes()); err != nil {
|
||||
err = fmt.Errorf("unable to format file %q (%v).", pathname, err)
|
||||
err = fmt.Errorf("unable to format file %q (%v)", pathname, err)
|
||||
// Write the file anyway, so they can see what's going wrong and fix the generator.
|
||||
if _, err2 := destFile.Write(b.Bytes()); err2 != nil {
|
||||
return err2
|
||||
@@ -90,42 +80,7 @@ func (ft DefaultFileType) AssembleFile(f *File, pathname string) error {
|
||||
}
|
||||
}
|
||||
|
||||
func (ft DefaultFileType) VerifyFile(f *File, pathname string) error {
|
||||
klog.V(5).Infof("Verifying file %q", pathname)
|
||||
friendlyName := filepath.Join(f.PackageName, f.Name)
|
||||
b := &bytes.Buffer{}
|
||||
et := NewErrorTracker(b)
|
||||
ft.Assemble(et, f)
|
||||
if et.Error() != nil {
|
||||
return et.Error()
|
||||
}
|
||||
formatted, err := ft.Format(b.Bytes())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to format the output for %q: %v", friendlyName, err)
|
||||
}
|
||||
existing, err := ioutil.ReadFile(pathname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read file %q for comparison: %v", friendlyName, err)
|
||||
}
|
||||
if bytes.Compare(formatted, existing) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Be nice and find the first place where they differ
|
||||
i := 0
|
||||
for i < len(formatted) && i < len(existing) && formatted[i] == existing[i] {
|
||||
i++
|
||||
}
|
||||
eDiff, fDiff := existing[i:], formatted[i:]
|
||||
if len(eDiff) > 100 {
|
||||
eDiff = eDiff[:100]
|
||||
}
|
||||
if len(fDiff) > 100 {
|
||||
fDiff = fDiff[:100]
|
||||
}
|
||||
return fmt.Errorf("output for %q differs; first existing/expected diff: \n %q\n %q", friendlyName, string(eDiff), string(fDiff))
|
||||
}
|
||||
|
||||
func assembleGolangFile(w io.Writer, f *File) {
|
||||
func assembleGoFile(w io.Writer, f *File) {
|
||||
w.Write(f.Header)
|
||||
fmt.Fprintf(w, "package %v\n\n", f.PackageName)
|
||||
|
||||
@@ -162,10 +117,10 @@ func importsWrapper(src []byte) ([]byte, error) {
|
||||
return imports.Process("", src, nil)
|
||||
}
|
||||
|
||||
func NewGolangFile() *DefaultFileType {
|
||||
func NewGoFile() *DefaultFileType {
|
||||
return &DefaultFileType{
|
||||
Format: importsWrapper,
|
||||
Assemble: assembleGolangFile,
|
||||
Assemble: assembleGoFile,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -208,33 +163,23 @@ func (c *Context) addNameSystems(namers namer.NameSystems) *Context {
|
||||
return &c2
|
||||
}
|
||||
|
||||
// ExecutePackage executes a single package. 'outDir' is the base directory in
|
||||
// which to place the package; it should be a physical path on disk, not an
|
||||
// import path. e.g.: '/path/to/home/path/to/gopath/src/' The package knows its
|
||||
// import path already, this will be appended to 'outDir'.
|
||||
func (c *Context) ExecutePackage(outDir string, p Package) error {
|
||||
path := filepath.Join(outDir, p.Path())
|
||||
|
||||
// When working outside of GOPATH, we typically won't want to generate the
|
||||
// full path for a package. For example, if our current project's root/base
|
||||
// package is github.com/foo/bar, outDir=., p.Path()=github.com/foo/bar/generated,
|
||||
// then we really want to be writing files to ./generated, not ./github.com/foo/bar/generated.
|
||||
// The following will trim a path prefix (github.com/foo/bar) from p.Path() to arrive at
|
||||
// a relative path that works with projects not in GOPATH.
|
||||
if c.TrimPathPrefix != "" {
|
||||
separator := string(filepath.Separator)
|
||||
if !strings.HasSuffix(c.TrimPathPrefix, separator) {
|
||||
c.TrimPathPrefix += separator
|
||||
}
|
||||
|
||||
path = strings.TrimPrefix(path, c.TrimPathPrefix)
|
||||
// ExecuteTarget runs the generators for a single target.
|
||||
func (c *Context) ExecuteTarget(tgt Target) error {
|
||||
tgtDir := tgt.Dir()
|
||||
if tgtDir == "" {
|
||||
return fmt.Errorf("no directory for target %s", tgt.Path())
|
||||
}
|
||||
klog.V(5).Infof("Processing package %q, disk location %q", p.Name(), path)
|
||||
klog.V(5).Infof("Executing target %q (%q)", tgt.Name(), tgtDir)
|
||||
|
||||
// Filter out any types the *package* doesn't care about.
|
||||
packageContext := c.filteredBy(p.Filter)
|
||||
os.MkdirAll(path, 0755)
|
||||
packageContext := c.filteredBy(tgt.Filter)
|
||||
|
||||
if err := os.MkdirAll(tgtDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files := map[string]*File{}
|
||||
for _, g := range p.Generators(packageContext) {
|
||||
for _, g := range tgt.Generators(packageContext) {
|
||||
// Filter out types the *generator* doesn't care about.
|
||||
genContext := packageContext.filteredBy(g.Filter)
|
||||
// Now add any extra name systems defined by this generator
|
||||
@@ -248,19 +193,17 @@ func (c *Context) ExecutePackage(outDir string, p Package) error {
|
||||
if f == nil {
|
||||
// This is the first generator to reference this file, so start it.
|
||||
f = &File{
|
||||
Name: g.Filename(),
|
||||
FileType: fileType,
|
||||
PackageName: p.Name(),
|
||||
PackagePath: p.Path(),
|
||||
PackageSourcePath: p.SourcePath(),
|
||||
Header: p.Header(g.Filename()),
|
||||
Imports: map[string]struct{}{},
|
||||
Name: g.Filename(),
|
||||
FileType: fileType,
|
||||
PackageName: tgt.Name(),
|
||||
PackagePath: tgt.Path(),
|
||||
PackageDir: tgt.Dir(),
|
||||
Header: tgt.Header(g.Filename()),
|
||||
Imports: map[string]struct{}{},
|
||||
}
|
||||
files[f.Name] = f
|
||||
} else {
|
||||
if f.FileType != g.FileType() {
|
||||
return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType())
|
||||
}
|
||||
} else if f.FileType != g.FileType() {
|
||||
return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType())
|
||||
}
|
||||
|
||||
if vars := g.PackageVars(genContext); len(vars) > 0 {
|
||||
@@ -289,25 +232,19 @@ func (c *Context) ExecutePackage(outDir string, p Package) error {
|
||||
}
|
||||
}
|
||||
|
||||
var errors []error
|
||||
var errs []error
|
||||
for _, f := range files {
|
||||
finalPath := filepath.Join(path, f.Name)
|
||||
finalPath := filepath.Join(tgtDir, f.Name)
|
||||
assembler, ok := c.FileTypes[f.FileType]
|
||||
if !ok {
|
||||
return fmt.Errorf("the file type %q registered for file %q does not exist in the context", f.FileType, f.Name)
|
||||
}
|
||||
var err error
|
||||
if c.Verify {
|
||||
err = assembler.VerifyFile(f, finalPath)
|
||||
} else {
|
||||
err = assembler.AssembleFile(f, finalPath)
|
||||
}
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
if err := assembler.AssembleFile(f, finalPath); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("errors in package %q:\n%v\n", p.Path(), strings.Join(errs2strings(errors), "\n"))
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("errors in target %q: %w", tgt.Path(), errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
123
vendor/k8s.io/gengo/generator/generator.go → vendor/k8s.io/gengo/v2/generator/generator.go
generated
vendored
@@ -20,19 +20,22 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/parser"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/parser"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// Package contains the contract for generating a package.
|
||||
type Package interface {
|
||||
// Name returns the package short name.
|
||||
// Target describes a Go package into which code will be generated. A single
|
||||
// Target may have many Generators, each of which emits one file.
|
||||
type Target interface {
|
||||
// Name returns the package short name (as in `package foo`).
|
||||
Name() string
|
||||
// Path returns the package import path.
|
||||
// Path returns the package import path (as in `import "example.com/foo"`).
|
||||
Path() string
|
||||
// SourcePath returns the location of the package on disk.
|
||||
SourcePath() string
|
||||
// Dir returns the location of the resulting package on disk. This may be
|
||||
// the same directory as an input package (when generating code in-place)
|
||||
// or a different directory entirely.
|
||||
Dir() string
|
||||
|
||||
// Filter should return true if this package cares about this type.
|
||||
// Otherwise, this type will be omitted from the type ordering for
|
||||
@@ -52,26 +55,22 @@ type Package interface {
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Name string
|
||||
FileType string
|
||||
PackageName string
|
||||
Header []byte
|
||||
PackagePath string
|
||||
PackageSourcePath string
|
||||
Imports map[string]struct{}
|
||||
Vars bytes.Buffer
|
||||
Consts bytes.Buffer
|
||||
Body bytes.Buffer
|
||||
Name string
|
||||
FileType string
|
||||
PackageName string
|
||||
Header []byte
|
||||
PackagePath string
|
||||
PackageDir string
|
||||
Imports map[string]struct{}
|
||||
Vars bytes.Buffer
|
||||
Consts bytes.Buffer
|
||||
Body bytes.Buffer
|
||||
}
|
||||
|
||||
type FileType interface {
|
||||
AssembleFile(f *File, path string) error
|
||||
VerifyFile(f *File, path string) error
|
||||
}
|
||||
|
||||
// Packages is a list of packages to generate.
|
||||
type Packages []Package
|
||||
|
||||
// Generator is the contract for anything that wants to do auto-generation.
|
||||
// It's expected that the io.Writers passed to the below functions will be
|
||||
// ErrorTrackers; this allows implementations to not check for io errors,
|
||||
@@ -110,7 +109,7 @@ type Generator interface {
|
||||
|
||||
// Init should write an init function, and any other content that's not
|
||||
// generated per-type. (It's not intended for generator specific
|
||||
// initialization! Do that when your Package constructs the
|
||||
// initialization! Do that when your Target constructs the
|
||||
// Generators.)
|
||||
Init(*Context, io.Writer) error
|
||||
|
||||
@@ -160,38 +159,25 @@ type Context struct {
|
||||
// All the types, in case you want to look up something.
|
||||
Universe types.Universe
|
||||
|
||||
// Incoming imports, i.e. packages importing the given package.
|
||||
incomingImports map[string][]string
|
||||
|
||||
// Incoming transitive imports, i.e. the transitive closure of IncomingImports
|
||||
incomingTransitiveImports map[string][]string
|
||||
|
||||
// All the user-specified packages. This is after recursive expansion.
|
||||
Inputs []string
|
||||
|
||||
// The canonical ordering of the types (will be filtered by both the
|
||||
// Package's and Generator's Filter methods).
|
||||
// Target's and Generator's Filter methods).
|
||||
Order []*types.Type
|
||||
|
||||
// A set of types this context can process. If this is empty or nil,
|
||||
// the default "golang" filetype will be provided.
|
||||
// the default "go" filetype will be provided.
|
||||
FileTypes map[string]FileType
|
||||
|
||||
// If true, Execute* calls will just verify that the existing output is
|
||||
// correct. (You may set this after calling NewContext.)
|
||||
Verify bool
|
||||
|
||||
// Allows generators to add packages at runtime.
|
||||
builder *parser.Builder
|
||||
|
||||
// If specified, trim the prefix from a package's path before writing files.
|
||||
TrimPathPrefix string
|
||||
parser *parser.Parser
|
||||
}
|
||||
|
||||
// NewContext generates a context from the given builder, naming systems, and
|
||||
// NewContext generates a context from the given parser, naming systems, and
|
||||
// the naming system you wish to construct the canonical ordering from.
|
||||
func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) {
|
||||
universe, err := b.FindTypes()
|
||||
func NewContext(p *parser.Parser, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) {
|
||||
universe, err := p.NewUniverse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -199,11 +185,11 @@ func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrder
|
||||
c := &Context{
|
||||
Namers: namer.NameSystems{},
|
||||
Universe: universe,
|
||||
Inputs: b.FindPackages(),
|
||||
Inputs: p.UserRequestedPackages(),
|
||||
FileTypes: map[string]FileType{
|
||||
GolangFileType: NewGolangFile(),
|
||||
GoFileType: NewGoFile(),
|
||||
},
|
||||
builder: b,
|
||||
parser: p,
|
||||
}
|
||||
|
||||
for name, systemNamer := range nameSystems {
|
||||
@@ -216,44 +202,13 @@ func NewContext(b *parser.Builder, nameSystems namer.NameSystems, canonicalOrder
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// IncomingImports returns the incoming imports for each package. The map is lazily computed.
|
||||
func (ctxt *Context) IncomingImports() map[string][]string {
|
||||
if ctxt.incomingImports == nil {
|
||||
incoming := map[string][]string{}
|
||||
for _, pkg := range ctxt.Universe {
|
||||
for imp := range pkg.Imports {
|
||||
incoming[imp] = append(incoming[imp], pkg.Path)
|
||||
}
|
||||
}
|
||||
ctxt.incomingImports = incoming
|
||||
}
|
||||
return ctxt.incomingImports
|
||||
// LoadPackages adds Go packages to the context.
|
||||
func (c *Context) LoadPackages(patterns ...string) ([]*types.Package, error) {
|
||||
return c.parser.LoadPackagesTo(&c.Universe, patterns...)
|
||||
}
|
||||
|
||||
// TransitiveIncomingImports returns the transitive closure of the incoming imports for each package.
|
||||
// The map is lazily computed.
|
||||
func (ctxt *Context) TransitiveIncomingImports() map[string][]string {
|
||||
if ctxt.incomingTransitiveImports == nil {
|
||||
ctxt.incomingTransitiveImports = transitiveClosure(ctxt.IncomingImports())
|
||||
}
|
||||
return ctxt.incomingTransitiveImports
|
||||
}
|
||||
|
||||
// AddDir adds a Go package to the context. The specified path must be a single
|
||||
// go package import path. GOPATH, GOROOT, and the location of your go binary
|
||||
// (`which go`) will all be searched, in the normal Go fashion.
|
||||
// Deprecated. Please use AddDirectory.
|
||||
func (ctxt *Context) AddDir(path string) error {
|
||||
ctxt.incomingImports = nil
|
||||
ctxt.incomingTransitiveImports = nil
|
||||
return ctxt.builder.AddDirTo(path, &ctxt.Universe)
|
||||
}
|
||||
|
||||
// AddDirectory adds a Go package to the context. The specified path must be a
|
||||
// single go package import path. GOPATH, GOROOT, and the location of your go
|
||||
// binary (`which go`) will all be searched, in the normal Go fashion.
|
||||
func (ctxt *Context) AddDirectory(path string) (*types.Package, error) {
|
||||
ctxt.incomingImports = nil
|
||||
ctxt.incomingTransitiveImports = nil
|
||||
return ctxt.builder.AddDirectoryTo(path, &ctxt.Universe)
|
||||
// FindPackages expands Go package patterns into a list of package import
|
||||
// paths, akin to `go list -find`.
|
||||
func (c *Context) FindPackages(patterns ...string) ([]string, error) {
|
||||
return c.parser.FindPackages(patterns...)
|
||||
}
|
||||
61
vendor/k8s.io/gengo/v2/generator/go_generator.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
GoFileType = "go"
|
||||
)
|
||||
|
||||
// GoGenerator implements a do-nothing Generator for Go files. It can be
|
||||
// used as a base for custom Generators, which embed it and then define the
|
||||
// methods they need to specialize.
|
||||
type GoGenerator struct {
|
||||
// OutputFilename is used as the Generator's name, and filename.
|
||||
OutputFilename string
|
||||
|
||||
// Body, if present, will be used as the return from the "Init" method.
|
||||
// This causes it to be static content for the entire file if no other
|
||||
// generator touches the file.
|
||||
OptionalBody []byte
|
||||
}
|
||||
|
||||
func (gg GoGenerator) Name() string { return gg.OutputFilename }
|
||||
func (gg GoGenerator) Filter(*Context, *types.Type) bool { return true }
|
||||
func (gg GoGenerator) Namers(*Context) namer.NameSystems { return nil }
|
||||
func (gg GoGenerator) Imports(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) PackageVars(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) PackageConsts(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) GenerateType(*Context, *types.Type, io.Writer) error { return nil }
|
||||
func (gg GoGenerator) Filename() string { return gg.OutputFilename }
|
||||
func (gg GoGenerator) FileType() string { return GoFileType }
|
||||
func (gg GoGenerator) Finalize(*Context, io.Writer) error { return nil }
|
||||
|
||||
func (gg GoGenerator) Init(c *Context, w io.Writer) error {
|
||||
_, err := w.Write(gg.OptionalBody)
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Generator(GoGenerator{})
|
||||
)
|
||||
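Since `GoGenerator` is meant to be embedded, here is a hedged sketch of a custom generator that overrides only `GenerateType`; the type and package names are illustrative, not part of gengo:

```go
package mygen

import (
	"fmt"
	"io"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

// stubGen embeds GoGenerator so that every Generator method has a do-nothing
// default, and overrides only the part it cares about.
type stubGen struct {
	generator.GoGenerator
}

var _ generator.Generator = stubGen{}

// GenerateType is called once per type that survives the target's and the
// generator's filters; here it just emits a placeholder comment.
func (g stubGen) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
	_, err := fmt.Fprintf(w, "// TODO: generate something for %s.%s\n", t.Name.Package, t.Name.Name)
	return err
}
```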
@@ -22,8 +22,8 @@ import (
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// NewImportTrackerForPackage creates a new import tracker which is aware
|
||||
@@ -45,7 +45,7 @@ import (
|
||||
func NewImportTrackerForPackage(local string, typesToAdd ...*types.Type) *namer.DefaultImportTracker {
|
||||
tracker := namer.NewDefaultImportTracker(types.Name{Package: local})
|
||||
tracker.IsInvalidType = func(*types.Type) bool { return false }
|
||||
tracker.LocalName = func(name types.Name) string { return golangTrackerLocalName(&tracker, name) }
|
||||
tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, name) }
|
||||
tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" }
|
||||
|
||||
tracker.AddTypes(typesToAdd...)
|
||||
@@ -56,7 +56,7 @@ func NewImportTracker(typesToAdd ...*types.Type) *namer.DefaultImportTracker {
|
||||
return NewImportTrackerForPackage("", typesToAdd...)
|
||||
}
|
||||
|
||||
func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
|
||||
func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
|
||||
path := t.Package
|
||||
|
||||
// Using backslashes in package names causes gengo to produce Go code which
|
||||
@@ -69,11 +69,11 @@ func golangTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
|
||||
for n := len(dirs) - 1; n >= 0; n-- {
|
||||
// follow kube convention of not having anything between directory names
|
||||
name := strings.Join(dirs[n:], "")
|
||||
name = strings.Replace(name, "_", "", -1)
|
||||
name = strings.ReplaceAll(name, "_", "")
|
||||
// These characters commonly appear in import paths for go
|
||||
// packages, but aren't legal go names. So we'll sanitize.
|
||||
name = strings.Replace(name, ".", "", -1)
|
||||
name = strings.Replace(name, "-", "", -1)
|
||||
name = strings.ReplaceAll(name, ".", "")
|
||||
name = strings.ReplaceAll(name, "-", "")
|
||||
if _, found := tracker.PathOf(name); found {
|
||||
// This name collides with some other package
|
||||
continue
|
||||
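A hedged sketch of how a generator might wire the import tracker above into its `Namers` and `Imports` hooks; `namer.NewRawNamer` is assumed to keep its v1 signature, and the other names here are illustrative:

```go
package mygen

import (
	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/namer"
)

// importAwareGen keeps a tracker, exposes a "raw" naming system that records
// which packages get referenced, and reports the collected import lines.
type importAwareGen struct {
	generator.GoGenerator
	outputPkg string
	imports   *namer.DefaultImportTracker
}

var _ generator.Generator = &importAwareGen{}

func newImportAwareGen(outputPkg string) *importAwareGen {
	return &importAwareGen{
		GoGenerator: generator.GoGenerator{OutputFilename: "zz_generated.example.go"},
		outputPkg:   outputPkg,
		imports:     generator.NewImportTrackerForPackage(outputPkg),
	}
}

// Namers lets templates refer to types via "raw" while the tracker remembers
// the packages that need to be imported.
func (g *importAwareGen) Namers(*generator.Context) namer.NameSystems {
	return namer.NameSystems{"raw": namer.NewRawNamer(g.outputPkg, g.imports)}
}

// Imports feeds the collected import lines back to gengo when the output
// file is assembled.
func (g *importAwareGen) Imports(*generator.Context) []string {
	return g.imports.ImportLines()
}
```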
77
vendor/k8s.io/gengo/v2/generator/simple_target.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// SimpleTarget implements Target in terms of static configuration.
|
||||
// The package name, path, and dir are required to be non-empty.
|
||||
type SimpleTarget struct {
|
||||
// PkgName is the name of the resulting package (as in "package xxxx").
|
||||
// Required.
|
||||
PkgName string
|
||||
// PkgPath is the canonical Go import-path of the resulting package (as in
|
||||
// "import example.com/xxxx/yyyy"). Required.
|
||||
PkgPath string
|
||||
// PkgDir is the location of the resulting package on disk (which may not
|
||||
// exist yet). It may be absolute or relative to CWD. Required.
|
||||
PkgDir string
|
||||
|
||||
// HeaderComment is emitted at the top of every output file. Optional.
|
||||
HeaderComment []byte
|
||||
|
||||
// PkgDocComment is emitted after the header comment for a "doc.go" file.
|
||||
// Optional.
|
||||
PkgDocComment []byte
|
||||
|
||||
// FilterFunc will be called to implement Target.Filter. Optional.
|
||||
FilterFunc func(*Context, *types.Type) bool
|
||||
|
||||
// GeneratorsFunc will be called to implement Target.Generators. Optional.
|
||||
GeneratorsFunc func(*Context) []Generator
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Name() string { return st.PkgName }
|
||||
func (st SimpleTarget) Path() string { return st.PkgPath }
|
||||
func (st SimpleTarget) Dir() string { return st.PkgDir }
|
||||
|
||||
func (st SimpleTarget) Filter(c *Context, t *types.Type) bool {
|
||||
if st.FilterFunc != nil {
|
||||
return st.FilterFunc(c, t)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Generators(c *Context) []Generator {
|
||||
if st.GeneratorsFunc != nil {
|
||||
return st.GeneratorsFunc(c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Header(filename string) []byte {
|
||||
if filename == "doc.go" {
|
||||
return append(st.HeaderComment, st.PkgDocComment...)
|
||||
}
|
||||
return st.HeaderComment
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Target(SimpleTarget{})
|
||||
)
|
||||
@@ -74,15 +74,15 @@ func NewSnippetWriter(w io.Writer, c *Context, left, right string) *SnippetWrite
|
||||
// return sw.Error()
|
||||
//
|
||||
// Where:
|
||||
// * "$" starts a template directive
|
||||
// * "." references the entire thing passed as args
|
||||
// * "type" therefore sees a map and looks up the key "type"
|
||||
// * "|" means "pass the thing on the left to the thing on the right"
|
||||
// * "public" is the name of a naming system, so the SnippetWriter has given
|
||||
// the template a function called "public" that takes a *types.Type and
|
||||
// returns the naming system's name. E.g., if the type is "string" this might
|
||||
// return "String".
|
||||
// * the second "$" ends the template directive.
|
||||
// - "$" starts a template directive
|
||||
// - "." references the entire thing passed as args
|
||||
// - "type" therefore sees a map and looks up the key "type"
|
||||
// - "|" means "pass the thing on the left to the thing on the right"
|
||||
// - "public" is the name of a naming system, so the SnippetWriter has given
|
||||
// the template a function called "public" that takes a *types.Type and
|
||||
// returns the naming system's name. E.g., if the type is "string" this might
|
||||
// return "String".
|
||||
// - the second "$" ends the template directive.
|
||||
//
|
||||
// The map is actually not necessary. The below does the same thing:
|
||||
//
|
||||
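Following the doc comment above, a hedged sketch of the usual `Do`/`Error` pattern; it assumes a naming system called "public" is registered in the Context and that `SnippetWriter.Do`/`Error` keep their v1 signatures:

```go
package mygen

import (
	"io"

	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/types"
)

// emitFor writes one templated line for t. "$.type|public$" looks up "type"
// in args and renders it with the "public" naming system from the Context.
func emitFor(c *generator.Context, t *types.Type, w io.Writer) error {
	sw := generator.NewSnippetWriter(w, c, "$", "$")
	args := map[string]interface{}{"type": t}
	sw.Do("// $.type|public$ was processed by this generator.\n", args)
	return sw.Error()
}
```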
2
vendor/k8s.io/gengo/namer/doc.go → vendor/k8s.io/gengo/v2/namer/doc.go
generated
vendored
@@ -28,4 +28,4 @@ limitations under the License.
|
||||
//
|
||||
// Additionally, a "RawNamer" can optionally keep track of what needs to be
|
||||
// imported.
|
||||
package namer // import "k8s.io/gengo/namer"
|
||||
package namer // import "k8s.io/gengo/v2/namer"
|
||||
@@ -19,7 +19,7 @@ package namer
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// ImportTracker may be passed to a namer.RawNamer, to track the imports needed
|
||||
@@ -99,7 +99,7 @@ func (tracker *DefaultImportTracker) ImportLines() []string {
|
||||
for path := range tracker.pathToName {
|
||||
importPaths = append(importPaths, path)
|
||||
}
|
||||
sort.Sort(sort.StringSlice(importPaths))
|
||||
sort.Strings(importPaths)
|
||||
out := []string{}
|
||||
for _, path := range importPaths {
|
||||
out = append(out, tracker.PrintImport(path, tracker.pathToName[path]))
|
||||
2
vendor/k8s.io/gengo/namer/namer.go → vendor/k8s.io/gengo/v2/namer/namer.go
generated
vendored
@@ -22,7 +22,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
2
vendor/k8s.io/gengo/namer/order.go → vendor/k8s.io/gengo/v2/namer/order.go
generated
vendored
@@ -19,7 +19,7 @@ package namer
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// Orderer produces an ordering of types given a Namer.
|
||||
@@ -19,7 +19,7 @@ package namer
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
var consonants = "bcdfghjklmnpqrstvwxyz"
|
||||
2
vendor/k8s.io/gengo/parser/doc.go → vendor/k8s.io/gengo/v2/parser/doc.go
generated
vendored
@@ -16,4 +16,4 @@ limitations under the License.
|
||||
|
||||
// Package parser provides code to parse go files, type-check them, extract the
|
||||
// types.
|
||||
package parser // import "k8s.io/gengo/parser"
|
||||
package parser // import "k8s.io/gengo/v2/parser"
|
||||
821
vendor/k8s.io/gengo/v2/parser/parse.go
generated
vendored
Normal file
@@ -0,0 +1,821 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package parser
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
gotypes "go/types"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/go/packages"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// Parser lets you add all the go files in all the packages that you care
|
||||
// about, then constructs the type source data.
|
||||
type Parser struct {
|
||||
// Map of package paths to definitions. These keys should be canonical
|
||||
// Go import paths (example.com/foo/bar) and not local paths (./foo/bar).
|
||||
goPkgs map[string]*packages.Package
|
||||
|
||||
// Keep track of which packages were directly requested (as opposed to
|
||||
// those which are transitively loaded).
|
||||
userRequested map[string]bool
|
||||
|
||||
// Keep track of which packages have already been scanned for types.
|
||||
fullyProcessed map[string]bool
|
||||
|
||||
// Build tags to set when loading packages.
|
||||
buildTags []string
|
||||
|
||||
// Tracks accumulated parsed files, so we can do position lookups later.
|
||||
fset *token.FileSet
|
||||
|
||||
// All comments from everywhere in every parsed file. This map is keyed by
|
||||
// the file-line on which the comment block ends, which makes it easy to
|
||||
// look up comments which immediately precede a given object (e.g. a type or
|
||||
// function definition), which is what we almost always want. We need this
|
||||
// because Go's own ast package does a very poor job of handling comments.
|
||||
endLineToCommentGroup map[fileLine]*ast.CommentGroup
|
||||
}
|
||||
|
||||
// key type for finding comments.
|
||||
type fileLine struct {
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
// New constructs a new Parser.
|
||||
func New() *Parser {
|
||||
return NewWithOptions(Options{})
|
||||
}
|
||||
|
||||
func NewWithOptions(opts Options) *Parser {
|
||||
return &Parser{
|
||||
goPkgs: map[string]*packages.Package{},
|
||||
userRequested: map[string]bool{},
|
||||
fullyProcessed: map[string]bool{},
|
||||
fset: token.NewFileSet(),
|
||||
endLineToCommentGroup: map[fileLine]*ast.CommentGroup{},
|
||||
buildTags: opts.BuildTags,
|
||||
}
|
||||
}
|
||||
|
||||
// Options holds optional settings for the Parser.
|
||||
type Options struct {
|
||||
// BuildTags is a list of optional tags to be specified when loading
|
||||
// packages.
|
||||
BuildTags []string
|
||||
}
|
||||
|
||||
// FindPackages expands the provided patterns into a list of Go import-paths,
|
||||
// much like `go list -find`.
|
||||
func (p *Parser) FindPackages(patterns ...string) ([]string, error) {
|
||||
return p.findPackages(nil, patterns...)
|
||||
}
|
||||
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) findPackages(baseCfg *packages.Config, patterns ...string) ([]string, error) {
|
||||
toFind := make([]string, 0, len(patterns))
|
||||
results := make([]string, 0, len(patterns))
|
||||
for _, pat := range patterns {
|
||||
if pkg := p.goPkgs[pat]; pkg != nil {
|
||||
results = append(results, pkg.PkgPath)
|
||||
} else {
|
||||
toFind = append(toFind, pat)
|
||||
}
|
||||
}
|
||||
if len(toFind) == 0 {
|
||||
return results, nil
|
||||
}
|
||||
|
||||
cfg := packages.Config{
|
||||
Mode: packages.NeedName | packages.NeedFiles,
|
||||
BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")},
|
||||
Tests: false,
|
||||
}
|
||||
if baseCfg != nil {
|
||||
// This is to support tests, e.g. to inject a fake GOPATH or CWD.
|
||||
cfg.Dir = baseCfg.Dir
|
||||
cfg.Env = baseCfg.Env
|
||||
}
|
||||
|
||||
pkgs, err := packages.Load(&cfg, toFind...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading packages: %w", err)
|
||||
}
|
||||
var allErrs []error
|
||||
for _, pkg := range pkgs {
|
||||
results = append(results, pkg.PkgPath)
|
||||
|
||||
// pkg.Errors is not a slice of `error`, but concrete types. We have
|
||||
// to iteratively convert each one into `error`.
|
||||
var errs []error
|
||||
for _, e := range pkg.Errors {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
allErrs = append(allErrs, fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...)))
|
||||
}
|
||||
}
|
||||
if len(allErrs) != 0 {
|
||||
return nil, errors.Join(allErrs...)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// LoadPackages loads and parses the specified Go packages. Specifically
|
||||
// named packages (without a trailing "/...") which do not exist or have no Go
|
||||
// files are an error.
|
||||
func (p *Parser) LoadPackages(patterns ...string) error {
|
||||
_, err := p.loadPackages(patterns...)
|
||||
return err
|
||||
}
|
||||
|
||||
// LoadPackagesWithConfigForTesting loads and parses the specified Go packages with the
|
||||
// specified packages.Config as a starting point. This is for testing, and
|
||||
// only the .Dir and .Env fields of the Config will be considered.
|
||||
func (p *Parser) LoadPackagesWithConfigForTesting(cfg *packages.Config, patterns ...string) error {
|
||||
_, err := p.loadPackagesWithConfig(cfg, patterns...)
|
||||
return err
|
||||
}
|
||||
|
||||
// LoadPackagesTo loads and parses the specified Go packages, and inserts them
|
||||
// into the specified Universe. It returns the packages which match the
|
||||
// patterns, but loads all packages and their imports, recursively, into the
|
||||
// universe. See NewUniverse for more.
|
||||
func (p *Parser) LoadPackagesTo(u *types.Universe, patterns ...string) ([]*types.Package, error) {
|
||||
// Load Packages.
|
||||
pkgs, err := p.loadPackages(patterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Load types in all packages (it will internally filter).
|
||||
if err := p.addPkgsToUniverse(pkgs, u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the results as gengo types.Packages.
|
||||
ret := make([]*types.Package, 0, len(pkgs))
|
||||
for _, pkg := range pkgs {
|
||||
ret = append(ret, u.Package(pkg.PkgPath))
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *Parser) loadPackages(patterns ...string) ([]*packages.Package, error) {
|
||||
return p.loadPackagesWithConfig(nil, patterns...)
|
||||
}
|
||||
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) loadPackagesWithConfig(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, error) {
|
||||
klog.V(5).Infof("loadPackages %q", patterns)
|
||||
|
||||
// Loading packages is slow - only do ones we know we have not already done
|
||||
// (e.g. if a tool calls LoadPackages itself).
|
||||
existingPkgs, netNewPkgs, err := p.alreadyLoaded(baseCfg, patterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if vlog := klog.V(5); vlog.Enabled() {
|
||||
if len(existingPkgs) > 0 {
|
||||
keys := make([]string, 0, len(existingPkgs))
|
||||
for _, p := range existingPkgs {
|
||||
keys = append(keys, p.PkgPath)
|
||||
}
|
||||
vlog.Infof(" already have: %q", keys)
|
||||
}
|
||||
if len(netNewPkgs) > 0 {
|
||||
vlog.Infof(" to be loaded: %q", netNewPkgs)
|
||||
}
|
||||
}
|
||||
|
||||
// If these were not user-requested before, they are now.
|
||||
for _, pkg := range existingPkgs {
|
||||
if !p.userRequested[pkg.PkgPath] {
|
||||
p.userRequested[pkg.PkgPath] = true
|
||||
}
|
||||
}
|
||||
for _, pkg := range netNewPkgs {
|
||||
if !p.userRequested[pkg] {
|
||||
p.userRequested[pkg] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(netNewPkgs) == 0 {
|
||||
return existingPkgs, nil
|
||||
}
|
||||
|
||||
cfg := packages.Config{
|
||||
Mode: packages.NeedName |
|
||||
packages.NeedFiles | packages.NeedImports | packages.NeedDeps |
|
||||
packages.NeedModule | packages.NeedTypes | packages.NeedSyntax,
|
||||
BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")},
|
||||
Fset: p.fset,
|
||||
Tests: false,
|
||||
}
|
||||
if baseCfg != nil {
|
||||
// This is to support tests, e.g. to inject a fake GOPATH or CWD.
|
||||
cfg.Dir = baseCfg.Dir
|
||||
cfg.Env = baseCfg.Env
|
||||
}
|
||||
|
||||
tBefore := time.Now()
|
||||
pkgs, err := packages.Load(&cfg, netNewPkgs...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading packages: %w", err)
|
||||
}
|
||||
klog.V(5).Infof(" loaded %d pkg(s) in %v", len(pkgs), time.Since(tBefore))
|
||||
|
||||
// Handle any errors.
|
||||
collectErrors := func(pkg *packages.Package) error {
|
||||
var errs []error
|
||||
for _, e := range pkg.Errors {
|
||||
if e.Kind == packages.ListError || e.Kind == packages.ParseError {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, collectErrors); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Finish integrating packages into our state.
|
||||
absorbPkg := func(pkg *packages.Package) error {
|
||||
p.goPkgs[pkg.PkgPath] = pkg
|
||||
|
||||
for _, f := range pkg.Syntax {
|
||||
for _, c := range f.Comments {
|
||||
// We need to do this on _every_ pkg, not just user-requested
|
||||
// ones, because some generators look at tags in other
|
||||
// packages.
|
||||
//
|
||||
// TODO: It would be nice if we only did this on user-requested
|
||||
// packages. The problem is that we don't always know which
|
||||
// other packages will need this information, and even when we
|
||||
// do we may have already loaded the package (as a transitive
|
||||
// dep) and might have stored pointers into it. Doing a
|
||||
// thorough "reload" without invalidating all those pointers is
|
||||
// a problem for another day.
|
||||
position := p.fset.Position(c.End()) // Fset is synchronized
|
||||
p.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, absorbPkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return append(existingPkgs, pkgs...), nil
|
||||
}
|
||||
|
||||
// alreadyLoaded figures out which of the specified patterns have already been loaded
|
||||
// and which have not, and returns those respectively.
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) alreadyLoaded(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, []string, error) {
|
||||
existingPkgs := make([]*packages.Package, 0, len(patterns))
|
||||
netNewPkgs := make([]string, 0, len(patterns))
|
||||
|
||||
// Expand and canonicalize the requested patterns. This should be fast.
|
||||
if pkgPaths, err := p.findPackages(baseCfg, patterns...); err != nil {
|
||||
return nil, nil, err
|
||||
} else {
|
||||
for _, pkgPath := range pkgPaths {
|
||||
if pkg := p.goPkgs[pkgPath]; pkg != nil {
|
||||
existingPkgs = append(existingPkgs, pkg)
|
||||
} else {
|
||||
netNewPkgs = append(netNewPkgs, pkgPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
return existingPkgs, netNewPkgs, nil
|
||||
}
|
||||
|
||||
// forEachPackageRecursive will run the provided function on all of the specified
|
||||
// packages, and on their imports recursively. Errors are accumulated and
|
||||
// returned via errors.Join.
|
||||
func forEachPackageRecursive(pkgs []*packages.Package, fn func(pkg *packages.Package) error) error {
|
||||
seen := map[string]bool{} // PkgPaths we have already visited
|
||||
var errs []error
|
||||
for _, pkg := range pkgs {
|
||||
errs = append(errs, recursePackage(pkg, fn, seen)...)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recursePackage(pkg *packages.Package, fn func(pkg *packages.Package) error, seen map[string]bool) []error {
|
||||
if seen[pkg.PkgPath] {
|
||||
return nil
|
||||
}
|
||||
var errs []error
|
||||
seen[pkg.PkgPath] = true
|
||||
if err := fn(pkg); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
for _, imp := range pkg.Imports {
|
||||
errs = append(errs, recursePackage(imp, fn, seen)...)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// UserRequestedPackages fetches a list of the user-imported packages.
|
||||
func (p *Parser) UserRequestedPackages() []string {
|
||||
// Iterate packages in a predictable order.
|
||||
pkgPaths := make([]string, 0, len(p.userRequested))
|
||||
for k := range p.userRequested {
|
||||
pkgPaths = append(pkgPaths, string(k))
|
||||
}
|
||||
sort.Strings(pkgPaths)
|
||||
return pkgPaths
|
||||
}
|
||||
|
||||
// NewUniverse finalizes the loaded packages, searches through them for types
|
||||
// and produces a new Universe. The returned Universe has one types.Package
|
||||
// entry for each Go package that has been loaded, including all of their
|
||||
// dependencies, recursively. It also has one entry, whose key is "", which
|
||||
// represents "builtin" types.
|
||||
func (p *Parser) NewUniverse() (types.Universe, error) {
|
||||
u := types.Universe{}
|
||||
|
||||
pkgs := []*packages.Package{}
|
||||
for _, path := range p.UserRequestedPackages() {
|
||||
pkgs = append(pkgs, p.goPkgs[path])
|
||||
}
|
||||
if err := p.addPkgsToUniverse(pkgs, &u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
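A hedged end-to-end sketch of the Parser API defined in this file; the `./...` pattern and build tag are illustrative, and the `Types` field on `types.Package` is assumed from gengo's types package (not shown in this diff):

```go
package main

import (
	"fmt"

	"k8s.io/gengo/v2/parser"
)

func main() {
	// Build tags are passed through to package loading (like `go build -tags`).
	p := parser.NewWithOptions(parser.Options{BuildTags: []string{"ignore_autogenerated"}})

	// Load the requested packages (and, transitively, their imports).
	if err := p.LoadPackages("./..."); err != nil {
		panic(err)
	}

	// NewUniverse extracts gengo type information from everything loaded;
	// only user-requested packages are fully processed.
	u, err := p.NewUniverse()
	if err != nil {
		panic(err)
	}

	for _, pkgPath := range p.UserRequestedPackages() {
		pkg := u.Package(pkgPath)
		fmt.Printf("%s: %d types\n", pkg.Path, len(pkg.Types))
	}
}
```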
|
||||
// addCommentsToType takes any accumulated comment lines prior to obj and
|
||||
// attaches them to the type t.
|
||||
func (p *Parser) addCommentsToType(obj gotypes.Object, t *types.Type) {
|
||||
t.CommentLines = p.docComment(obj.Pos())
|
||||
t.SecondClosestCommentLines = p.priorDetachedComment(obj.Pos())
|
||||
}
|
||||
|
||||
// packageDir tries to figure out the directory of the specified package.
|
||||
func packageDir(pkg *packages.Package) (string, error) {
|
||||
// Sometimes Module is present but has no Dir, e.g. when it is vendored.
|
||||
if pkg.Module != nil && pkg.Module.Dir != "" {
|
||||
// NOTE: this will not work if tests are loaded, because Go mutates the
|
||||
// Package.PkgPath.
|
||||
subdir := strings.TrimPrefix(pkg.PkgPath, pkg.Module.Path)
|
||||
return filepath.Join(pkg.Module.Dir, subdir), nil
|
||||
}
|
||||
if len(pkg.GoFiles) > 0 {
|
||||
return filepath.Dir(pkg.GoFiles[0]), nil
|
||||
}
|
||||
if len(pkg.IgnoredFiles) > 0 {
|
||||
return filepath.Dir(pkg.IgnoredFiles[0]), nil
|
||||
}
|
||||
return "", fmt.Errorf("can't find package dir for %q - no module info and no Go files", pkg.PkgPath)
|
||||
}
|
||||
|
||||
// addPkgsToUniverse adds the packages, and all of their deps, recursively, to
|
||||
// the universe and (if needed) searches through them for types.
|
||||
func (p *Parser) addPkgsToUniverse(pkgs []*packages.Package, u *types.Universe) error {
|
||||
addOne := func(pkg *packages.Package) error {
|
||||
if err := p.addPkgToUniverse(pkg, u); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, addOne); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addPkgToUniverse adds one package to the universe and (if needed) searches
|
||||
// through it for types.
|
||||
func (p *Parser) addPkgToUniverse(pkg *packages.Package, u *types.Universe) error {
|
||||
pkgPath := pkg.PkgPath
|
||||
if p.fullyProcessed[pkgPath] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This will get-or-create the Package.
|
||||
gengoPkg := u.Package(pkgPath)
|
||||
|
||||
if gengoPkg.Dir == "" {
|
||||
// We're keeping this package, though we might not fully process it.
|
||||
if vlog := klog.V(5); vlog.Enabled() {
|
||||
why := "user-requested"
|
||||
if !p.userRequested[pkgPath] {
|
||||
why = "dependency"
|
||||
}
|
||||
vlog.Infof("addPkgToUniverse %q (%s)", pkgPath, why)
|
||||
}
|
||||
|
||||
absPath := ""
|
||||
if dir, err := packageDir(pkg); err != nil {
|
||||
return err
|
||||
} else {
|
||||
absPath = dir
|
||||
}
|
||||
|
||||
gengoPkg.Path = pkg.PkgPath
|
||||
gengoPkg.Dir = absPath
|
||||
}
|
||||
|
||||
// If the package was not user-requested, we can stop here.
|
||||
if !p.userRequested[pkgPath] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark it as done, so we don't ever re-process it.
|
||||
p.fullyProcessed[pkgPath] = true
|
||||
gengoPkg.Name = pkg.Name
|
||||
|
||||
// For historical reasons we treat files named "doc.go" specially.
|
||||
// TODO: It would be nice to not do this and instead treat package
|
||||
// doc-comments as the "global" config place. This would require changing
|
||||
// most generators and input files.
|
||||
for _, f := range pkg.Syntax {
|
||||
// This gets the filename for the ast.File. Iterating pkg.GoFiles is
|
||||
// documented as unreliable.
|
||||
pos := p.fset.Position(f.FileStart)
|
||||
if filepath.Base(pos.Filename) == "doc.go" {
|
||||
gengoPkg.Comments = []string{}
|
||||
for i := range f.Comments {
|
||||
gengoPkg.Comments = append(gengoPkg.Comments, splitLines(f.Comments[i].Text())...)
|
||||
}
|
||||
if f.Doc != nil {
|
||||
gengoPkg.DocComments = splitLines(f.Doc.Text())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Walk all the types, recursively and save them for later access.
|
||||
s := pkg.Types.Scope()
|
||||
for _, n := range s.Names() {
|
||||
switch obj := s.Lookup(n).(type) {
|
||||
case *gotypes.TypeName:
|
||||
t := p.walkType(*u, nil, obj.Type())
|
||||
p.addCommentsToType(obj, t)
|
||||
case *gotypes.Func:
|
||||
// We only care about functions, not concrete/abstract methods.
|
||||
if obj.Type() != nil && obj.Type().(*gotypes.Signature).Recv() == nil {
|
||||
t := p.addFunction(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
}
|
||||
case *gotypes.Var:
|
||||
if !obj.IsField() {
|
||||
t := p.addVariable(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
}
|
||||
case *gotypes.Const:
|
||||
t := p.addConstant(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
default:
|
||||
klog.Infof("addPkgToUniverse %q: unhandled object of type %T: %v", pkgPath, obj, obj)
|
||||
}
|
||||
}
|
||||
|
||||
// Add all of this package's imports.
|
||||
importedPkgs := []string{}
|
||||
for _, imp := range pkg.Imports {
|
||||
if err := p.addPkgToUniverse(imp, u); err != nil {
|
||||
return err
|
||||
}
|
||||
importedPkgs = append(importedPkgs, imp.PkgPath)
|
||||
}
|
||||
sort.Strings(importedPkgs)
|
||||
u.AddImports(pkg.PkgPath, importedPkgs...)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the specified position has a "doc comment", return that.
|
||||
func (p *Parser) docComment(pos token.Pos) []string {
|
||||
// An object's doc comment always ends on the line before the object's own
|
||||
// declaration.
|
||||
c1 := p.priorCommentLines(pos, 1)
|
||||
return splitLines(c1.Text()) // safe even if c1 is nil
|
||||
}
|
||||
|
||||
// If there is a detached (not immediately before a declaration) comment,
|
||||
// return that.
|
||||
func (p *Parser) priorDetachedComment(pos token.Pos) []string {
|
||||
// An object's doc comment always ends on the line before the object's own
|
||||
// declaration.
|
||||
c1 := p.priorCommentLines(pos, 1)
|
||||
|
||||
// Using a literal "2" here is brittle in theory (it means literally 2
|
||||
// lines), but in practice Go code is gofmt'ed (which elides repeated blank
|
||||
// lines), so it works.
|
||||
var c2 *ast.CommentGroup
|
||||
if c1 == nil {
|
||||
c2 = p.priorCommentLines(pos, 2)
|
||||
} else {
|
||||
c2 = p.priorCommentLines(c1.List[0].Slash, 2)
|
||||
}
|
||||
return splitLines(c2.Text()) // safe even if c2 is nil
|
||||
}
|
||||
|
||||
// If there's a comment block which ends nlines before pos, return it.
|
||||
func (p *Parser) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup {
|
||||
position := p.fset.Position(pos)
|
||||
key := fileLine{position.Filename, position.Line - lines}
|
||||
return p.endLineToCommentGroup[key]
|
||||
}
|
||||
|
||||
func splitLines(str string) []string {
|
||||
return strings.Split(strings.TrimRight(str, "\n"), "\n")
|
||||
}
|
||||
|
||||
func goFuncNameToName(in string) types.Name {
|
||||
name := strings.TrimPrefix(in, "func ")
|
||||
nameParts := strings.Split(name, "(")
|
||||
return goNameToName(nameParts[0])
|
||||
}
|
||||
|
||||
func goVarNameToName(in string) types.Name {
|
||||
nameParts := strings.Split(in, " ")
|
||||
// nameParts[0] is "var".
|
||||
// nameParts[2:] is the type of the variable, we ignore it for now.
|
||||
return goNameToName(nameParts[1])
|
||||
}
|
||||
|
||||
func goNameToName(in string) types.Name {
|
||||
// Detect anonymous type names. (These may have '.' characters because
|
||||
// embedded types may have packages, so we detect them specially.)
|
||||
if strings.HasPrefix(in, "struct{") ||
|
||||
strings.HasPrefix(in, "<-chan") ||
|
||||
strings.HasPrefix(in, "chan<-") ||
|
||||
strings.HasPrefix(in, "chan ") ||
|
||||
strings.HasPrefix(in, "func(") ||
|
||||
strings.HasPrefix(in, "func (") ||
|
||||
strings.HasPrefix(in, "*") ||
|
||||
strings.HasPrefix(in, "map[") ||
|
||||
strings.HasPrefix(in, "[") {
|
||||
return types.Name{Name: in}
|
||||
}
|
||||
|
||||
// Otherwise, if there are '.' characters present, the name has a
|
||||
// package path in front.
|
||||
nameParts := strings.Split(in, ".")
|
||||
name := types.Name{Name: in}
|
||||
if n := len(nameParts); n >= 2 {
|
||||
// The final "." is the name of the type--previous ones must
|
||||
// have been in the package path.
|
||||
name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (p *Parser) convertSignature(u types.Universe, t *gotypes.Signature) *types.Signature {
|
||||
signature := &types.Signature{}
|
||||
for i := 0; i < t.Params().Len(); i++ {
|
||||
signature.Parameters = append(signature.Parameters, p.walkType(u, nil, t.Params().At(i).Type()))
|
||||
signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name())
|
||||
}
|
||||
for i := 0; i < t.Results().Len(); i++ {
|
||||
signature.Results = append(signature.Results, p.walkType(u, nil, t.Results().At(i).Type()))
|
||||
signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name())
|
||||
}
|
||||
if r := t.Recv(); r != nil {
|
||||
signature.Receiver = p.walkType(u, nil, r.Type())
|
||||
}
|
||||
signature.Variadic = t.Variadic()
|
||||
return signature
|
||||
}
|
||||
|
||||
// walkType adds the type, and any necessary child types.
func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type) *types.Type {
	// Most of the cases are underlying types of the named type.
	name := goNameToName(in.String())
	if useName != nil {
		name = *useName
	}

	switch t := in.(type) {
	case *gotypes.Struct:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Struct
		for i := 0; i < t.NumFields(); i++ {
			f := t.Field(i)
			m := types.Member{
				Name:         f.Name(),
				Embedded:     f.Anonymous(),
				Tags:         t.Tag(i),
				Type:         p.walkType(u, nil, f.Type()),
				CommentLines: p.docComment(f.Pos()),
			}
			out.Members = append(out.Members, m)
		}
		return out
	case *gotypes.Map:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Map
		out.Elem = p.walkType(u, nil, t.Elem())
		out.Key = p.walkType(u, nil, t.Key())
		return out
	case *gotypes.Pointer:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Pointer
		out.Elem = p.walkType(u, nil, t.Elem())
		return out
	case *gotypes.Slice:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Slice
		out.Elem = p.walkType(u, nil, t.Elem())
		return out
	case *gotypes.Array:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Array
		out.Elem = p.walkType(u, nil, t.Elem())
		out.Len = in.(*gotypes.Array).Len()
		return out
	case *gotypes.Chan:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Chan
		out.Elem = p.walkType(u, nil, t.Elem())
		// TODO: need to store direction, otherwise raw type name
		// cannot be properly written.
		return out
	case *gotypes.Basic:
		out := u.Type(types.Name{
			Package: "", // This is a magic package name in the Universe.
			Name:    t.Name(),
		})
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Unsupported
		return out
	case *gotypes.Signature:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Func
		out.Signature = p.convertSignature(u, t)
		return out
	case *gotypes.Interface:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Interface
		t.Complete()
		for i := 0; i < t.NumMethods(); i++ {
			if out.Methods == nil {
				out.Methods = map[string]*types.Type{}
			}
			method := t.Method(i)
			name := goNameToName(method.String())
			mt := p.walkType(u, &name, method.Type())
			mt.CommentLines = p.docComment(method.Pos())
			out.Methods[method.Name()] = mt
		}
		return out
	case *gotypes.Named:
		var out *types.Type
		switch t.Underlying().(type) {
		case *gotypes.Named, *gotypes.Basic, *gotypes.Map, *gotypes.Slice:
			name := goNameToName(t.String())
			out = u.Type(name)
			if out.Kind != types.Unknown {
				return out
			}
			out.Kind = types.Alias
			out.Underlying = p.walkType(u, nil, t.Underlying())
		default:
			// gotypes package makes everything "named" with an
			// underlying anonymous type--we remove that annoying
			// "feature" for users. This flattens those types
			// together.
			name := goNameToName(t.String())
			if out := u.Type(name); out.Kind != types.Unknown {
				return out // short circuit if we've already made this.
			}
			out = p.walkType(u, &name, t.Underlying())
		}
		// If the underlying type didn't already add methods, add them.
		// (Interface types will have already added methods.)
		if len(out.Methods) == 0 {
			for i := 0; i < t.NumMethods(); i++ {
				if out.Methods == nil {
					out.Methods = map[string]*types.Type{}
				}
				method := t.Method(i)
				name := goNameToName(method.String())
				mt := p.walkType(u, &name, method.Type())
				mt.CommentLines = p.docComment(method.Pos())
				out.Methods[method.Name()] = mt
			}
		}
		return out
	default:
		out := u.Type(name)
		if out.Kind != types.Unknown {
			return out
		}
		out.Kind = types.Unsupported
		klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t)
		return out
	}
}
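
The overall shape of walkType is a type switch over the concrete go/types implementations plus memoization through the Universe (the early "if out.Kind != types.Unknown" returns). The toy sketch below is not gengo code: the demo package source is invented, and it deliberately skips the Universe memoization, so it would loop on recursive types. It type-checks a tiny snippet and dispatches on the same go/types cases:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package demo

type ID string

type Obj struct {
	Labels map[string]ID
	Owners []*ID
}
`

// describe is a toy analogue of walkType's switch: dispatch on the concrete
// go/types implementation and recurse into key/element/field types.
func describe(t types.Type) string {
	switch t := t.(type) {
	case *types.Named:
		return t.Obj().Name() + " (named, underlying " + describe(t.Underlying()) + ")"
	case *types.Struct:
		s := "struct {"
		for i := 0; i < t.NumFields(); i++ {
			s += " " + t.Field(i).Name() + ": " + describe(t.Field(i).Type()) + ";"
		}
		return s + " }"
	case *types.Map:
		return "map[" + describe(t.Key()) + "]" + describe(t.Elem())
	case *types.Slice:
		return "slice of " + describe(t.Elem())
	case *types.Pointer:
		return "pointer to " + describe(t.Elem())
	case *types.Basic:
		return t.Name()
	default:
		return "unsupported " + t.String()
	}
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(describe(pkg.Scope().Lookup("Obj").Type()))
}
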
func (p *Parser) addFunction(u types.Universe, useName *types.Name, in *gotypes.Func) *types.Type {
	name := goFuncNameToName(in.String())
	if useName != nil {
		name = *useName
	}
	out := u.Function(name)
	out.Kind = types.DeclarationOf
	out.Underlying = p.walkType(u, nil, in.Type())
	return out
}

func (p *Parser) addVariable(u types.Universe, useName *types.Name, in *gotypes.Var) *types.Type {
	name := goVarNameToName(in.String())
	if useName != nil {
		name = *useName
	}
	out := u.Variable(name)
	out.Kind = types.DeclarationOf
	out.Underlying = p.walkType(u, nil, in.Type())
	return out
}

func (p *Parser) addConstant(u types.Universe, useName *types.Name, in *gotypes.Const) *types.Type {
	name := goVarNameToName(in.String())
	if useName != nil {
		name = *useName
	}
	out := u.Constant(name)
	out.Kind = types.DeclarationOf
	out.Underlying = p.walkType(u, nil, in.Type())

	var constval string

	// For strings, we use `StringVal()` to get the un-truncated,
	// un-quoted string. For other values, `.String()` is preferable to
	// get something relatively human readable (especially since for
	// floating point types, `ExactString()` will generate numeric
	// expressions using `big.(*Float).Text()`).
	switch in.Val().Kind() {
	case constant.String:
		constval = constant.StringVal(in.Val())
	default:
		constval = in.Val().String()
	}

	out.ConstValue = &constval
	return out
}
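
The string-versus-everything-else distinction in that comment is easy to see with the standard go/constant package directly; a small sketch with arbitrary values:

package main

import (
	"fmt"
	"go/constant"
)

func main() {
	s := constant.MakeString("hello\nworld")
	fmt.Println(s.String())            // quoted (and, for long strings, shortened) form
	fmt.Println(constant.StringVal(s)) // the raw, un-quoted, un-truncated string

	f := constant.MakeFloat64(0.1)
	fmt.Println(f.String())      // short human-readable approximation: 0.1
	fmt.Println(f.ExactString()) // exact rational, e.g. 3602879701896397/36028797018963968
}
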
2
vendor/k8s.io/gengo/types/doc.go → vendor/k8s.io/gengo/v2/types/doc.go
generated
vendored
@@ -16,4 +16,4 @@ limitations under the License.
 
 // Package types contains go type information, packaged in a way that makes
 // auto-generation convenient, whether by template or straight go functions.
-package types // import "k8s.io/gengo/types"
+package types // import "k8s.io/gengo/v2/types"
Some files were not shown because too many files have changed in this diff.