implement sync logic
parent 0d69426b2c
commit 35e0ee2e5f

139 changed files with 1102 additions and 38093 deletions
cache/cache.go (vendored, 40 lines deleted)

@@ -1,40 +0,0 @@
package cache

//go:generate mockery -name Cache -output mock -case=underscore

import (
	"time"

	"github.com/koding/cache"
	"golang.org/x/net/context"
)

type Cache interface {
	Get(string) (interface{}, error)
	Set(string, interface{}) error
	Delete(string) error
}

func Get(c context.Context, key string) (interface{}, error) {
	return FromContext(c).Get(key)
}

func Set(c context.Context, key string, value interface{}) error {
	return FromContext(c).Set(key, value)
}

func Delete(c context.Context, key string) error {
	return FromContext(c).Delete(key)
}

// Default creates an in-memory cache with the default
// 30 minute expiration period.
func Default() Cache {
	return NewTTL(time.Minute * 30)
}

// NewTTL returns an in-memory cache with the specified
// ttl expiration period.
func NewTTL(t time.Duration) Cache {
	return cache.NewMemoryWithTTL(t)
}
cache/cache_test.go (vendored, 34 lines deleted)

@@ -1,34 +0,0 @@
package cache

import (
	"testing"

	"github.com/franela/goblin"
	"github.com/gin-gonic/gin"
)

func TestCache(t *testing.T) {
	g := goblin.Goblin(t)
	g.Describe("Cache", func() {

		var c *gin.Context
		g.BeforeEach(func() {
			c = new(gin.Context)
			ToContext(c, Default())
		})

		g.It("Should set and get an item", func() {
			Set(c, "foo", "bar")
			v, e := Get(c, "foo")
			g.Assert(v).Equal("bar")
			g.Assert(e == nil).IsTrue()
		})

		g.It("Should return nil when item not found", func() {
			v, e := Get(c, "foo")
			g.Assert(v == nil).IsTrue()
			g.Assert(e == nil).IsFalse()
		})
	})
}
cache/context.go (vendored, 23 lines deleted)

@@ -1,23 +0,0 @@
package cache

import (
	"golang.org/x/net/context"
)

const key = "cache"

// Setter defines a context that enables setting values.
type Setter interface {
	Set(string, interface{})
}

// FromContext returns the Cache associated with this context.
func FromContext(c context.Context) Cache {
	return c.Value(key).(Cache)
}

// ToContext adds the Cache to this context if it supports
// the Setter interface.
func ToContext(c Setter, cache Cache) {
	c.Set(key, cache)
}
cache/helper.go (vendored, 99 lines deleted)

@@ -1,99 +0,0 @@
package cache

import (
	"fmt"

	"github.com/drone/drone/model"
	"github.com/drone/drone/remote"
	"golang.org/x/net/context"
)

// GetPerms returns the user permissions repositories from the cache
// associated with the current repository.
func GetPerms(c context.Context, user *model.User, owner, name string) (*model.Perm, error) {
	key := fmt.Sprintf("perms:%s:%s/%s",
		user.Login,
		owner,
		name,
	)
	// if we fetch from the cache we can return immediately
	val, err := Get(c, key)
	if err == nil {
		return val.(*model.Perm), nil
	}
	// else we try to grab from the remote system and
	// populate our cache.
	perm, err := remote.Perm(c, user, owner, name)
	if err != nil {
		return nil, err
	}
	Set(c, key, perm)
	return perm, nil
}

// GetTeamPerms returns the user permissions from the cache
// associated with the current organization.
func GetTeamPerms(c context.Context, user *model.User, org string) (*model.Perm, error) {
	key := fmt.Sprintf("perms:%s:%s",
		user.Login,
		org,
	)
	// if we fetch from the cache we can return immediately
	val, err := Get(c, key)
	if err == nil {
		return val.(*model.Perm), nil
	}
	// else we try to grab from the remote system and
	// populate our cache.
	perm, err := remote.TeamPerm(c, user, org)
	if err != nil {
		return nil, err
	}
	Set(c, key, perm)
	return perm, nil
}

// GetRepos returns the list of user repositories from the cache
// associated with the current context.
func GetRepos(c context.Context, user *model.User) ([]*model.RepoLite, error) {
	key := fmt.Sprintf("repos:%s",
		user.Login,
	)
	// if we fetch from the cache we can return immediately
	val, err := Get(c, key)
	if err == nil {
		return val.([]*model.RepoLite), nil
	}
	// else we try to grab from the remote system and
	// populate our cache.
	repos, err := remote.Repos(c, user)
	if err != nil {
		return nil, err
	}

	Set(c, key, repos)
	return repos, nil
}

// GetRepoMap returns the list of user repositories from the cache
// associated with the current context in a map structure.
func GetRepoMap(c context.Context, user *model.User) (map[string]bool, error) {
	repos, err := GetRepos(c, user)
	if err != nil {
		return nil, err
	}
	repom := map[string]bool{}
	for _, repo := range repos {
		repom[repo.FullName] = true
	}
	return repom, nil
}

// DeleteRepos evicts the cached user repositories from the cache associated
// with the current context.
func DeleteRepos(c context.Context, user *model.User) error {
	key := fmt.Sprintf("repos:%s",
		user.Login,
	)
	return Delete(c, key)
}
cache/helper_test.go (vendored, 115 lines deleted)

@@ -1,115 +0,0 @@
package cache

import (
	"errors"
	"fmt"
	"testing"

	"github.com/drone/drone/model"
	"github.com/drone/drone/remote"
	"github.com/drone/drone/remote/mock"
	"github.com/franela/goblin"
	"github.com/gin-gonic/gin"
)

func TestHelper(t *testing.T) {

	g := goblin.Goblin(t)

	g.Describe("Cache helpers", func() {

		var c *gin.Context
		var r *mock.Remote

		g.BeforeEach(func() {
			c = new(gin.Context)
			ToContext(c, Default())

			r = new(mock.Remote)
			remote.ToContext(c, r)
		})

		g.It("Should get permissions from remote", func() {
			r.On("Perm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(fakePerm, nil).Once()
			p, err := GetPerms(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
			g.Assert(p).Equal(fakePerm)
			g.Assert(err).Equal(nil)
		})

		g.It("Should get permissions from cache", func() {
			key := fmt.Sprintf("perms:%s:%s/%s",
				fakeUser.Login,
				fakeRepo.Owner,
				fakeRepo.Name,
			)

			Set(c, key, fakePerm)
			r.On("Perm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once()
			p, err := GetPerms(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
			g.Assert(p).Equal(fakePerm)
			g.Assert(err).Equal(nil)
		})

		g.It("Should get permissions error", func() {
			r.On("Perm", fakeUser, fakeRepo.Owner, fakeRepo.Name).Return(nil, fakeErr).Once()
			p, err := GetPerms(c, fakeUser, fakeRepo.Owner, fakeRepo.Name)
			g.Assert(p == nil).IsTrue()
			g.Assert(err).Equal(fakeErr)
		})

		g.It("Should set and get repos", func() {
			r.On("Repos", fakeUser).Return(fakeRepos, nil).Once()
			p, err := GetRepos(c, fakeUser)
			g.Assert(p).Equal(fakeRepos)
			g.Assert(err).Equal(nil)
		})

		g.It("Should get repos", func() {
			key := fmt.Sprintf("repos:%s",
				fakeUser.Login,
			)

			Set(c, key, fakeRepos)
			r.On("Repos", fakeUser).Return(nil, fakeErr).Once()
			p, err := GetRepos(c, fakeUser)
			g.Assert(p).Equal(fakeRepos)
			g.Assert(err).Equal(nil)
		})

		g.It("Should get repos error", func() {
			r.On("Repos", fakeUser).Return(nil, fakeErr).Once()
			p, err := GetRepos(c, fakeUser)
			g.Assert(p == nil).IsTrue()
			g.Assert(err).Equal(fakeErr)
		})

		g.It("Should evict repos", func() {
			key := fmt.Sprintf("repos:%s",
				fakeUser.Login,
			)

			Set(c, key, fakeRepos)
			repos, err := Get(c, key)
			g.Assert(repos != nil).IsTrue()
			g.Assert(err == nil).IsTrue()

			DeleteRepos(c, fakeUser)
			repos, err = Get(c, key)
			g.Assert(repos == nil).IsTrue()
		})
	})
}

var (
	fakeErr   = errors.New("Not Found")
	fakeUser  = &model.User{Login: "octocat"}
	fakePerm  = &model.Perm{true, true, true}
	fakeRepo  = &model.RepoLite{Owner: "octocat", Name: "Hello-World"}
	fakeRepos = []*model.RepoLite{
		{Owner: "octocat", Name: "Hello-World"},
		{Owner: "octocat", Name: "hello-world"},
		{Owner: "octocat", Name: "Spoon-Knife"},
	}
)
@@ -80,12 +80,6 @@ var flags = []cli.Flag{
 		Name:  "open",
 		Usage: "open user registration",
 	},
-	cli.DurationFlag{
-		EnvVar: "DRONE_CACHE_TTL",
-		Name:   "cache-ttl",
-		Usage:  "cache duration",
-		Value:  time.Minute * 15,
-	},
 	cli.StringSliceFlag{
 		EnvVar: "DRONE_ESCALATE",
 		Name:   "escalate",

@@ -415,7 +409,6 @@ func server(c *cli.Context) error {
 		ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true),
 		middleware.Version,
 		middleware.Config(c),
-		middleware.Cache(c),
 		middleware.Store(c, store_),
 		middleware.Remote(remote_),
 	)
@@ -1,7 +1,21 @@
 package model

-type Perm struct {
-	Pull  bool `json:"pull"`
-	Push  bool `json:"push"`
-	Admin bool `json:"admin"`
-}
+// PermStore persists repository permissions information to storage.
+type PermStore interface {
+	PermFind(user *User, repo *Repo) (*Perm, error)
+	PermUpsert(perm *Perm) error
+	PermBatch(perms []*Perm) error
+	PermDelete(perm *Perm) error
+	// PermFlush(user *User) error
+}
+
+// Perm defines a repository permission for an individual user.
+type Perm struct {
+	UserID int64  `json:"-"      meddler:"perm_user_id"`
+	RepoID int64  `json:"-"      meddler:"perm_repo_id"`
+	Repo   string `json:"-"      meddler:"-"`
+	Pull   bool   `json:"pull"   meddler:"perm_pull"`
+	Push   bool   `json:"push"   meddler:"perm_push"`
+	Admin  bool   `json:"admin"  meddler:"perm_admin"`
+	Synced int64  `json:"synced" meddler:"perm_synced"`
+}
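The new PermStore interface is the write path for the sync logic added later in this commit. The helper below is an illustrative condensation of that usage, not code from this diff: the name syncPerms is invented, and it assumes the model package above plus a time import; the real implementation is the Sync method in server/sync.go further down.

// syncPerms is an illustrative helper (not part of this commit): it shows how
// the PermStore above is fed from a freshly fetched repository list, mirroring
// the Sync method added in server/sync.go later in this diff.
func syncPerms(perms model.PermStore, user *model.User, repos []*model.Repo) error {
	now := time.Now().Unix()
	batch := make([]*model.Perm, 0, len(repos))
	for _, repo := range repos {
		p := &model.Perm{UserID: user.ID, Repo: repo.FullName, Pull: true, Synced: now}
		if repo.Perm != nil {
			// permissions reported by the remote driver (see the Repo.Perm field added below)
			p.Push = repo.Perm.Push
			p.Admin = repo.Perm.Admin
		}
		batch = append(batch, p)
	}
	return perms.PermBatch(batch)
}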
@@ -27,6 +27,7 @@ type Repo struct {
 	IsTrusted   bool `json:"trusted" meddler:"repo_trusted"`
 	IsStarred   bool `json:"starred,omitempty" meddler:"-"`
 	IsGated     bool `json:"gated" meddler:"repo_gated"`
+	IsActive    bool `json:"active,omitempty" meddler:"repo_active"`
 	AllowPull   bool `json:"allow_pr" meddler:"repo_allow_pr"`
 	AllowPush   bool `json:"allow_push" meddler:"repo_allow_push"`
 	AllowDeploy bool `json:"allow_deploys" meddler:"repo_allow_deploys"`

@@ -34,6 +35,7 @@ type Repo struct {
 	Counter int    `json:"last_build" meddler:"repo_counter"`
 	Config  string `json:"config_file" meddler:"repo_config_path"`
 	Hash    string `json:"-" meddler:"repo_hash"`
+	Perm    *Perm  `json:"-" meddler:"-"`
 }

 // RepoPatch represents a repository patch object.
@@ -34,6 +34,9 @@ type User struct {
 	// Activate indicates the user is active in the system.
 	Active bool `json:"active" meddler:"user_active"`

+	// Synced is the timestamp when the user was synced with the remote system.
+	Synced int64 `json:"synced" meddler:"user_synced"`
+
 	// Admin indicates the user is a system administrator.
 	//
 	// NOTE: This is sourced from the DRONE_ADMINS environment variable and is no
@@ -114,11 +114,6 @@ func (c *config) Teams(u *model.User) ([]*model.Team, error) {
 	return convertTeamList(resp.Values), nil
 }

-// TeamPerm is not supported by the Bitbucket driver.
-func (c *config) TeamPerm(u *model.User, org string) (*model.Perm, error) {
-	return nil, nil
-}
-
 // Repo returns the named Bitbucket repository.
 func (c *config) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 	repo, err := c.newClient(u).FindRepo(owner, name)

@@ -130,10 +125,10 @@ func (c *config) Repo(u *model.User, owner, name string) (*model.Repo, error) {

 // Repos returns a list of all repositories for Bitbucket account, including
 // organization repositories.
-func (c *config) Repos(u *model.User) ([]*model.RepoLite, error) {
+func (c *config) Repos(u *model.User) ([]*model.Repo, error) {
 	client := c.newClient(u)

-	var all []*model.RepoLite
+	var all []*model.Repo

 	accounts := []string{u.Login}
 	resp, err := client.ListTeams(&internal.ListTeamOpts{

@@ -153,7 +148,7 @@ func (c *config) Repos(u *model.User) ([]*model.RepoLite, error) {
 			return all, err
 		}
 		for _, repo := range repos {
-			all = append(all, convertRepoLite(repo))
+			all = append(all, convertRepo(repo))
 		}
 	}
 	return all, nil
@@ -110,17 +110,6 @@ func cloneLink(repo *internal.Repo) string {
 	return clone
 }

-// convertRepoLite is a helper function used to convert a Bitbucket repository
-// structure to the simplified Drone repository structure.
-func convertRepoLite(from *internal.Repo) *model.RepoLite {
-	return &model.RepoLite{
-		Owner:    strings.Split(from.FullName, "/")[0],
-		Name:     strings.Split(from.FullName, "/")[1],
-		FullName: from.FullName,
-		Avatar:   from.Owner.Links.Avatar.Href,
-	}
-}
-
 // convertUser is a helper function used to convert a Bitbucket user account
 // structure to the Drone User structure.
 func convertUser(from *internal.Account, token *oauth2.Token) *model.User {
@@ -49,18 +49,6 @@ func Test_helper(t *testing.T) {
 		g.Assert(convertDesc(model.StatusError)).Equal(descError)
 	})

-	g.It("should convert repository lite", func() {
-		from := &internal.Repo{}
-		from.FullName = "octocat/hello-world"
-		from.Owner.Links.Avatar.Href = "http://..."
-
-		to := convertRepoLite(from)
-		g.Assert(to.Avatar).Equal(from.Owner.Links.Avatar.Href)
-		g.Assert(to.FullName).Equal(from.FullName)
-		g.Assert(to.Owner).Equal("octocat")
-		g.Assert(to.Name).Equal("hello-world")
-	})
-
 	g.It("should convert repository", func() {
 		from := &internal.Repo{
 			FullName: "octocat/hello-world",
@@ -140,14 +140,14 @@ func (c *Config) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 	return convertRepo(repo), nil
 }

-func (c *Config) Repos(u *model.User) ([]*model.RepoLite, error) {
+func (c *Config) Repos(u *model.User) ([]*model.Repo, error) {
 	repos, err := internal.NewClientWithToken(c.URL, c.Consumer, u.Token).FindRepos()
 	if err != nil {
 		return nil, err
 	}
-	var all []*model.RepoLite
+	var all []*model.Repo
 	for _, repo := range repos {
-		all = append(all, convertRepoLite(repo))
+		all = append(all, convertRepo(repo))
 	}

 	return all, nil

@@ -233,7 +233,7 @@ func CreateConsumer(URL string, ConsumerKey string, PrivateKey *rsa.PrivateKey)
 	consumer.HttpClient = &http.Client{
 		Transport: &http.Transport{
 			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
-			Proxy: http.ProxyFromEnvironment,
+			Proxy:           http.ProxyFromEnvironment,
 		},
 	}
 	return consumer
@@ -86,19 +86,6 @@ func convertRepo(from *internal.Repo) *model.Repo {

 }

-// convertRepoLite is a helper function used to convert a Bitbucket repository
-// structure to the simplified Drone repository structure.
-func convertRepoLite(from *internal.Repo) *model.RepoLite {
-	return &model.RepoLite{
-		Owner:    from.Project.Key,
-		Name:     from.Slug,
-		FullName: from.Project.Key + "/" + from.Slug,
-		//TODO: find the avatar for the repo
-		//Avatar: might need another ws call?
-	}
-
-}
-
 // convertPushHook is a helper function used to convert a Bitbucket push
 // hook to the Drone build struct holding commit information.
 func convertPushHook(hook *internal.PostHook, baseURL string) *model.Build {
@@ -1,11 +1,12 @@
 package bitbucketserver

 import (
+	"testing"
+
 	"github.com/drone/drone/model"
 	"github.com/drone/drone/remote/bitbucketserver/internal"
 	"github.com/franela/goblin"
 	"github.com/mrjones/oauth"
-	"testing"
 )

 func Test_helper(t *testing.T) {

@@ -13,17 +14,6 @@ func Test_helper(t *testing.T) {
 	g := goblin.Goblin(t)
 	g.Describe("Bitbucket Server converter", func() {

-		g.It("should convert repository lite", func() {
-			from := &internal.Repo{}
-			from.Project.Key = "octocat"
-			from.Slug = "hello-world"
-
-			to := convertRepoLite(from)
-			g.Assert(to.FullName).Equal("octocat/hello-world")
-			g.Assert(to.Owner).Equal("octocat")
-			g.Assert(to.Name).Equal("hello-world")
-		})
-
 		g.It("should convert repository", func() {
 			from := &internal.Repo{
 				Slug: "hello-world",
@@ -200,8 +200,8 @@ func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {

 // Repos returns a list of all repositories for the Gitea account, including
 // organization repositories.
-func (c *client) Repos(u *model.User) ([]*model.RepoLite, error) {
-	repos := []*model.RepoLite{}
+func (c *client) Repos(u *model.User) ([]*model.Repo, error) {
+	repos := []*model.Repo{}

 	client := c.newClientToken(u.Token)
 	all, err := client.ListMyRepos()

@@ -210,7 +210,7 @@ func (c *client) Repos(u *model.User) ([]*model.RepoLite, error) {
 	}

 	for _, repo := range all {
-		repos = append(repos, toRepoLite(repo))
+		repos = append(repos, toRepo(repo))
 	}
 	return repos, err
 }
@@ -12,21 +12,6 @@ import (
 	"github.com/drone/drone/model"
 )

-// helper function that converts a Gitea repository to a Drone repository.
-func toRepoLite(from *gitea.Repository) *model.RepoLite {
-	name := strings.Split(from.FullName, "/")[1]
-	avatar := expandAvatar(
-		from.HTMLURL,
-		from.Owner.AvatarURL,
-	)
-	return &model.RepoLite{
-		Name:     name,
-		Owner:    from.Owner.UserName,
-		FullName: from.FullName,
-		Avatar:   avatar,
-	}
-}
-
 // helper function that converts a Gitea repository to a Drone repository.
 func toRepo(from *gitea.Repository) *model.Repo {
 	name := strings.Split(from.FullName, "/")[1]
@@ -177,21 +177,6 @@ func Test_parse(t *testing.T) {
 		g.Assert(repo.IsPrivate).Equal(from.Private)
 	})

-	g.It("Should return a RepoLite struct from a Gitea Repo", func() {
-		from := gitea.Repository{
-			FullName: "gophers/hello-world",
-			Owner: &gitea.User{
-				UserName:  "gordon",
-				AvatarURL: "http://1.gravatar.com/avatar/8c58a0be77ee441bb8f8595b7f1b4e87",
-			},
-		}
-		repo := toRepoLite(&from)
-		g.Assert(repo.FullName).Equal(from.FullName)
-		g.Assert(repo.Owner).Equal(from.Owner.UserName)
-		g.Assert(repo.Name).Equal("hello-world")
-		g.Assert(repo.Avatar).Equal(from.Owner.AvatarURL)
-	})
-
 	g.It("Should correct a malformed avatar url", func() {

 		var urls = []struct {
@@ -80,6 +80,7 @@ func convertRepo(from *github.Repository, private bool) *model.Repo {
 		Avatar: *from.Owner.AvatarURL,
 		Kind:   model.RepoGit,
 		Branch: defaultBranch,
+		Perm:   convertPerm(from),
 	}
 	if from.DefaultBranch != nil {
 		repo.Branch = *from.DefaultBranch

@@ -114,24 +115,24 @@ func convertTeamPerm(from *github.Membership) *model.Perm {

 // convertRepoList is a helper function used to convert a GitHub repository
 // list to the common Drone repository structure.
-func convertRepoList(from []github.Repository) []*model.RepoLite {
-	var repos []*model.RepoLite
+func convertRepoList(from []github.Repository, private bool) []*model.Repo {
+	var repos []*model.Repo
 	for _, repo := range from {
-		repos = append(repos, convertRepoLite(repo))
+		repos = append(repos, convertRepo(&repo, private))
 	}
 	return repos
 }

-// convertRepoLite is a helper function used to convert a GitHub repository
-// structure to the common Drone repository structure.
-func convertRepoLite(from github.Repository) *model.RepoLite {
-	return &model.RepoLite{
-		Owner:    *from.Owner.Login,
-		Name:     *from.Name,
-		FullName: *from.FullName,
-		Avatar:   *from.Owner.AvatarURL,
-	}
-}
+// // convertRepoLite is a helper function used to convert a GitHub repository
+// // structure to the common Drone repository structure.
+// func convertRepoLite(from github.Repository) *model.RepoLite {
+// 	return &model.RepoLite{
+// 		Owner:    *from.Owner.Login,
+// 		Name:     *from.Name,
+// 		FullName: *from.FullName,
+// 		Avatar:   *from.Owner.AvatarURL,
+// 	}
+// }

 // convertTeamList is a helper function used to convert a GitHub team list to
 // the common Drone repository structure.
@@ -50,36 +50,27 @@ func Test_helper(t *testing.T) {
 		g.Assert(convertDesc(model.StatusError)).Equal(descError)
 	})

-	g.It("should convert repository lite", func() {
-		from := github.Repository{
-			FullName: github.String("octocat/hello-world"),
-			Name:     github.String("hello-world"),
-			Owner: &github.User{
-				AvatarURL: github.String("http://..."),
-				Login:     github.String("octocat"),
-			},
-		}
-
-		to := convertRepoLite(from)
-		g.Assert(to.Avatar).Equal("http://...")
-		g.Assert(to.FullName).Equal("octocat/hello-world")
-		g.Assert(to.Owner).Equal("octocat")
-		g.Assert(to.Name).Equal("hello-world")
-	})
-
 	g.It("should convert repository list", func() {
 		from := []github.Repository{
 			{
 				Private:  github.Bool(false),
 				FullName: github.String("octocat/hello-world"),
 				Name:     github.String("hello-world"),
 				Owner: &github.User{
 					AvatarURL: github.String("http://..."),
 					Login:     github.String("octocat"),
 				},
+				HTMLURL:  github.String("https://github.com/octocat/hello-world"),
+				CloneURL: github.String("https://github.com/octocat/hello-world.git"),
+				Permissions: &map[string]bool{
+					"push":  true,
+					"pull":  true,
+					"admin": true,
+				},
 			},
 		}

-		to := convertRepoList(from)
+		to := convertRepoList(from, false)
 		g.Assert(to[0].Avatar).Equal("http://...")
 		g.Assert(to[0].FullName).Equal("octocat/hello-world")
 		g.Assert(to[0].Owner).Equal("octocat")

@@ -98,6 +89,11 @@ func Test_helper(t *testing.T) {
 				AvatarURL: github.String("http://..."),
 				Login:     github.String("octocat"),
 			},
+			Permissions: &map[string]bool{
+				"push":  true,
+				"pull":  true,
+				"admin": true,
+			},
 		}

 		to := convertRepo(&from, false)
@@ -168,16 +168,6 @@ func (c *client) Teams(u *model.User) ([]*model.Team, error) {
 	return teams, nil
 }

-// TeamPerm returns the user permissions for the named GitHub organization.
-func (c *client) TeamPerm(u *model.User, org string) (*model.Perm, error) {
-	client := c.newClientToken(u.Token)
-	membership, _, err := client.Organizations.GetOrgMembership(u.Login, org)
-	if err != nil {
-		return nil, err
-	}
-	return convertTeamPerm(membership), nil
-}
-
 // Repo returns the named GitHub repository.
 func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 	client := c.newClientToken(u.Token)

@@ -190,20 +180,20 @@ func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {

 // Repos returns a list of all repositories for GitHub account, including
 // organization repositories.
-func (c *client) Repos(u *model.User) ([]*model.RepoLite, error) {
+func (c *client) Repos(u *model.User) ([]*model.Repo, error) {
 	client := c.newClientToken(u.Token)

 	opts := new(github.RepositoryListOptions)
 	opts.PerPage = 100
 	opts.Page = 1

-	var repos []*model.RepoLite
+	var repos []*model.Repo
 	for opts.Page > 0 {
 		list, resp, err := client.Repositories.List("", opts)
 		if err != nil {
 			return nil, err
 		}
-		repos = append(repos, convertRepoList(list)...)
+		repos = append(repos, convertRepoList(list, c.PrivateMode)...)
 		opts.Page = resp.NextPage
 	}
 	return repos, nil
@@ -110,23 +110,6 @@ func Test_github(t *testing.T) {
 		})
 	})

-	g.Describe("Requesting organization permissions", func() {
-		g.It("Should return the permission details of an admin", func() {
-			perm, err := c.TeamPerm(fakeUser, "octocat")
-			g.Assert(err == nil).IsTrue()
-			g.Assert(perm.Admin).IsTrue()
-		})
-		g.It("Should return the permission details of a member", func() {
-			perm, err := c.TeamPerm(fakeUser, "github")
-			g.Assert(err == nil).IsTrue()
-			g.Assert(perm.Admin).IsFalse()
-		})
-		g.It("Should handle a not found error", func() {
-			_, err := c.TeamPerm(fakeUser, "org_not_found")
-			g.Assert(err != nil).IsTrue()
-		})
-	})
-
 	g.It("Should return a user repository list")

 	g.It("Should return a user team list")
@@ -203,11 +203,6 @@ func (g *Gitlab) Teams(u *model.User) ([]*model.Team, error) {
 	return teams, nil
 }

-// TeamPerm is not supported by the Gitlab driver.
-func (g *Gitlab) TeamPerm(u *model.User, org string) (*model.Perm, error) {
-	return nil, nil
-}
-
 // Repo fetches the named repository from the remote system.
 func (g *Gitlab) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 	client := NewClient(g.URL, u.Token, g.SkipVerify)

@@ -248,32 +243,40 @@ func (g *Gitlab) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 }

 // Repos fetches a list of repos from the remote system.
-func (g *Gitlab) Repos(u *model.User) ([]*model.RepoLite, error) {
+func (g *Gitlab) Repos(u *model.User) ([]*model.Repo, error) {
 	client := NewClient(g.URL, u.Token, g.SkipVerify)

-	var repos = []*model.RepoLite{}
+	var repos = []*model.Repo{}

 	all, err := client.AllProjects(g.HideArchives)
 	if err != nil {
 		return repos, err
 	}

-	for _, repo := range all {
-		var parts = strings.Split(repo.PathWithNamespace, "/")
+	for _, repo_ := range all {
+		var parts = strings.Split(repo_.PathWithNamespace, "/")
 		var owner = parts[0]
 		var name = parts[1]
-		var avatar = repo.AvatarUrl
-
-		if len(avatar) != 0 && !strings.HasPrefix(avatar, "http") {
-			avatar = fmt.Sprintf("%s/%s", g.URL, avatar)
+
+		repo := &model.Repo{}
+		repo.Owner = owner
+		repo.Name = name
+		repo.FullName = repo_.PathWithNamespace
+		repo.Link = repo_.Url
+		repo.Clone = repo_.HttpRepoUrl
+		repo.Branch = "master"
+
+		if repo_.DefaultBranch != "" {
+			repo.Branch = repo_.DefaultBranch
 		}

-		repos = append(repos, &model.RepoLite{
-			Owner:    owner,
-			Name:     name,
-			FullName: repo.PathWithNamespace,
-			Avatar:   avatar,
-		})
+		if g.PrivateMode {
+			repo.IsPrivate = true
+		} else {
+			repo.IsPrivate = !repo_.Public
+		}
+
+		repos = append(repos, repo)
 	}

 	return repos, err

@@ -295,7 +298,7 @@ func (g *Gitlab) Perm(u *model.User, owner, name string) (*model.Perm, error) {

 	// repo owner is granted full access
 	if repo.Owner != nil && repo.Owner.Username == u.Login {
-		return &model.Perm{true, true, true}, nil
+		return &model.Perm{Push: true, Pull: true, Admin: true}, nil
 	}

 	// check permission for current user
@@ -127,11 +127,6 @@ func (c *client) Teams(u *model.User) ([]*model.Team, error) {
 	return teams, nil
 }

-// TeamPerm is not supported by the Gogs driver.
-func (c *client) TeamPerm(u *model.User, org string) (*model.Perm, error) {
-	return nil, nil
-}
-
 // Repo returns the named Gogs repository.
 func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {
 	client := c.newClientToken(u.Token)

@@ -147,8 +142,8 @@ func (c *client) Repo(u *model.User, owner, name string) (*model.Repo, error) {

 // Repos returns a list of all repositories for the Gogs account, including
 // organization repositories.
-func (c *client) Repos(u *model.User) ([]*model.RepoLite, error) {
-	repos := []*model.RepoLite{}
+func (c *client) Repos(u *model.User) ([]*model.Repo, error) {
+	repos := []*model.Repo{}

 	client := c.newClientToken(u.Token)
 	all, err := client.ListMyRepos()

@@ -157,7 +152,7 @@ func (c *client) Repos(u *model.User) ([]*model.RepoLite, error) {
 	}

 	for _, repo := range all {
-		repos = append(repos, toRepoLite(repo))
+		repos = append(repos, toRepo(repo))
 	}
 	return repos, err
 }
@@ -12,21 +12,6 @@ import (
 	"github.com/gogits/go-gogs-client"
 )

-// helper function that converts a Gogs repository to a Drone repository.
-func toRepoLite(from *gogs.Repository) *model.RepoLite {
-	name := strings.Split(from.FullName, "/")[1]
-	avatar := expandAvatar(
-		from.HtmlUrl,
-		from.Owner.AvatarUrl,
-	)
-	return &model.RepoLite{
-		Name:     name,
-		Owner:    from.Owner.UserName,
-		FullName: from.FullName,
-		Avatar:   avatar,
-	}
-}
-
 // helper function that converts a Gogs repository to a Drone repository.
 func toRepo(from *gogs.Repository) *model.Repo {
 	name := strings.Split(from.FullName, "/")[1]
@@ -176,21 +176,6 @@ func Test_parse(t *testing.T) {
 		g.Assert(repo.IsPrivate).Equal(from.Private)
 	})

-	g.It("Should return a RepoLite struct from a Gogs Repo", func() {
-		from := gogs.Repository{
-			FullName: "gophers/hello-world",
-			Owner: gogs.User{
-				UserName:  "gordon",
-				AvatarUrl: "http://1.gravatar.com/avatar/8c58a0be77ee441bb8f8595b7f1b4e87",
-			},
-		}
-		repo := toRepoLite(&from)
-		g.Assert(repo.FullName).Equal(from.FullName)
-		g.Assert(repo.Owner).Equal(from.Owner.UserName)
-		g.Assert(repo.Name).Equal("hello-world")
-		g.Assert(repo.Avatar).Equal(from.Owner.AvatarUrl)
-	})
-
 	g.It("Should correct a malformed avatar url", func() {

 		var urls = []struct {
@@ -23,15 +23,11 @@ type Remote interface {
 	// Teams fetches a list of team memberships from the remote system.
 	Teams(u *model.User) ([]*model.Team, error)

-	// TeamPerm fetches the named organization permissions from
-	// the remote system for the specified user.
-	TeamPerm(u *model.User, org string) (*model.Perm, error)
-
 	// Repo fetches the named repository from the remote system.
 	Repo(u *model.User, owner, repo string) (*model.Repo, error)

 	// Repos fetches a list of repos from the remote system.
-	Repos(u *model.User) ([]*model.RepoLite, error)
+	Repos(u *model.User) ([]*model.Repo, error)

 	// Perm fetches the named repository permissions from
 	// the remote system for the specified user.

@@ -89,19 +85,13 @@ func Teams(c context.Context, u *model.User) ([]*model.Team, error) {
 	return FromContext(c).Teams(u)
 }

-// TeamPerm fetches the named organization permissions from
-// the remote system for the specified user.
-func TeamPerm(c context.Context, u *model.User, org string) (*model.Perm, error) {
-	return FromContext(c).TeamPerm(u, org)
-}
-
 // Repo fetches the named repository from the remote system.
 func Repo(c context.Context, u *model.User, owner, repo string) (*model.Repo, error) {
 	return FromContext(c).Repo(u, owner, repo)
 }

 // Repos fetches a list of repos from the remote system.
-func Repos(c context.Context, u *model.User) ([]*model.RepoLite, error) {
+func Repos(c context.Context, u *model.User) ([]*model.Repo, error) {
 	return FromContext(c).Repos(u)
 }
@@ -1,24 +0,0 @@
package middleware

import (
	"github.com/drone/drone/cache"

	"github.com/gin-gonic/gin"
	"github.com/urfave/cli"
)

// Cache is a middleware function that initializes the Cache and attaches to
// the context of every http.Request.
func Cache(cli *cli.Context) gin.HandlerFunc {
	v := setupCache(cli)
	return func(c *gin.Context) {
		cache.ToContext(c, v)
	}
}

// helper function to create the cache from the CLI context.
func setupCache(c *cli.Context) cache.Cache {
	return cache.NewTTL(
		c.Duration("cache-ttl"),
	)
}
@@ -2,9 +2,10 @@ package session

 import (
 	"net/http"
+	"time"

-	"github.com/drone/drone/cache"
 	"github.com/drone/drone/model"
+	"github.com/drone/drone/remote"
 	"github.com/drone/drone/store"

 	log "github.com/Sirupsen/logrus"

@@ -92,10 +93,20 @@ func SetPerm() gin.HandlerFunc {

 	case user != nil:
 		var err error
-		perm, err = cache.GetPerms(c, user, repo.Owner, repo.Name)
+		perm, err = store.FromContext(c).PermFind(user, repo)
 		if err != nil {
-			log.Errorf("Error fetching permission for %s %s",
-				user.Login, repo.FullName)
+			log.Errorf("Error fetching permission for %s %s. %s",
+				user.Login, repo.FullName, err)
 		}
+		if time.Unix(perm.Synced, 0).Add(time.Hour).Before(time.Now()) {
+			perm, err = remote.FromContext(c).Perm(user, repo.Owner, repo.Name)
+			if err == nil {
+				log.Debugf("Synced user permission for %s %s", user.Login, repo.FullName)
+				perm.Repo = repo.FullName
+				perm.UserID = user.ID
+				perm.Synced = time.Now().Unix()
+				store.FromContext(c).PermUpsert(perm)
+			}
+		}
 	}
@@ -1,73 +0,0 @@
package session

import (
	"github.com/drone/drone/cache"
	"github.com/drone/drone/model"

	log "github.com/Sirupsen/logrus"
	"github.com/gin-gonic/gin"
)

func TeamPerm(c *gin.Context) *model.Perm {
	user := User(c)
	team := c.Param("team")
	perm := &model.Perm{}

	switch {
	// if the user is not authenticated
	case user == nil:
		perm.Admin = false
		perm.Pull = false
		perm.Push = false

	// if the user is a DRONE_ADMIN
	case user.Admin:
		perm.Admin = true
		perm.Pull = true
		perm.Push = true

	// otherwise if the user is authenticated we should
	// check the remote system to get the users permissiosn.
	default:
		log.Debugf("Fetching team permission for %s %s",
			user.Login, team)

		var err error
		perm, err = cache.GetTeamPerms(c, user, team)
		if err != nil {
			// debug
			log.Errorf("Error fetching team permission for %s %s",
				user.Login, team)

			perm.Admin = false
			perm.Pull = false
			perm.Push = false
		}
	}

	if user != nil {
		log.Debugf("%s granted %+v team permission to %s",
			user.Login, perm, team)
	} else {
		log.Debugf("Guest granted %+v to %s", perm, team)

		perm.Admin = false
		perm.Pull = false
		perm.Push = false
	}

	return perm
}

func MustTeamAdmin() gin.HandlerFunc {
	return func(c *gin.Context) {
		perm := TeamPerm(c)

		if perm.Admin {
			c.Next()
		} else {
			c.String(401, "User not authorized")
			c.Abort()
		}
	}
}
@@ -1,95 +0,0 @@
package session

import (
	"testing"

	"github.com/drone/drone/cache"
	"github.com/drone/drone/model"
	"github.com/franela/goblin"
	"github.com/gin-gonic/gin"
)

func TestTeamPerm(t *testing.T) {
	g := goblin.Goblin(t)

	g.Describe("TeamPerm", func() {

		var c *gin.Context
		g.BeforeEach(func() {
			c = new(gin.Context)
			cache.ToContext(c, cache.Default())
		})

		g.It("Should set admin to false (user not logged in)", func() {
			p := TeamPerm(c)
			g.Assert(p.Admin).IsFalse("admin should be false")
		})
		g.It("Should set admin to true (user is DRONE_ADMIN)", func() {
			// Set DRONE_ADMIN user
			c.Set("user", fakeUserAdmin)

			p := TeamPerm(c)
			g.Assert(p.Admin).IsTrue("admin should be false")
		})
		g.It("Should set admin to false (user logged in, not owner of org)", func() {
			// Set fake org
			params := gin.Params{
				gin.Param{
					Key:   "team",
					Value: "test_org",
				},
			}
			c.Params = params

			// Set cache to show user does not Owner/Admin
			cache.Set(c, "perms:octocat:test_org", fakeTeamPerm)

			// Set User
			c.Set("user", fakeUser)

			p := TeamPerm(c)
			g.Assert(p.Admin).IsFalse("admin should be false")
		})
		g.It("Should set admin to true (user logged in, owner of org)", func() {
			// Set fake org
			params := gin.Params{
				gin.Param{
					Key:   "team",
					Value: "test_org",
				},
			}
			c.Params = params

			// Set cache to show user is Owner/Admin
			cache.Set(c, "perms:octocat:test_org", fakeTeamPermAdmin)

			// Set User
			c.Set("user", fakeUser)

			p := TeamPerm(c)
			g.Assert(p.Admin).IsTrue("admin should be true")
		})
	})
}

var (
	fakeUserAdmin = &model.User{
		Login: "octocatAdmin",
		Token: "cfcd2084",
		Admin: true,
	}

	fakeUser = &model.User{
		Login: "octocat",
		Token: "cfcd2084",
		Admin: false,
	}

	fakeTeamPermAdmin = &model.Perm{
		Admin: true,
	}

	fakeTeamPerm = &model.Perm{
		Admin: false,
	}
)
@@ -47,7 +47,6 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
 		user.GET("", server.GetSelf)
 		user.GET("/feed", server.GetFeed)
 		user.GET("/repos", server.GetRepos)
-		user.GET("/repos/remote", server.GetRemoteRepos)
 		user.POST("/token", server.PostToken)
 		user.DELETE("/token", server.DeleteToken)
 	}

@@ -62,46 +61,42 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
 		users.DELETE("/:login", server.DeleteUser)
 	}

-	repos := e.Group("/api/repos/:owner/:name")
+	repo := e.Group("/api/repos/:owner/:name")
 	{
-		repos.POST("", server.PostRepo)
+		repo.Use(session.SetRepo())
+		repo.Use(session.SetPerm())
+		repo.Use(session.MustPull)

-		repo := repos.Group("")
-		{
-			repo.Use(session.SetRepo())
-			repo.Use(session.SetPerm())
-			repo.Use(session.MustPull)
+		repo.POST("", session.MustRepoAdmin(), server.PostRepo)
+		repo.GET("", server.GetRepo)
+		repo.GET("/builds", server.GetBuilds)
+		repo.GET("/builds/:number", server.GetBuild)
+		repo.GET("/logs/:number/:ppid/:proc", server.GetBuildLogs)

-			repo.GET("", server.GetRepo)
-			repo.GET("/builds", server.GetBuilds)
-			repo.GET("/builds/:number", server.GetBuild)
-			repo.GET("/logs/:number/:ppid/:proc", server.GetBuildLogs)
+		// requires push permissions
+		repo.GET("/secrets", session.MustPush, server.GetSecretList)
+		repo.POST("/secrets", session.MustPush, server.PostSecret)
+		repo.GET("/secrets/:secret", session.MustPush, server.GetSecret)
+		repo.PATCH("/secrets/:secret", session.MustPush, server.PatchSecret)
+		repo.DELETE("/secrets/:secret", session.MustPush, server.DeleteSecret)

-			// requires push permissions
-			repo.GET("/secrets", session.MustPush, server.GetSecretList)
-			repo.POST("/secrets", session.MustPush, server.PostSecret)
-			repo.GET("/secrets/:secret", session.MustPush, server.GetSecret)
-			repo.PATCH("/secrets/:secret", session.MustPush, server.PatchSecret)
-			repo.DELETE("/secrets/:secret", session.MustPush, server.DeleteSecret)
+		// requires push permissions
+		repo.GET("/registry", session.MustPush, server.GetRegistryList)
+		repo.POST("/registry", session.MustPush, server.PostRegistry)
+		repo.GET("/registry/:registry", session.MustPush, server.GetRegistry)
+		repo.PATCH("/registry/:registry", session.MustPush, server.PatchRegistry)
+		repo.DELETE("/registry/:registry", session.MustPush, server.DeleteRegistry)

-			// requires push permissions
-			repo.GET("/registry", session.MustPush, server.GetRegistryList)
-			repo.POST("/registry", session.MustPush, server.PostRegistry)
-			repo.GET("/registry/:registry", session.MustPush, server.GetRegistry)
-			repo.PATCH("/registry/:registry", session.MustPush, server.PatchRegistry)
-			repo.DELETE("/registry/:registry", session.MustPush, server.DeleteRegistry)
+		// requires admin permissions
+		repo.PATCH("", session.MustRepoAdmin(), server.PatchRepo)
+		repo.DELETE("", session.MustRepoAdmin(), server.DeleteRepo)
+		repo.POST("/chown", session.MustRepoAdmin(), server.ChownRepo)
+		repo.POST("/repair", session.MustRepoAdmin(), server.RepairRepo)

-			// requires push permissions
-			repo.PATCH("", session.MustPush, server.PatchRepo)
-			repo.DELETE("", session.MustRepoAdmin(), server.DeleteRepo)
-			repo.POST("/chown", session.MustRepoAdmin(), server.ChownRepo)
-			repo.POST("/repair", session.MustRepoAdmin(), server.RepairRepo)
-
-			repo.POST("/builds/:number", session.MustPush, server.PostBuild)
-			repo.POST("/builds/:number/approve", session.MustPush, server.PostApproval)
-			repo.POST("/builds/:number/decline", session.MustPush, server.PostDecline)
-			repo.DELETE("/builds/:number/:job", session.MustPush, server.DeleteBuild)
-		}
+		repo.POST("/builds/:number", session.MustPush, server.PostBuild)
+		repo.POST("/builds/:number/approve", session.MustPush, server.PostApproval)
+		repo.POST("/builds/:number/decline", session.MustPush, server.PostDecline)
+		repo.DELETE("/builds/:number/:job", session.MustPush, server.DeleteBuild)
 	}

 	badges := e.Group("/api/badges/:owner/:name")
@@ -84,6 +84,11 @@ func PostHook(c *gin.Context) {
 		c.AbortWithError(404, err)
 		return
 	}
+	if !repo.IsActive {
+		logrus.Errorf("ignoring hook. %s/%s is inactive.", tmprepo.Owner, tmprepo.Name)
+		c.AbortWithError(204, err)
+		return
+	}

 	// get the token and verify the hook is authorized
 	parsed, err := token.ParseRequest(c.Request, func(t *token.Token) (string, error) {
@@ -4,11 +4,11 @@ import (
 	"encoding/base32"
 	"fmt"
 	"net/http"
+	"strconv"

 	"github.com/gin-gonic/gin"
 	"github.com/gorilla/securecookie"

-	"github.com/drone/drone/cache"
 	"github.com/drone/drone/model"
 	"github.com/drone/drone/remote"
 	"github.com/drone/drone/router/middleware/session"

@@ -20,54 +20,40 @@ func PostRepo(c *gin.Context) {
 	remote := remote.FromContext(c)
 	user := session.User(c)
-	owner := c.Param("owner")
-	name := c.Param("name")
+	repo := session.Repo(c)

-	if user == nil {
-		c.AbortWithStatus(403)
+	if repo.IsActive {
+		c.String(409, "Repository is already active.")
 		return
 	}

-	r, err := remote.Repo(user, owner, name)
-	if err != nil {
-		c.String(404, err.Error())
-		return
+	repo.IsActive = true
+	repo.UserID = user.ID
+	if !repo.AllowPush && !repo.AllowPull && !repo.AllowDeploy && !repo.AllowTag {
+		repo.AllowPush = true
+		repo.AllowPull = true
 	}
-	m, err := cache.GetPerms(c, user, owner, name)
-	if err != nil {
-		c.String(404, err.Error())
-		return
+	if repo.Visibility == "" {
+		repo.Visibility = model.VisibilityPublic
+		if repo.IsPrivate {
+			repo.Visibility = model.VisibilityPrivate
+		}
 	}
-	if !m.Admin {
-		c.String(403, "Administrative access is required.")
-		return
+	if repo.Config == "" {
+		repo.Config = ".drone.yml"
 	}
+	if repo.Timeout == 0 {
+		repo.Timeout = 60 // 1 hour default build time
+	}
+	if repo.Hash == "" {
+		repo.Hash = base32.StdEncoding.EncodeToString(
+			securecookie.GenerateRandomKey(32),
+		)
+	}

-	// error if the repository already exists
-	_, err = store.GetRepoOwnerName(c, owner, name)
-	if err == nil {
-		c.String(409, "Repository already exists.")
-		return
-	}
-
-	// set the repository owner to the
-	// currently authenticated user.
-	r.UserID = user.ID
-	r.AllowPush = true
-	r.AllowPull = true
-	r.Visibility = model.VisibilityPublic
-	r.Config = ".drone.yml"
-	r.Timeout = 60 // 1 hour default build time
-	r.Hash = base32.StdEncoding.EncodeToString(
-		securecookie.GenerateRandomKey(32),
-	)
-	if r.IsPrivate {
-		r.Visibility = model.VisibilityPrivate
-	}
-
-	// crates the jwt token used to verify the repository
-	t := token.New(token.HookToken, r.FullName)
-	sig, err := t.Sign(r.Hash)
+	// creates the jwt token used to verify the repository
+	t := token.New(token.HookToken, repo.FullName)
+	sig, err := t.Sign(repo.Hash)
 	if err != nil {
 		c.String(500, err.Error())
 		return

@@ -79,22 +65,19 @@ func PostRepo(c *gin.Context) {
 		sig,
 	)

-	// activate the repository before we make any
-	// local changes to the database.
-	err = remote.Activate(user, r, link)
+	err = remote.Activate(user, repo, link)
 	if err != nil {
 		c.String(500, err.Error())
 		return
 	}

-	// persist the repository
-	err = store.CreateRepo(c, r)
+	err = store.UpdateRepo(c, repo)
 	if err != nil {
 		c.String(500, err.Error())
 		return
 	}

-	c.JSON(200, r)
+	c.JSON(200, repo)
 }

 func PatchRepo(c *gin.Context) {

@@ -173,16 +156,28 @@ func GetRepo(c *gin.Context) {
 }

 func DeleteRepo(c *gin.Context) {
+	remove, _ := strconv.ParseBool(c.Query("remove"))
 	remote := remote.FromContext(c)
 	repo := session.Repo(c)
 	user := session.User(c)

-	err := store.DeleteRepo(c, repo)
+	repo.IsActive = false
+	repo.UserID = 0
+
+	err := store.UpdateRepo(c, repo)
 	if err != nil {
 		c.AbortWithError(http.StatusInternalServerError, err)
 		return
 	}

+	if remove {
+		err := store.DeleteRepo(c, repo)
+		if err != nil {
+			c.AbortWithError(http.StatusInternalServerError, err)
+			return
+		}
+	}
+
 	remote.Deactivate(user, repo, httputil.GetURL(c.Request))
 	c.Writer.WriteHeader(http.StatusOK)
 }
@@ -9,7 +9,6 @@ import (

 	"github.com/cncd/logging"
 	"github.com/cncd/pubsub"
-	"github.com/drone/drone/cache"
 	"github.com/drone/drone/model"
 	"github.com/drone/drone/router/middleware/session"
 	"github.com/drone/drone/store"

@@ -149,7 +148,10 @@ func EventStream(c *gin.Context) {
 	user := session.User(c)
 	repo := map[string]bool{}
 	if user != nil {
-		repo, _ = cache.GetRepoMap(c, user)
+		repos, _ := store.FromContext(c).RepoList(user)
+		for _, r := range repos {
+			repo[r.FullName] = true
+		}
 	}

 	ticker := time.NewTicker(pingPeriod)
server/sync.go (new file, 55 lines)

@@ -0,0 +1,55 @@
package server

import (
	"time"

	"github.com/drone/drone/model"
	"github.com/drone/drone/remote"
	"github.com/drone/drone/store"
)

// Syncer syncs the user repository and permissions.
type Syncer interface {
	Sync(user *model.User) error
}

type syncer struct {
	remote remote.Remote
	store  store.Store
	perms  model.PermStore
}

func (s *syncer) Sync(user *model.User) error {
	unix := time.Now().Unix()
	repos, err := s.remote.Repos(user)
	if err != nil {
		return err
	}

	var perms []*model.Perm
	for _, repo := range repos {
		perm := model.Perm{
			UserID: user.ID,
			Repo:   repo.FullName,
			Pull:   true,
			Synced: unix,
		}
		if repo.Perm != nil {
			perm.Push = repo.Perm.Push
			perm.Admin = repo.Perm.Admin
		}
		perms = append(perms, &perm)
	}

	err = s.store.RepoBatch(repos)
	if err != nil {
		return err
	}

	err = s.store.PermBatch(perms)
	if err != nil {
		return err
	}

	return nil
}
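For context, the syncer above is not wired into the router directly; the GetFeed and GetRepos handlers changed below construct it from the request context and run it when a user's last sync is older than 72 hours. The helper below is an illustrative condensation of that call site, not code from this commit: the function name resyncIfStale is invented for the sketch, and it assumes the gin, time, model, remote, and store imports used elsewhere in this file.

// resyncIfStale is an illustrative helper; the real call sites are the
// GetFeed and GetRepos handlers changed later in this diff.
func resyncIfStale(c *gin.Context, user *model.User) {
	if time.Unix(user.Synced, 0).Add(72 * time.Hour).After(time.Now()) {
		return // synced within the last 72 hours, nothing to do
	}
	sync := syncer{
		remote: remote.FromContext(c),
		store:  store.FromContext(c),
		perms:  store.FromContext(c),
	}
	if err := sync.Sync(user); err == nil {
		user.Synced = time.Now().Unix()
		store.FromContext(c).UpdateUser(user)
	}
}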
@@ -4,13 +4,14 @@ import (
 	"encoding/base32"
 	"net/http"
 	"strconv"
+	"time"

-	log "github.com/Sirupsen/logrus"
+	"github.com/Sirupsen/logrus"
 	"github.com/gin-gonic/gin"
 	"github.com/gorilla/securecookie"

-	"github.com/drone/drone/cache"
 	"github.com/drone/drone/model"
 	"github.com/drone/drone/remote"
 	"github.com/drone/drone/router/middleware/session"
 	"github.com/drone/drone/shared/token"
 	"github.com/drone/drone/store"

@@ -21,17 +22,38 @@ func GetSelf(c *gin.Context) {
 }

 func GetFeed(c *gin.Context) {
+	user := session.User(c)
 	latest, _ := strconv.ParseBool(c.Query("latest"))

-	repos, err := cache.GetRepos(c, session.User(c))
-	if err != nil {
-		c.String(500, "Error fetching repository list. %s", err)
+	if time.Unix(user.Synced, 0).Add(time.Hour * 72).Before(time.Now()) {
+		logrus.Debugf("sync begin: %s", user.Login)
+		sync := syncer{
+			remote: remote.FromContext(c),
+			store:  store.FromContext(c),
+			perms:  store.FromContext(c),
+		}
+		if err := sync.Sync(user); err != nil {
+			logrus.Debugf("sync error: %s: %s", user.Login, err)
+		} else {
+			logrus.Debugf("sync complete: %s", user.Login)
+			user.Synced = time.Now().Unix()
+			store.FromContext(c).UpdateUser(user)
+		}
+	}
+
+	if latest {
+		feed, err := store.FromContext(c).RepoListLatest(user)
+		if err != nil {
+			c.String(500, "Error fetching feed. %s", err)
+		} else {
+			c.JSON(200, feed)
+		}
 		return
 	}

-	feed, err := store.GetUserFeed(c, repos, latest)
+	feed, err := store.FromContext(c).UserFeed(user)
 	if err != nil {
-		c.String(500, "Error fetching feed. %s", err)
+		c.String(500, "Error fetching user feed. %s", err)
 		return
 	}
 	c.JSON(200, feed)

@@ -44,58 +66,40 @@ func GetRepos(c *gin.Context) {
 		flush, _ = strconv.ParseBool(c.Query("flush"))
 	)

-	if flush {
-		log.Debugf("Evicting repository cache for user %s.", user.Login)
-		cache.DeleteRepos(c, user)
+	if flush || time.Unix(user.Synced, 0).Add(time.Hour*72).Before(time.Now()) {
+		logrus.Debugf("sync begin: %s", user.Login)
+		sync := syncer{
+			remote: remote.FromContext(c),
+			store:  store.FromContext(c),
+			perms:  store.FromContext(c),
+		}
+		if err := sync.Sync(user); err != nil {
+			logrus.Debugf("sync error: %s: %s", user.Login, err)
+		} else {
+			logrus.Debugf("sync complete: %s", user.Login)
+			user.Synced = time.Now().Unix()
+			store.FromContext(c).UpdateUser(user)
+		}
 	}

-	remote, err := cache.GetRepos(c, user)
+	repos, err := store.FromContext(c).RepoList(user)
 	if err != nil {
 		c.String(500, "Error fetching repository list. %s", err)
 		return
 	}

-	repos, err := store.GetRepoListOf(c, remote)
-	if err != nil {
-		c.String(500, "Error fetching repository list. %s", err)
-		return
-	}
-
-	if !all {
+	if all {
+		c.JSON(http.StatusOK, repos)
+		return
+	}

-	// below we combine the two lists to include both active and inactive
-	// repositories. This is displayed on the settings screen to enable
-	// toggling on / off repository settings.
-
-	repom := map[string]bool{}
+	active := []*model.Repo{}
 	for _, repo := range repos {
-		repom[repo.FullName] = true
-	}
-
-	for _, repo := range remote {
-		if repom[repo.FullName] {
-			continue
+		if repo.IsActive {
+			active = append(active, repo)
 		}
-		repos = append(repos, &model.Repo{
-			Avatar:   repo.Avatar,
-			FullName: repo.FullName,
-			Owner:    repo.Owner,
-			Name:     repo.Name,
-		})
 	}
-	c.JSON(http.StatusOK, repos)
-}

-func GetRemoteRepos(c *gin.Context) {
-	repos, err := cache.GetRepos(c, session.User(c))
-	if err != nil {
-		c.String(500, "Error fetching repository list. %s", err)
-		return
-	}
-	c.JSON(http.StatusOK, repos)
+	c.JSON(http.StatusOK, active)
 }

 func PostToken(c *gin.Context) {
|
@ -112,6 +112,34 @@ var migrations = []struct {
|
|||
name: "update-table-set-repo-seq-default",
|
||||
stmt: updateTableSetRepoSeqDefault,
|
||||
},
|
||||
{
|
||||
name: "alter-table-add-repo-active",
|
||||
stmt: alterTableAddRepoActive,
|
||||
},
|
||||
{
|
||||
name: "update-table-set-repo-active",
|
||||
stmt: updateTableSetRepoActive,
|
||||
},
|
||||
{
|
||||
name: "alter-table-add-user-synced",
|
||||
stmt: alterTableAddUserSynced,
|
||||
},
|
||||
{
|
||||
name: "update-table-set-user-synced",
|
||||
stmt: updateTableSetUserSynced,
|
||||
},
|
||||
{
|
||||
name: "create-table-perms",
|
||||
stmt: createTablePerms,
|
||||
},
|
||||
{
|
||||
name: "create-index-perms-repo",
|
||||
stmt: createIndexPermsRepo,
|
||||
},
|
||||
{
|
||||
name: "create-index-perms-user",
|
||||
stmt: createIndexPermsUser,
|
||||
},
|
||||
}
|
||||
|
||||
// Migrate performs the database migration. If the migration fails
|
||||
|
@ -500,3 +528,51 @@ var updateTableSetRepoSeqDefault = `
|
|||
UPDATE repos SET repo_counter = 0
|
||||
WHERE repo_counter IS NULL
|
||||
`
|
||||
|
||||
//
|
||||
// 015_add_column_repo_active.sql
|
||||
//
|
||||
|
||||
var alterTableAddRepoActive = `
|
||||
ALTER TABLE repos ADD COLUMN repo_active BOOLEAN
|
||||
`
|
||||
|
||||
var updateTableSetRepoActive = `
|
||||
UPDATE repos SET repo_active = 1
|
||||
`
|
||||
|
||||
//
|
||||
// 016_add_column_user_synced.sql
|
||||
//
|
||||
|
||||
var alterTableAddUserSynced = `
|
||||
ALTER TABLE users ADD COLUMN user_synced INTEGER;
|
||||
`
|
||||
|
||||
var updateTableSetUserSynced = `
|
||||
UPDATE users SET user_synced = 0
|
||||
`
|
||||
|
||||
//
|
||||
// 017_create_table_perms.sql
|
||||
//
|
||||
|
||||
var createTablePerms = `
|
||||
CREATE TABLE IF NOT EXISTS perms (
|
||||
perm_user_id INTEGER NOT NULL
|
||||
,perm_repo_id INTEGER NOT NULL
|
||||
,perm_pull BOOLEAN
|
||||
,perm_push BOOLEAN
|
||||
,perm_admin BOOLEAN
|
||||
,perm_synced INTEGER
|
||||
,UNIQUE(perm_user_id, perm_repo_id)
|
||||
);
|
||||
`
|
||||
|
||||
var createIndexPermsRepo = `
|
||||
CREATE INDEX IF NOT EXISTS ix_perms_repo ON perms (perm_repo_id);
|
||||
`
|
||||
|
||||
var createIndexPermsUser = `
|
||||
CREATE INDEX IF NOT EXISTS ix_perms_user ON perms (perm_user_id);
|
||||
`
|
||||
|
|
|
7
store/datastore/ddl/sqlite/files/015_add_column_repo_active.sql
Normal file
|
@ -0,0 +1,7 @@
|
|||
-- name: alter-table-add-repo-active
|
||||
|
||||
ALTER TABLE repos ADD COLUMN repo_active BOOLEAN
|
||||
|
||||
-- name: update-table-set-repo-active
|
||||
|
||||
UPDATE repos SET repo_active = 1
|
|
7
store/datastore/ddl/sqlite/files/016_add_column_user_synced.sql
Normal file
|
@ -0,0 +1,7 @@
|
|||
-- name: alter-table-add-user-synced
|
||||
|
||||
ALTER TABLE users ADD COLUMN user_synced INTEGER;
|
||||
|
||||
-- name: update-table-set-user-synced
|
||||
|
||||
UPDATE users SET user_synced = 0
|
19
store/datastore/ddl/sqlite/files/017_create_table_perms.sql
Normal file
|
@ -0,0 +1,19 @@
|
|||
-- name: create-table-perms
|
||||
|
||||
CREATE TABLE IF NOT EXISTS perms (
|
||||
perm_user_id INTEGER NOT NULL
|
||||
,perm_repo_id INTEGER NOT NULL
|
||||
,perm_pull BOOLEAN
|
||||
,perm_push BOOLEAN
|
||||
,perm_admin BOOLEAN
|
||||
,perm_synced INTEGER
|
||||
,UNIQUE(perm_user_id, perm_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-perms-repo
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_perms_repo ON perms (perm_repo_id);
|
||||
|
||||
-- name: create-index-perms-user
|
||||
|
||||
CREATE INDEX IF NOT EXISTS ix_perms_user ON perms (perm_user_id);
|
|
@ -1,242 +0,0 @@
|
|||
-- name: create-table-users
|
||||
|
||||
CREATE TABLE users (
|
||||
user_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,user_login TEXT
|
||||
,user_token TEXT
|
||||
,user_secret TEXT
|
||||
,user_expiry INTEGER
|
||||
,user_email TEXT
|
||||
,user_avatar TEXT
|
||||
,user_active BOOLEAN
|
||||
,user_admin BOOLEAN
|
||||
,user_hash TEXT
|
||||
|
||||
,UNIQUE(user_login)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-repos
|
||||
|
||||
CREATE TABLE repos (
|
||||
repo_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,repo_user_id INTEGER
|
||||
,repo_owner TEXT
|
||||
,repo_name TEXT
|
||||
,repo_full_name TEXT
|
||||
,repo_avatar TEXT
|
||||
,repo_link TEXT
|
||||
,repo_clone TEXT
|
||||
,repo_branch TEXT
|
||||
,repo_timeout INTEGER
|
||||
,repo_private BOOLEAN
|
||||
,repo_trusted BOOLEAN
|
||||
,repo_allow_pr BOOLEAN
|
||||
,repo_allow_push BOOLEAN
|
||||
,repo_allow_deploys BOOLEAN
|
||||
,repo_allow_tags BOOLEAN
|
||||
,repo_hash TEXT
|
||||
,repo_scm TEXT
|
||||
,repo_config_path TEXT
|
||||
,repo_gated BOOLEAN
|
||||
,UNIQUE(repo_full_name)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-secrets
|
||||
|
||||
CREATE TABLE secrets (
|
||||
secret_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,secret_repo_id INTEGER
|
||||
,secret_name TEXT
|
||||
,secret_value TEXT
|
||||
,secret_images TEXT
|
||||
,secret_events TEXT
|
||||
,secret_skip_verify BOOLEAN
|
||||
,secret_conceal BOOLEAN
|
||||
,UNIQUE(secret_name, secret_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-secrets-repo
|
||||
|
||||
CREATE INDEX ix_secrets_repo ON secrets (secret_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-registry
|
||||
|
||||
CREATE TABLE registry (
|
||||
registry_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,registry_repo_id INTEGER
|
||||
,registry_addr TEXT
|
||||
,registry_username TEXT
|
||||
,registry_password TEXT
|
||||
,registry_email TEXT
|
||||
,registry_token TEXT
|
||||
|
||||
,UNIQUE(registry_addr, registry_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-registry-repo
|
||||
|
||||
CREATE INDEX ix_registry_repo ON registry (registry_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-builds
|
||||
|
||||
CREATE TABLE builds (
|
||||
build_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,build_repo_id INTEGER
|
||||
,build_number INTEGER
|
||||
,build_event TEXT
|
||||
,build_status TEXT
|
||||
,build_enqueued INTEGER
|
||||
,build_created INTEGER
|
||||
,build_started INTEGER
|
||||
,build_finished INTEGER
|
||||
,build_commit TEXT
|
||||
,build_branch TEXT
|
||||
,build_ref TEXT
|
||||
,build_refspec TEXT
|
||||
,build_remote TEXT
|
||||
,build_title TEXT
|
||||
,build_message TEXT
|
||||
,build_timestamp INTEGER
|
||||
,build_author TEXT
|
||||
,build_avatar TEXT
|
||||
,build_email TEXT
|
||||
,build_link TEXT
|
||||
,build_deploy TEXT
|
||||
,build_signed BOOLEAN
|
||||
,build_verified BOOLEAN
|
||||
,build_parent INTEGER
|
||||
,build_error TEXT
|
||||
,build_reviewer TEXT
|
||||
,build_reviewed INTEGER
|
||||
,build_sender TEXT
|
||||
,build_config_id INTEGER
|
||||
,UNIQUE(build_number, build_repo_id)
|
||||
);
|
||||
|
||||
-- name: create-index-builds-repo
|
||||
|
||||
CREATE INDEX ix_build_repo ON builds (build_repo_id);
|
||||
|
||||
-- name: create-index-builds-author
|
||||
|
||||
CREATE INDEX ix_build_author ON builds (build_author);
|
||||
|
||||
-- name: create-index-builds-status
|
||||
|
||||
CREATE INDEX ix_build_status_running ON builds (build_status)
|
||||
WHERE build_status IN ('pending', 'running');
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-procs
|
||||
|
||||
CREATE TABLE procs (
|
||||
proc_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,proc_build_id INTEGER
|
||||
,proc_pid INTEGER
|
||||
,proc_ppid INTEGER
|
||||
,proc_pgid INTEGER
|
||||
,proc_name TEXT
|
||||
,proc_state TEXT
|
||||
,proc_error TEXT
|
||||
,proc_exit_code INTEGER
|
||||
,proc_started INTEGER
|
||||
,proc_stopped INTEGER
|
||||
,proc_machine TEXT
|
||||
,proc_platform TEXT
|
||||
,proc_environ TEXT
|
||||
,UNIQUE(proc_build_id, proc_pid)
|
||||
);
|
||||
|
||||
-- name: create-index-procs-build
|
||||
|
||||
CREATE INDEX proc_build_ix ON procs (proc_build_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-logs
|
||||
|
||||
CREATE TABLE IF NOT EXISTS logs (
|
||||
log_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,log_job_id INTEGER
|
||||
,log_data BLOB
|
||||
,UNIQUE(log_job_id)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-files
|
||||
|
||||
CREATE TABLE IF NOT EXISTS files (
|
||||
file_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,file_build_id INTEGER
|
||||
,file_proc_id INTEGER
|
||||
,file_name TEXT
|
||||
,file_mime TEXT
|
||||
,file_size INTEGER
|
||||
,file_time INTEGER
|
||||
,file_data BLOB
|
||||
,UNIQUE(file_proc_id,file_name)
|
||||
,FOREIGN KEY(file_proc_id) REFERENCES procs (proc_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
-- name: create-index-files-builds
|
||||
|
||||
CREATE INDEX file_build_ix ON files (file_build_id);
|
||||
|
||||
-- name: create-index-files-procs
|
||||
|
||||
CREATE INDEX file_proc_ix ON files (file_proc_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-senders
|
||||
|
||||
CREATE TABLE IF NOT EXISTS senders (
|
||||
sender_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,sender_repo_id INTEGER
|
||||
,sender_login BOOLEAN
|
||||
,sender_allow BOOLEAN
|
||||
,sender_block BOOLEAN
|
||||
|
||||
,UNIQUE(sender_repo_id,sender_login)
|
||||
);
|
||||
|
||||
-- name: create-index-sender-repos
|
||||
|
||||
CREATE INDEX sender_repo_ix ON senders (sender_repo_id);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-config
|
||||
|
||||
CREATE TABLE IF NOT EXISTS config (
|
||||
config_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,config_repo_id INTEGER
|
||||
,config_hash TEXT
|
||||
,config_data BLOB
|
||||
|
||||
,UNIQUE(config_hash, config_repo_id)
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-tasks
|
||||
|
||||
CREATE TABLE IF NOT EXISTS tasks (
|
||||
task_id TEXT PRIMARY KEY
|
||||
,task_data BLOB
|
||||
,task_labels BLOB
|
||||
);
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
-- name: create-table-agents
|
||||
|
||||
CREATE TABLE IF NOT EXISTS agents (
|
||||
agent_id INTEGER PRIMARY KEY AUTOINCREMENT
|
||||
,agent_addr TEXT
|
||||
,agent_platform TEXT
|
||||
,agent_capacity INTEGER
|
||||
,agent_created INTEGER
|
||||
,agent_updated INTEGER
|
||||
|
||||
,UNIQUE(agent_addr)
|
||||
);
|
44
store/datastore/perms.go
Normal file
|
@ -0,0 +1,44 @@
|
|||
package datastore
|
||||
|
||||
import (
|
||||
"github.com/drone/drone/model"
|
||||
"github.com/drone/drone/store/datastore/sql"
|
||||
|
||||
"github.com/russross/meddler"
|
||||
)
|
||||
|
||||
func (db *datastore) PermFind(user *model.User, repo *model.Repo) (*model.Perm, error) {
|
||||
stmt := sql.Lookup(db.driver, "perms-find-user-repo")
|
||||
data := new(model.Perm)
|
||||
err := meddler.QueryRow(db, data, stmt, user.ID, repo.ID)
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (db *datastore) PermUpsert(perm *model.Perm) error {
|
||||
stmt := sql.Lookup(db.driver, "perms-insert-replace-lookup")
|
||||
_, err := db.Exec(stmt,
|
||||
perm.UserID,
|
||||
perm.Repo,
|
||||
perm.Pull,
|
||||
perm.Push,
|
||||
perm.Admin,
|
||||
perm.Synced,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (db *datastore) PermBatch(perms []*model.Perm) (err error) {
|
||||
for _, perm := range perms {
|
||||
err = db.PermUpsert(perm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *datastore) PermDelete(perm *model.Perm) error {
|
||||
stmt := sql.Lookup(db.driver, "perms-delete-user-repo")
|
||||
_, err := db.Exec(stmt, perm.UserID, perm.RepoID)
|
||||
return err
|
||||
}
|
138
store/datastore/perms_test.go
Normal file
|
@ -0,0 +1,138 @@
|
|||
package datastore
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone/model"
|
||||
)
|
||||
|
||||
func TestPermFind(t *testing.T) {
|
||||
s := newTest()
|
||||
defer func() {
|
||||
s.Exec("delete from perms")
|
||||
s.Exec("delete from repos")
|
||||
s.Close()
|
||||
}()
|
||||
|
||||
user := &model.User{ID: 1}
|
||||
repo := &model.Repo{
|
||||
UserID: 1,
|
||||
FullName: "bradrydzewski/drone",
|
||||
Owner: "bradrydzewski",
|
||||
Name: "drone",
|
||||
}
|
||||
s.CreateRepo(repo)
|
||||
|
||||
err := s.PermUpsert(
|
||||
&model.Perm{
|
||||
UserID: user.ID,
|
||||
RepoID: repo.ID,
|
||||
Repo: repo.FullName,
|
||||
Pull: true,
|
||||
Push: false,
|
||||
Admin: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
perm, err := s.PermFind(user, repo)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if got, want := perm.Pull, true; got != want {
|
||||
t.Errorf("Wanted pull %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Push, false; got != want {
|
||||
t.Errorf("Wanted push %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Admin, false; got != want {
|
||||
t.Errorf("Wanted admin %v, got %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPermUpsert(t *testing.T) {
|
||||
s := newTest()
|
||||
defer func() {
|
||||
s.Exec("delete from perms")
|
||||
s.Exec("delete from repos")
|
||||
s.Close()
|
||||
}()
|
||||
|
||||
user := &model.User{ID: 1}
|
||||
repo := &model.Repo{
|
||||
UserID: 1,
|
||||
FullName: "bradrydzewski/drone",
|
||||
Owner: "bradrydzewski",
|
||||
Name: "drone",
|
||||
}
|
||||
s.CreateRepo(repo)
|
||||
|
||||
err := s.PermUpsert(
|
||||
&model.Perm{
|
||||
UserID: user.ID,
|
||||
RepoID: repo.ID,
|
||||
Repo: repo.FullName,
|
||||
Pull: true,
|
||||
Push: false,
|
||||
Admin: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
perm, err := s.PermFind(user, repo)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if got, want := perm.Pull, true; got != want {
|
||||
t.Errorf("Wanted pull %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Push, false; got != want {
|
||||
t.Errorf("Wanted push %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Admin, false; got != want {
|
||||
t.Errorf("Wanted admin %v, got %v", want, got)
|
||||
}
|
||||
|
||||
//
|
||||
// this will attempt to replace the existing permissions
|
||||
// using the insert or replace logic.
|
||||
//
|
||||
|
||||
err = s.PermUpsert(
|
||||
&model.Perm{
|
||||
UserID: user.ID,
|
||||
RepoID: repo.ID,
|
||||
Repo: repo.FullName,
|
||||
Pull: true,
|
||||
Push: true,
|
||||
Admin: true,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
perm, err = s.PermFind(user, repo)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if got, want := perm.Pull, true; got != want {
|
||||
t.Errorf("Wanted pull %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Push, true; got != want {
|
||||
t.Errorf("Wanted push %v, got %v", want, got)
|
||||
}
|
||||
if got, want := perm.Admin, true; got != want {
|
||||
t.Errorf("Wanted admin %v, got %v", want, got)
|
||||
}
|
||||
}
|
|
@ -59,6 +59,58 @@ func (db *datastore) DeleteRepo(repo *model.Repo) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (db *datastore) RepoList(user *model.User) ([]*model.Repo, error) {
|
||||
stmt := sql.Lookup(db.driver, "repo-find-user")
|
||||
data := []*model.Repo{}
|
||||
err := meddler.QueryAll(db, &data, stmt, user.ID)
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (db *datastore) RepoListLatest(user *model.User) ([]*model.Feed, error) {
|
||||
stmt := sql.Lookup(db.driver, "feed-latest-build")
|
||||
data := []*model.Feed{}
|
||||
err := meddler.QueryAll(db, &data, stmt, user.ID)
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (db *datastore) RepoBatch(repos []*model.Repo) error {
|
||||
stmt := sql.Lookup(db.driver, "repo-insert-ignore")
|
||||
for _, repo := range repos {
|
||||
_, err := db.Exec(stmt,
|
||||
repo.UserID,
|
||||
repo.Owner,
|
||||
repo.Name,
|
||||
repo.FullName,
|
||||
repo.Avatar,
|
||||
repo.Link,
|
||||
repo.Clone,
|
||||
repo.Branch,
|
||||
repo.Timeout,
|
||||
repo.IsPrivate,
|
||||
repo.IsTrusted,
|
||||
repo.IsActive,
|
||||
repo.AllowPull,
|
||||
repo.AllowPush,
|
||||
repo.AllowDeploy,
|
||||
repo.AllowTag,
|
||||
repo.Hash,
|
||||
repo.Kind,
|
||||
repo.Config,
|
||||
repo.IsGated,
|
||||
repo.Visibility,
|
||||
repo.Counter,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// last, _ := res.LastInsertId()
|
||||
// if last != 0 {
|
||||
// repo.ID = last
|
||||
// }
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const repoTable = "repos"
|
||||
|
||||
const repoNameQuery = `
|
||||
|
|
|
@ -174,3 +174,55 @@ func TestRepos(t *testing.T) {
|
|||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestRepoBatch(t *testing.T) {
|
||||
s := newTest()
|
||||
defer func() {
|
||||
s.Exec("delete from repos")
|
||||
s.Close()
|
||||
}()
|
||||
|
||||
repo := &model.Repo{
|
||||
UserID: 1,
|
||||
FullName: "foo/bar",
|
||||
Owner: "foo",
|
||||
Name: "bar",
|
||||
}
|
||||
err := s.CreateRepo(repo)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
err = s.RepoBatch(
|
||||
[]*model.Repo{
|
||||
{
|
||||
UserID: 1,
|
||||
FullName: "foo/bar",
|
||||
Owner: "foo",
|
||||
Name: "bar",
|
||||
},
|
||||
{
|
||||
UserID: 1,
|
||||
FullName: "bar/baz",
|
||||
Owner: "bar",
|
||||
Name: "baz",
|
||||
},
|
||||
{
|
||||
UserID: 1,
|
||||
FullName: "baz/qux",
|
||||
Owner: "baz",
|
||||
Name: "qux",
|
||||
},
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
count, _ := s.GetRepoCount()
|
||||
if got, want := count, 3; got != want {
|
||||
t.Errorf("Want %d repositories, got %d", want, got)
|
||||
}
|
||||
}
|
||||
|
|
61
store/datastore/sql/sqlite/files/feed.sql
Normal file
|
@ -0,0 +1,61 @@
|
|||
-- name: feed-latest-build
|
||||
|
||||
SELECT
|
||||
repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,build_number
|
||||
,build_event
|
||||
,build_status
|
||||
,build_created
|
||||
,build_started
|
||||
,build_finished
|
||||
,build_commit
|
||||
,build_branch
|
||||
,build_ref
|
||||
,build_refspec
|
||||
,build_remote
|
||||
,build_title
|
||||
,build_message
|
||||
,build_author
|
||||
,build_email
|
||||
,build_avatar
|
||||
FROM repos LEFT OUTER JOIN builds ON build_id = (
|
||||
SELECT build_id FROM builds
|
||||
WHERE builds.build_repo_id = repos.repo_id
|
||||
ORDER BY build_id DESC
|
||||
LIMIT 1
|
||||
)
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
AND repos.repo_active = 1
|
||||
ORDER BY repo_full_name ASC;
|
||||
|
||||
-- name: feed
|
||||
|
||||
SELECT
|
||||
repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,build_number
|
||||
,build_event
|
||||
,build_status
|
||||
,build_created
|
||||
,build_started
|
||||
,build_finished
|
||||
,build_commit
|
||||
,build_branch
|
||||
,build_ref
|
||||
,build_refspec
|
||||
,build_remote
|
||||
,build_title
|
||||
,build_message
|
||||
,build_author
|
||||
,build_email
|
||||
,build_avatar
|
||||
FROM repos
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
INNER JOIN builds ON builds.build_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
ORDER BY build_id DESC
|
||||
LIMIT 50
|
58
store/datastore/sql/sqlite/files/perms.sql
Normal file
|
@ -0,0 +1,58 @@
|
|||
-- name: perms-find-user
|
||||
|
||||
SELECT
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
|
||||
-- name: perms-find-user-repo
|
||||
|
||||
SELECT
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_repo_id = ?
|
||||
|
||||
-- name: perms-insert-replace
|
||||
|
||||
INSERT OR REPLACE INTO perms (
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
) VALUES (?,?,?,?,?,?)
|
||||
|
||||
-- name: perms-insert-replace-lookup
|
||||
|
||||
INSERT OR REPLACE INTO perms (
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
) VALUES (?,(SELECT repo_id FROM repos WHERE repo_full_name = ?),?,?,?,?)
|
||||
|
||||
-- name: perms-delete-user-repo
|
||||
|
||||
DELETE FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_repo_id = ?
|
||||
|
||||
-- name: perms-delete-user-date
|
||||
|
||||
DELETE FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_synced < ?
|
|
@ -3,3 +3,61 @@
|
|||
UPDATE repos SET repo_counter = ?
|
||||
WHERE repo_counter = ?
|
||||
AND repo_id = ?
|
||||
|
||||
-- name: repo-find-user
|
||||
|
||||
SELECT
|
||||
repo_id
|
||||
,repo_user_id
|
||||
,repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,repo_avatar
|
||||
,repo_link
|
||||
,repo_clone
|
||||
,repo_branch
|
||||
,repo_timeout
|
||||
,repo_private
|
||||
,repo_trusted
|
||||
,repo_active
|
||||
,repo_allow_pr
|
||||
,repo_allow_push
|
||||
,repo_allow_deploys
|
||||
,repo_allow_tags
|
||||
,repo_hash
|
||||
,repo_scm
|
||||
,repo_config_path
|
||||
,repo_gated
|
||||
,repo_visibility
|
||||
,repo_counter
|
||||
FROM repos
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
ORDER BY repo_full_name ASC
|
||||
|
||||
-- name: repo-insert-ignore
|
||||
|
||||
INSERT OR IGNORE INTO repos (
|
||||
repo_user_id
|
||||
,repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,repo_avatar
|
||||
,repo_link
|
||||
,repo_clone
|
||||
,repo_branch
|
||||
,repo_timeout
|
||||
,repo_private
|
||||
,repo_trusted
|
||||
,repo_active
|
||||
,repo_allow_pr
|
||||
,repo_allow_push
|
||||
,repo_allow_deploys
|
||||
,repo_allow_tags
|
||||
,repo_hash
|
||||
,repo_scm
|
||||
,repo_config_path
|
||||
,repo_gated
|
||||
,repo_visibility
|
||||
,repo_counter
|
||||
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
|
|
|
@ -6,35 +6,45 @@ func Lookup(name string) string {
|
|||
}
|
||||
|
||||
var index = map[string]string{
|
||||
"config-find-id": configFindId,
|
||||
"config-find-repo-hash": configFindRepoHash,
|
||||
"config-find-approved": configFindApproved,
|
||||
"count-users": countUsers,
|
||||
"count-repos": countRepos,
|
||||
"count-builds": countBuilds,
|
||||
"files-find-build": filesFindBuild,
|
||||
"files-find-proc-name": filesFindProcName,
|
||||
"files-find-proc-name-data": filesFindProcNameData,
|
||||
"files-delete-build": filesDeleteBuild,
|
||||
"procs-find-id": procsFindId,
|
||||
"procs-find-build": procsFindBuild,
|
||||
"procs-find-build-pid": procsFindBuildPid,
|
||||
"procs-find-build-ppid": procsFindBuildPpid,
|
||||
"procs-delete-build": procsDeleteBuild,
|
||||
"registry-find-repo": registryFindRepo,
|
||||
"registry-find-repo-addr": registryFindRepoAddr,
|
||||
"registry-delete-repo": registryDeleteRepo,
|
||||
"registry-delete": registryDelete,
|
||||
"repo-update-counter": repoUpdateCounter,
|
||||
"secret-find-repo": secretFindRepo,
|
||||
"secret-find-repo-name": secretFindRepoName,
|
||||
"secret-delete": secretDelete,
|
||||
"sender-find-repo": senderFindRepo,
|
||||
"sender-find-repo-login": senderFindRepoLogin,
|
||||
"sender-delete-repo": senderDeleteRepo,
|
||||
"sender-delete": senderDelete,
|
||||
"task-list": taskList,
|
||||
"task-delete": taskDelete,
|
||||
"config-find-id": configFindId,
|
||||
"config-find-repo-hash": configFindRepoHash,
|
||||
"config-find-approved": configFindApproved,
|
||||
"count-users": countUsers,
|
||||
"count-repos": countRepos,
|
||||
"count-builds": countBuilds,
|
||||
"feed-latest-build": feedLatestBuild,
|
||||
"feed": feed,
|
||||
"files-find-build": filesFindBuild,
|
||||
"files-find-proc-name": filesFindProcName,
|
||||
"files-find-proc-name-data": filesFindProcNameData,
|
||||
"files-delete-build": filesDeleteBuild,
|
||||
"perms-find-user": permsFindUser,
|
||||
"perms-find-user-repo": permsFindUserRepo,
|
||||
"perms-insert-replace": permsInsertReplace,
|
||||
"perms-insert-replace-lookup": permsInsertReplaceLookup,
|
||||
"perms-delete-user-repo": permsDeleteUserRepo,
|
||||
"perms-delete-user-date": permsDeleteUserDate,
|
||||
"procs-find-id": procsFindId,
|
||||
"procs-find-build": procsFindBuild,
|
||||
"procs-find-build-pid": procsFindBuildPid,
|
||||
"procs-find-build-ppid": procsFindBuildPpid,
|
||||
"procs-delete-build": procsDeleteBuild,
|
||||
"registry-find-repo": registryFindRepo,
|
||||
"registry-find-repo-addr": registryFindRepoAddr,
|
||||
"registry-delete-repo": registryDeleteRepo,
|
||||
"registry-delete": registryDelete,
|
||||
"repo-update-counter": repoUpdateCounter,
|
||||
"repo-find-user": repoFindUser,
|
||||
"repo-insert-ignore": repoInsertIgnore,
|
||||
"secret-find-repo": secretFindRepo,
|
||||
"secret-find-repo-name": secretFindRepoName,
|
||||
"secret-delete": secretDelete,
|
||||
"sender-find-repo": senderFindRepo,
|
||||
"sender-find-repo-login": senderFindRepoLogin,
|
||||
"sender-delete-repo": senderDeleteRepo,
|
||||
"sender-delete": senderDelete,
|
||||
"task-list": taskList,
|
||||
"task-delete": taskDelete,
|
||||
}
|
||||
|
||||
var configFindId = `
|
||||
|
@ -81,6 +91,68 @@ SELECT count(1)
|
|||
FROM builds
|
||||
`
|
||||
|
||||
var feedLatestBuild = `
|
||||
SELECT
|
||||
repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,build_number
|
||||
,build_event
|
||||
,build_status
|
||||
,build_created
|
||||
,build_started
|
||||
,build_finished
|
||||
,build_commit
|
||||
,build_branch
|
||||
,build_ref
|
||||
,build_refspec
|
||||
,build_remote
|
||||
,build_title
|
||||
,build_message
|
||||
,build_author
|
||||
,build_email
|
||||
,build_avatar
|
||||
FROM repos LEFT OUTER JOIN builds ON build_id = (
|
||||
SELECT build_id FROM builds
|
||||
WHERE builds.build_repo_id = repos.repo_id
|
||||
ORDER BY build_id DESC
|
||||
LIMIT 1
|
||||
)
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
AND repos.repo_active = 1
|
||||
ORDER BY repo_full_name ASC;
|
||||
`
|
||||
|
||||
var feed = `
|
||||
SELECT
|
||||
repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,build_number
|
||||
,build_event
|
||||
,build_status
|
||||
,build_created
|
||||
,build_started
|
||||
,build_finished
|
||||
,build_commit
|
||||
,build_branch
|
||||
,build_ref
|
||||
,build_refspec
|
||||
,build_remote
|
||||
,build_title
|
||||
,build_message
|
||||
,build_author
|
||||
,build_email
|
||||
,build_avatar
|
||||
FROM repos
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
INNER JOIN builds ON builds.build_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
ORDER BY build_id DESC
|
||||
LIMIT 50
|
||||
`
|
||||
|
||||
var filesFindBuild = `
|
||||
SELECT
|
||||
file_id
|
||||
|
@ -127,6 +199,65 @@ var filesDeleteBuild = `
|
|||
DELETE FROM files WHERE file_build_id = ?
|
||||
`
|
||||
|
||||
var permsFindUser = `
|
||||
SELECT
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
`
|
||||
|
||||
var permsFindUserRepo = `
|
||||
SELECT
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_repo_id = ?
|
||||
`
|
||||
|
||||
var permsInsertReplace = `
|
||||
INSERT OR REPLACE INTO perms (
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
) VALUES (?,?,?,?,?,?)
|
||||
`
|
||||
|
||||
var permsInsertReplaceLookup = `
|
||||
INSERT OR REPLACE INTO perms (
|
||||
perm_user_id
|
||||
,perm_repo_id
|
||||
,perm_pull
|
||||
,perm_push
|
||||
,perm_admin
|
||||
,perm_synced
|
||||
) VALUES (?,(SELECT repo_id FROM repos WHERE repo_full_name = ?),?,?,?,?)
|
||||
`
|
||||
|
||||
var permsDeleteUserRepo = `
|
||||
DELETE FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_repo_id = ?
|
||||
`
|
||||
|
||||
var permsDeleteUserDate = `
|
||||
DELETE FROM perms
|
||||
WHERE perm_user_id = ?
|
||||
AND perm_synced < ?
|
||||
`
|
||||
|
||||
var procsFindId = `
|
||||
SELECT
|
||||
proc_id
|
||||
|
@ -256,6 +387,64 @@ WHERE repo_counter = ?
|
|||
AND repo_id = ?
|
||||
`
|
||||
|
||||
var repoFindUser = `
|
||||
SELECT
|
||||
repo_id
|
||||
,repo_user_id
|
||||
,repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,repo_avatar
|
||||
,repo_link
|
||||
,repo_clone
|
||||
,repo_branch
|
||||
,repo_timeout
|
||||
,repo_private
|
||||
,repo_trusted
|
||||
,repo_active
|
||||
,repo_allow_pr
|
||||
,repo_allow_push
|
||||
,repo_allow_deploys
|
||||
,repo_allow_tags
|
||||
,repo_hash
|
||||
,repo_scm
|
||||
,repo_config_path
|
||||
,repo_gated
|
||||
,repo_visibility
|
||||
,repo_counter
|
||||
FROM repos
|
||||
INNER JOIN perms ON perms.perm_repo_id = repos.repo_id
|
||||
WHERE perms.perm_user_id = ?
|
||||
ORDER BY repo_full_name ASC
|
||||
`
|
||||
|
||||
var repoInsertIgnore = `
|
||||
INSERT OR IGNORE INTO repos (
|
||||
repo_user_id
|
||||
,repo_owner
|
||||
,repo_name
|
||||
,repo_full_name
|
||||
,repo_avatar
|
||||
,repo_link
|
||||
,repo_clone
|
||||
,repo_branch
|
||||
,repo_timeout
|
||||
,repo_private
|
||||
,repo_trusted
|
||||
,repo_active
|
||||
,repo_allow_pr
|
||||
,repo_allow_push
|
||||
,repo_allow_deploys
|
||||
,repo_allow_tags
|
||||
,repo_hash
|
||||
,repo_scm
|
||||
,repo_config_path
|
||||
,repo_gated
|
||||
,repo_visibility
|
||||
,repo_counter
|
||||
) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
`
|
||||
|
||||
var secretFindRepo = `
|
||||
SELECT
|
||||
secret_id
|
||||
|
|
|
@ -86,6 +86,13 @@ func (db *datastore) DeleteUser(user *model.User) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (db *datastore) UserFeed(user *model.User) ([]*model.Feed, error) {
|
||||
stmt := sql.Lookup(db.driver, "feed")
|
||||
data := []*model.Feed{}
|
||||
err := meddler.QueryAll(db, &data, stmt, user.ID)
|
||||
return data, err
|
||||
}
|
||||
|
||||
const userTable = "users"
|
||||
|
||||
const userLoginQuery = `
|
||||
|
|
|
@ -92,6 +92,17 @@ type Store interface {
|
|||
// new functions
|
||||
//
|
||||
|
||||
UserFeed(*model.User) ([]*model.Feed, error)
|
||||
|
||||
RepoList(*model.User) ([]*model.Repo, error)
|
||||
RepoListLatest(*model.User) ([]*model.Feed, error)
|
||||
RepoBatch([]*model.Repo) error
|
||||
|
||||
PermFind(user *model.User, repo *model.Repo) (*model.Perm, error)
|
||||
PermUpsert(perm *model.Perm) error
|
||||
PermBatch(perms []*model.Perm) error
|
||||
PermDelete(perm *model.Perm) error
|
||||
|
||||
ConfigLoad(int64) (*model.Config, error)
|
||||
ConfigFind(*model.Repo, string) (*model.Config, error)
|
||||
ConfigFindApproved(*model.Config) (bool, error)
|
||||
|
|
27
vendor/github.com/ianschenck/envflag/LICENSE
generated
vendored
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2013, Ian Schenck
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
Neither the name of Ian Schenck nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
37
vendor/github.com/ianschenck/envflag/README.md
generated
vendored
|
@ -1,37 +0,0 @@
|
|||
envflag
|
||||
=======
|
||||
|
||||
Golang flags, but bolted onto the environment rather than the command-line.
|
||||
|
||||
Read the [godocs](http://godoc.org/github.com/ianschenck/envflag).
|
||||
|
||||
Motivation
|
||||
==========
|
||||
|
||||
Some like the distinction that command-line flags control behavior
|
||||
while environment variables configure. Also
|
||||
[12-factor](http://12factor.net/) recommends the use of environment
|
||||
variables for configuration. The interface of the golang flag package
|
||||
is well designed and easy to use, and allows for other lists
|
||||
(os.Environ() vs os.Args) to be parsed as flags. It makes sense then
|
||||
to use the same interface, the same types, and the same parsing
|
||||
(caveat: there is some ugly string hacking to make environment
|
||||
variables look like flags) to the same ends.
|
||||
|
||||
Differences
|
||||
===========
|
||||
|
||||
Calling `flag.Parse()` will not parse environment flags. Calling
|
||||
`envflag.Parse()` will not parse command-line flags. There is no good
|
||||
reason to combine these two when the net savings is a single line in a
|
||||
`func main()`. Furthermore, doing so would require users to accept a
|
||||
precedence order of my choosing.
|
||||
|
||||
The presence of an environment variable named `h` or `help` will
|
||||
probably cause problems (print Usage and os.Exit(0)). Work around this
|
||||
by defining those flags somewhere (and ignoring them).
|
||||
|
||||
Before calling `Flagset.Parse` on `EnvironmentFlags`, the environment
|
||||
variables being passed to `Parse` are trimmed down using
|
||||
`Lookup`. This behavior is different from `flag.Parse` in that extra
|
||||
environment variables are ignored (and won't crash `envflag.Parse`).
|
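A minimal sketch of typical envflag usage, assuming only the API documented in this README (String and Parse); the variable name and default value are illustrative, not taken from this repository:

```Go
package main

import (
	"fmt"

	"github.com/ianschenck/envflag"
)

func main() {
	// DATABASE_DRIVER is read from the environment (e.g. DATABASE_DRIVER=sqlite3),
	// never from os.Args; the default applies when the variable is unset.
	driver := envflag.String("DATABASE_DRIVER", "sqlite3", "database driver name")

	// Parses os.Environ() only; command-line flags would still need a separate flag.Parse().
	envflag.Parse()

	fmt.Println("using driver:", *driver)
}
```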
192
vendor/github.com/ianschenck/envflag/envflag.go
generated
vendored
|
@ -1,192 +0,0 @@
|
|||
// Copyright 2013 Ian Schenck. Use of this source code is governed by
|
||||
// a license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package envflag adds environment variable flags to the flag package.
|
||||
|
||||
Usage:
|
||||
|
||||
Define flags using envflag.String(), Bool(), Int(), etc. This package
|
||||
works nearly the same as the stdlib flag package. Parsing the
|
||||
Environment flags is done by calling envflag.Parse()
|
||||
|
||||
It will *not* attempt to parse any normally-defined command-line
|
||||
flags. Command-line flags are explicitly left alone and separate.
|
||||
*/
|
||||
package envflag
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// VisitAll visits the environment flags in lexicographical order,
|
||||
// calling fn for each. It visits all flags, even those not set.
|
||||
func VisitAll(fn func(*flag.Flag)) {
|
||||
EnvironmentFlags.VisitAll(fn)
|
||||
}
|
||||
|
||||
// Visit visits the environment flags in lexicographical order,
|
||||
// calling fn for each. It visits only those flags that have been
|
||||
// set.
|
||||
func Visit(fn func(*flag.Flag)) {
|
||||
EnvironmentFlags.Visit(fn)
|
||||
}
|
||||
|
||||
// Lookup returns the Flag structure of the named environment flag,
|
||||
// returning nil if none exists.
|
||||
func Lookup(name string) *flag.Flag {
|
||||
return EnvironmentFlags.Lookup(name)
|
||||
}
|
||||
|
||||
// Set sets the value of the named environment flag.
|
||||
func Set(name, value string) error {
|
||||
return EnvironmentFlags.Set(name, value)
|
||||
}
|
||||
|
||||
// BoolVar defines a bool flag with specified name, default value, and
|
||||
// usage string. The argument p points to a bool variable in which to
|
||||
// store the value of the flag.
|
||||
func BoolVar(p *bool, name string, value bool, usage string) {
|
||||
EnvironmentFlags.BoolVar(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Bool defines a bool flag with specified name, default value, and
|
||||
// usage string. The return value is the address of a bool variable
|
||||
// that stores the value of the flag.
|
||||
func Bool(name string, value bool, usage string) *bool {
|
||||
return EnvironmentFlags.Bool(name, value, usage)
|
||||
}
|
||||
|
||||
// IntVar defines an int flag with specified name, default value, and
|
||||
// usage string. The argument p points to an int variable in which to
|
||||
// store the value of the flag.
|
||||
func IntVar(p *int, name string, value int, usage string) {
|
||||
EnvironmentFlags.IntVar(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Int defines an int flag with specified name, default value, and
|
||||
// usage string. The return value is the address of an int variable
|
||||
// that stores the value of the flag.
|
||||
func Int(name string, value int, usage string) *int {
|
||||
return EnvironmentFlags.Int(name, value, usage)
|
||||
}
|
||||
|
||||
// Int64Var defines an int64 flag with specified name, default value,
|
||||
// and usage string. The argument p points to an int64 variable in
|
||||
// which to store the value of the flag.
|
||||
func Int64Var(p *int64, name string, value int64, usage string) {
|
||||
EnvironmentFlags.Int64Var(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Int64 defines an int64 flag with specified name, default value, and
|
||||
// usage string. The return value is the address of an int64 variable
|
||||
// that stores the value of the flag.
|
||||
func Int64(name string, value int64, usage string) *int64 {
|
||||
return EnvironmentFlags.Int64(name, value, usage)
|
||||
}
|
||||
|
||||
// UintVar defines a uint flag with specified name, default value, and
|
||||
// usage string. The argument p points to a uint variable in which to
|
||||
// store the value of the flag.
|
||||
func UintVar(p *uint, name string, value uint, usage string) {
|
||||
EnvironmentFlags.UintVar(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Uint defines a uint flag with specified name, default value, and
|
||||
// usage string. The return value is the address of a uint variable
|
||||
// that stores the value of the flag.
|
||||
func Uint(name string, value uint, usage string) *uint {
|
||||
return EnvironmentFlags.Uint(name, value, usage)
|
||||
}
|
||||
|
||||
// Uint64Var defines a uint64 flag with specified name, default value,
|
||||
// and usage string. The argument p points to a uint64 variable in
|
||||
// which to store the value of the flag.
|
||||
func Uint64Var(p *uint64, name string, value uint64, usage string) {
|
||||
EnvironmentFlags.Uint64Var(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Uint64 defines a uint64 flag with specified name, default value,
|
||||
// and usage string. The return value is the address of a uint64
|
||||
// variable that stores the value of the flag.
|
||||
func Uint64(name string, value uint64, usage string) *uint64 {
|
||||
return EnvironmentFlags.Uint64(name, value, usage)
|
||||
}
|
||||
|
||||
// StringVar defines a string flag with specified name, default value,
|
||||
// and usage string. The argument p points to a string variable in
|
||||
// which to store the value of the flag.
|
||||
func StringVar(p *string, name string, value string, usage string) {
|
||||
EnvironmentFlags.StringVar(p, name, value, usage)
|
||||
}
|
||||
|
||||
// String defines a string flag with specified name, default value,
|
||||
// and usage string. The return value is the address of a string
|
||||
// variable that stores the value of the flag.
|
||||
func String(name string, value string, usage string) *string {
|
||||
return EnvironmentFlags.String(name, value, usage)
|
||||
}
|
||||
|
||||
// Float64Var defines a float64 flag with specified name, default
|
||||
// value, and usage string. The argument p points to a float64
|
||||
// variable in which to store the value of the flag.
|
||||
func Float64Var(p *float64, name string, value float64, usage string) {
|
||||
EnvironmentFlags.Float64Var(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Float64 defines a float64 flag with specified name, default value,
|
||||
// and usage string. The return value is the address of a float64
|
||||
// variable that stores the value of the flag.
|
||||
func Float64(name string, value float64, usage string) *float64 {
|
||||
return EnvironmentFlags.Float64(name, value, usage)
|
||||
}
|
||||
|
||||
// DurationVar defines a time.Duration flag with specified name,
|
||||
// default value, and usage string. The argument p points to a
|
||||
// time.Duration variable in which to store the value of the flag.
|
||||
func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
|
||||
EnvironmentFlags.DurationVar(p, name, value, usage)
|
||||
}
|
||||
|
||||
// Duration defines a time.Duration flag with specified name, default
|
||||
// value, and usage string. The return value is the address of a
|
||||
// time.Duration variable that stores the value of the flag.
|
||||
func Duration(name string, value time.Duration, usage string) *time.Duration {
|
||||
return EnvironmentFlags.Duration(name, value, usage)
|
||||
}
|
||||
|
||||
// PrintDefaults prints to standard error the default values of all
|
||||
// defined environment flags.
|
||||
func PrintDefaults() {
|
||||
EnvironmentFlags.PrintDefaults()
|
||||
}
|
||||
|
||||
// Parse parses the environment flags from os.Environ. Must be called
|
||||
// after all flags are defined and before flags are accessed by the
|
||||
// program.
|
||||
func Parse() {
|
||||
env := os.Environ()
|
||||
// Clean up and "fake" some flag k/v pairs.
|
||||
args := make([]string, 0, len(env))
|
||||
for _, value := range env {
|
||||
if Lookup(value[:strings.Index(value, "=")]) == nil {
|
||||
continue
|
||||
}
|
||||
args = append(args, fmt.Sprintf("-%s", value))
|
||||
}
|
||||
EnvironmentFlags.Parse(args)
|
||||
}
|
||||
|
||||
// Parsed returns true if the environment flags have been parsed.
|
||||
func Parsed() bool {
|
||||
return EnvironmentFlags.Parsed()
|
||||
}
|
||||
|
||||
// EnvironmentFlags is the default set of environment flags, parsed
|
||||
// from os.Environ(). The top-level functions such as BoolVar, Arg,
|
||||
// and so on are wrappers for the methods of EnvironmentFlags.
|
||||
var EnvironmentFlags = flag.NewFlagSet("environment", flag.ExitOnError)
|
201
vendor/github.com/jackspirou/syscerts/LICENSE
generated
vendored
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
42
vendor/github.com/jackspirou/syscerts/README.md
generated
vendored
|
@ -1,42 +0,0 @@
|
|||
# syscerts
|
||||
Gather local system certificates in Go via a public `SystemRootsPool` method.
|
||||
|
||||
#### What does this do?
|
||||
Provide a way to gather local system certificates
|
||||
on different OS platforms.
|
||||
|
||||
#### How does it do it?
|
||||
It uses the `crypto/x509` package and provides a single public method called
|
||||
`SystemRootsPool()` to return a `*x509.CertPool` object.
|
||||
|
||||
#### How do you use it?
|
||||
```Go
|
||||
// gather CA certs
|
||||
certpool := syscerts.SystemRootsPool()
|
||||
|
||||
// place them in an HTTP client for trusted SSL/TLS connections
|
||||
tlsConfig := &tls.Config{RootCAs: certpool}
|
||||
transport := &http.Transport{TLSClientConfig: tlsConfig}
|
||||
client := &http.Client{Transport: transport}
|
||||
|
||||
// make a request
|
||||
resp, err := client.Do(req)
|
||||
```
|
||||
|
||||
#### Why even do it?
|
||||
The `crypto/x509` package already has a `systemRootsPool` method.
|
||||
The `crypto/x509.systemRootsPool` method is almost the same as
|
||||
`github.com/jackspirou/syscerts.SystemRootsPool`.
|
||||
The difference? The `crypto/x509.systemRootsPool` method is private so you
|
||||
cannot access it. :(
|
||||
|
||||
There are plans for the `crypto/x509.systemRootsPool` method to become public
|
||||
in Go 1.7. When this happens you might no longer need `github.com/jackspirou/syscerts.SystemRootsPool`.
|
||||
|
||||
The only reason you may still use this package after the Go 1.7 release might
|
||||
be for the Mac OSX System Keychain certs which are not included in the
|
||||
`crypto/x509` package. Relevant lines below:
|
||||
|
||||
* https://github.com/jackspirou/syscerts/blob/master/root_darwin.go#L24-L32
|
||||
|
||||
Find more about this Go issue here: https://github.com/golang/go/issues/13335
|
22
vendor/github.com/jackspirou/syscerts/root.go
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package syscerts
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
systemRoots *x509.CertPool
|
||||
)
|
||||
|
||||
// SystemRootsPool attempts to find and return a pool of all installed
|
||||
// system certificates.
|
||||
func SystemRootsPool() *x509.CertPool {
|
||||
once.Do(initSystemRoots)
|
||||
return systemRoots
|
||||
}
|
14
vendor/github.com/jackspirou/syscerts/root_bsd.go
generated
vendored
14
vendor/github.com/jackspirou/syscerts/root_bsd.go
generated
vendored
|
@ -1,14 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build dragonfly freebsd netbsd openbsd
|
||||
|
||||
package syscerts
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD/DragonFly
|
||||
"/etc/ssl/cert.pem", // OpenBSD
|
||||
"/etc/openssl/certs/ca-certificates.crt", // NetBSD
|
||||
}
|
85
vendor/github.com/jackspirou/syscerts/root_cgo_darwin.go
generated
vendored
85
vendor/github.com/jackspirou/syscerts/root_cgo_darwin.go
generated
vendored
|
@ -1,85 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build cgo,darwin,!arm,!arm64,!ios
|
||||
|
||||
package syscerts
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -mmacosx-version-min=10.6 -D__MAC_OS_X_VERSION_MAX_ALLOWED=1060
|
||||
#cgo LDFLAGS: -framework CoreFoundation -framework Security
|
||||
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#include <Security/Security.h>
|
||||
|
||||
// FetchPEMRootsC fetches the system's list of trusted X.509 root certificates.
|
||||
//
|
||||
// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root
|
||||
// certificates of the system. On failure, the function returns -1.
|
||||
//
|
||||
// Note: The CFDataRef returned in pemRoots must be released (using CFRelease) after
|
||||
// we've consumed its content.
|
||||
int FetchPEMRootsC(CFDataRef *pemRoots) {
|
||||
if (pemRoots == NULL) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFArrayRef certs = NULL;
|
||||
OSStatus err = SecTrustCopyAnchorCertificates(&certs);
|
||||
if (err != noErr) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
|
||||
int i, ncerts = CFArrayGetCount(certs);
|
||||
for (i = 0; i < ncerts; i++) {
|
||||
CFDataRef data = NULL;
|
||||
SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, i);
|
||||
if (cert == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Note: SecKeychainItemExport is deprecated as of 10.7 in favor of SecItemExport.
|
||||
// Once we support weak imports via cgo we should prefer that, and fall back to this
|
||||
// for older systems.
|
||||
err = SecKeychainItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data);
|
||||
if (err != noErr) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (data != NULL) {
|
||||
CFDataAppendBytes(combinedData, CFDataGetBytePtr(data), CFDataGetLength(data));
|
||||
CFRelease(data);
|
||||
}
|
||||
}
|
||||
|
||||
CFRelease(certs);
|
||||
|
||||
*pemRoots = combinedData;
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
/*
|
||||
import "C"
|
||||
import (
|
||||
"crypto/x509"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := x509.NewCertPool()
|
||||
|
||||
var data C.CFDataRef = nil
|
||||
err := C.FetchPEMRootsC(&data)
|
||||
if err == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
defer C.CFRelease(C.CFTypeRef(data))
|
||||
buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data)))
|
||||
roots.AppendCertsFromPEM(buf)
|
||||
systemRoots = roots
|
||||
}
|
||||
*/
|
39
vendor/github.com/jackspirou/syscerts/root_darwin.go
generated
vendored
39
vendor/github.com/jackspirou/syscerts/root_darwin.go
generated
vendored
|
@ -1,39 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go
|
||||
|
||||
package syscerts
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func execSecurityRoots() (*x509.CertPool, error) {
|
||||
roots := x509.NewCertPool()
|
||||
cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", "/System/Library/Keychains/SystemRootCertificates.keychain")
|
||||
data, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roots.AppendCertsFromPEM(data)
|
||||
|
||||
// if available add the Mac OSX System Keychain
|
||||
if _, err := os.Stat("/Library/Keychains/System.keychain"); err == nil {
|
||||
cmd = exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", "/Library/Keychains/System.keychain")
|
||||
data, err = cmd.Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roots.AppendCertsFromPEM(data)
|
||||
}
|
||||
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
systemRoots, _ = execSecurityRoots()
|
||||
}
|
4909
vendor/github.com/jackspirou/syscerts/root_darwin_armx.go
generated
vendored
4909
vendor/github.com/jackspirou/syscerts/root_darwin_armx.go
generated
vendored
File diff suppressed because it is too large
13
vendor/github.com/jackspirou/syscerts/root_linux.go
generated
vendored
13
vendor/github.com/jackspirou/syscerts/root_linux.go
generated
vendored
|
@ -1,13 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package syscerts
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
|
||||
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
|
||||
"/etc/ssl/ca-bundle.pem", // OpenSUSE
|
||||
"/etc/pki/tls/cacert.pem", // OpenELEC
|
||||
}
|
8
vendor/github.com/jackspirou/syscerts/root_nacl.go
generated
vendored
8
vendor/github.com/jackspirou/syscerts/root_nacl.go
generated
vendored
|
@ -1,8 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package syscerts
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{}
|
32
vendor/github.com/jackspirou/syscerts/root_plan9.go
generated
vendored
32
vendor/github.com/jackspirou/syscerts/root_plan9.go
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build plan9
|
||||
|
||||
package syscerts
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/sys/lib/tls/ca.pem",
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := x509.NewCertPool()
|
||||
for _, file := range certFiles {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err == nil {
|
||||
roots.AppendCertsFromPEM(data)
|
||||
systemRoots = roots
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// All of the files failed to load. systemRoots will be nil which will
|
||||
// trigger a specific error at verification time.
|
||||
}
|
12
vendor/github.com/jackspirou/syscerts/root_solaris.go
generated
vendored
12
vendor/github.com/jackspirou/syscerts/root_solaris.go
generated
vendored
|
@ -1,12 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package syscerts
|
||||
|
||||
// Possible certificate files; stop after finding one.
|
||||
var certFiles = []string{
|
||||
"/etc/certs/ca-certificates.crt", // Solaris 11.2+
|
||||
"/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS
|
||||
"/etc/ssl/cacert.pem", // OmniOS
|
||||
}
|
52
vendor/github.com/jackspirou/syscerts/root_unix.go
generated
vendored
52
vendor/github.com/jackspirou/syscerts/root_unix.go
generated
vendored
|
@ -1,52 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build dragonfly freebsd linux nacl netbsd openbsd solaris
|
||||
|
||||
package syscerts
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// Possible directories with certificate files; stop after successfully
|
||||
// reading at least one file from a directory.
|
||||
var certDirectories = []string{
|
||||
"/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139
|
||||
"/system/etc/security/cacerts", // Android
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
roots := x509.NewCertPool()
|
||||
for _, file := range certFiles {
|
||||
data, err := ioutil.ReadFile(file)
|
||||
if err == nil {
|
||||
roots.AppendCertsFromPEM(data)
|
||||
systemRoots = roots
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, directory := range certDirectories {
|
||||
fis, err := ioutil.ReadDir(directory)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
rootsAdded := false
|
||||
for _, fi := range fis {
|
||||
data, err := ioutil.ReadFile(directory + "/" + fi.Name())
|
||||
if err == nil && roots.AppendCertsFromPEM(data) {
|
||||
rootsAdded = true
|
||||
}
|
||||
}
|
||||
if rootsAdded {
|
||||
systemRoots = roots
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// All of the files failed to load. systemRoots will be nil which will
|
||||
// trigger a specific error at verification time.
|
||||
}
|
40
vendor/github.com/jackspirou/syscerts/root_windows.go
generated
vendored
40
vendor/github.com/jackspirou/syscerts/root_windows.go
generated
vendored
|
@ -1,40 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package syscerts
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// extractSimpleChain extracts the final certificate chain from a CertSimpleChain.
|
||||
func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*x509.Certificate, err error) {
|
||||
if simpleChain == nil || count == 0 {
|
||||
return nil, errors.New("x509: invalid simple chain")
|
||||
}
|
||||
|
||||
simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:]
|
||||
lastChain := simpleChains[count-1]
|
||||
elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:]
|
||||
for i := 0; i < int(lastChain.NumElements); i++ {
|
||||
// Copy the buf, since ParseCertificate does not create its own copy.
|
||||
cert := elements[i].CertContext
|
||||
encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
|
||||
buf := make([]byte, cert.Length)
|
||||
copy(buf, encodedCert[:])
|
||||
parsedCert, err := x509.ParseCertificate(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chain = append(chain, parsedCert)
|
||||
}
|
||||
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
func initSystemRoots() {
|
||||
}
|
32
vendor/github.com/koding/cache/README.md
generated
vendored
32
vendor/github.com/koding/cache/README.md
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
# Cache [![GoDoc](https://godoc.org/github.com/koding/cache?status.svg)](https://godoc.org/github.com/koding/cache) [![Build Status](https://travis-ci.org/koding/cache.svg?branch=master)](https://travis-ci.org/koding/cache)
|
||||
|
||||
|
||||
Cache is a backend provider for common use cases
|
||||
|
||||
## Install and Usage
|
||||
|
||||
Install the package with:
|
||||
|
||||
```bash
|
||||
go get github.com/koding/cache
|
||||
```
|
||||
|
||||
Import it with:
|
||||
|
||||
```go
|
||||
import "github.com/koding/cache"
|
||||
```
|
||||
|
||||
|
||||
Example
|
||||
```go
|
||||
|
||||
// create a cache with 2 second TTL
|
||||
cache := NewMemoryWithTTL(2 * time.Second)
|
||||
// start garbage collection for expired keys
|
||||
cache.StartGC(time.Millisecond * 10)
|
||||
// set item
|
||||
err := cache.Set("test_key", "test_data")
|
||||
// get item
|
||||
data, err := cache.Get("test_key")
|
||||
```
|
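A bounded-size LRU backend is also provided; a minimal sketch using the `NewLRU` constructor and `ErrNotFound` value defined elsewhere in this package:

```go
// create a thread-safe LRU cache holding at most 128 entries;
// once full, the least recently used entry is evicted on Set
lru := NewLRU(128)
// set item
err := lru.Set("test_key", "test_data")
// get item; returns ErrNotFound if the key was evicted or never set
data, err := lru.Get("test_key")
```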
15
vendor/github.com/koding/cache/cache.go
generated
vendored
15
vendor/github.com/koding/cache/cache.go
generated
vendored
|
@ -1,15 +0,0 @@
|
|||
package cache
|
||||
|
||||
// Cache is the contract for all of the cache backends that are supported by
|
||||
// this package
|
||||
type Cache interface {
|
||||
// Get returns single item from the backend if the requested item is not
|
||||
// found, returns NotFound err
|
||||
Get(key string) (interface{}, error)
|
||||
|
||||
// Set sets a single item to the backend
|
||||
Set(key string, value interface{}) error
|
||||
|
||||
// Delete deletes single item from backend
|
||||
Delete(key string) error
|
||||
}
|
9
vendor/github.com/koding/cache/doc.go
generated
vendored
9
vendor/github.com/koding/cache/doc.go
generated
vendored
|
@ -1,9 +0,0 @@
|
|||
// Package cache provides basic caching mechanisms for Go(lang) projects.
|
||||
//
|
||||
// Currently supported caching algorithms:
|
||||
// MemoryNoTS: provides a non-thread safe in-memory caching system
|
||||
// Memory : provides a thread safe in-memory caching system, built on top of MemoryNoTS cache
|
||||
// LRUNoTS : provides a non-thread safe, fixed size in-memory caching system, built on top of MemoryNoTS cache
|
||||
// LRU : provides a thread safe, fixed size in-memory caching system, built on top of LRUNoTS cache
|
||||
// MemoryTTL : provides a thread safe, expiring in-memory caching system, built on top of MemoryNoTS cache
|
||||
package cache
|
8
vendor/github.com/koding/cache/errors.go
generated
vendored
8
vendor/github.com/koding/cache/errors.go
generated
vendored
|
@ -1,8 +0,0 @@
|
|||
package cache
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrNotFound holds exported `not found error` for not found items
|
||||
ErrNotFound = errors.New("not found")
|
||||
)
|
51
vendor/github.com/koding/cache/lru.go
generated
vendored
51
vendor/github.com/koding/cache/lru.go
generated
vendored
|
@ -1,51 +0,0 @@
|
|||
package cache
|
||||
|
||||
import "sync"
|
||||
|
||||
// LRU Discards the least recently used items first. This algorithm
|
||||
// requires keeping track of what was used when.
|
||||
type LRU struct {
|
||||
// Mutex is used for handling the concurrent
|
||||
// read/write requests for cache
|
||||
sync.Mutex
|
||||
|
||||
// cache holds the all cache values
|
||||
cache Cache
|
||||
}
|
||||
|
||||
// NewLRU creates a thread-safe LRU cache
|
||||
func NewLRU(size int) Cache {
|
||||
return &LRU{
|
||||
cache: NewLRUNoTS(size),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the value of a given key if it exists, every get item will be
|
||||
// moved to the head of the linked list for keeping track of least recent used
|
||||
// item
|
||||
func (l *LRU) Get(key string) (interface{}, error) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return l.cache.Get(key)
|
||||
}
|
||||
|
||||
// Set sets or overrides the given key with the given value, every set item will
|
||||
// be moved or prepended to the head of the linked list for keeping track of
|
||||
// least recent used item. When the cache is full, last item of the linked list
|
||||
// will be evicted from the cache
|
||||
func (l *LRU) Set(key string, val interface{}) error {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return l.cache.Set(key, val)
|
||||
}
|
||||
|
||||
// Delete deletes the given key-value pair from cache; this function doesn't
|
||||
// return an error if the item is not in the cache
|
||||
func (l *LRU) Delete(key string) error {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return l.cache.Delete(key)
|
||||
}
|
122
vendor/github.com/koding/cache/lru_nots.go
generated
vendored
122
vendor/github.com/koding/cache/lru_nots.go
generated
vendored
|
@ -1,122 +0,0 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
)
|
||||
|
||||
// LRUNoTS Discards the least recently used items first. This algorithm
|
||||
// requires keeping track of what was used when.
|
||||
type LRUNoTS struct {
|
||||
// list holds all items in a linked list, for finding the `tail` of the list
|
||||
list *list.List
|
||||
|
||||
// cache holds the all cache values
|
||||
cache Cache
|
||||
|
||||
// size holds the limit of the LRU cache
|
||||
size int
|
||||
}
|
||||
|
||||
// kv is a helper struct for keeping track of the key for the list item. The only
|
||||
// place where we need the key of a value is while removing the last item from the
|
||||
// linked list; in all other cases the operations already have the key
|
||||
type kv struct {
|
||||
k string
|
||||
v interface{}
|
||||
}
|
||||
|
||||
// NewLRUNoTS creates a new LRU cache struct for further cache operations. Size
|
||||
// is used for limiting the upper bound of the cache
|
||||
func NewLRUNoTS(size int) Cache {
|
||||
if size < 1 {
|
||||
panic("invalid cache size")
|
||||
}
|
||||
|
||||
return &LRUNoTS{
|
||||
list: list.New(),
|
||||
cache: NewMemoryNoTS(),
|
||||
size: size,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the value of a given key if it exists, every get item will be
|
||||
// moved to the head of the linked list for keeping track of least recent used
|
||||
// item
|
||||
func (l *LRUNoTS) Get(key string) (interface{}, error) {
|
||||
res, err := l.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
elem := res.(*list.Element)
|
||||
// move found item to the head
|
||||
l.list.MoveToFront(elem)
|
||||
|
||||
return elem.Value.(*kv).v, nil
|
||||
}
|
||||
|
||||
// Set sets or overrides the given key with the given value, every set item will
|
||||
// be moved or prepended to the head of the linked list for keeping track of
|
||||
// least recent used item. When the cache is full, last item of the linked list
|
||||
// will be evicted from the cache
|
||||
func (l *LRUNoTS) Set(key string, val interface{}) error {
|
||||
// try to get item
|
||||
res, err := l.cache.Get(key)
|
||||
if err != nil && err != ErrNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
var elem *list.Element
|
||||
|
||||
// if elem is not in the cache, push it to front of the list
|
||||
if err == ErrNotFound {
|
||||
elem = l.list.PushFront(&kv{k: key, v: val})
|
||||
} else {
|
||||
// if elem is in the cache, update the data and move it the front
|
||||
elem = res.(*list.Element)
|
||||
|
||||
// update the data
|
||||
elem.Value.(*kv).v = val
|
||||
|
||||
// item already exists, so move it to the front of the list
|
||||
l.list.MoveToFront(elem)
|
||||
}
|
||||
|
||||
// in any case, set the item to the cache
|
||||
err = l.cache.Set(key, elem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the cache is full, evict last entry
|
||||
if l.list.Len() > l.size {
|
||||
// remove last element from cache
|
||||
return l.removeElem(l.list.Back())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes the given key-value pair from cache; this function doesn't
|
||||
// return an error if the item is not in the cache
|
||||
func (l *LRUNoTS) Delete(key string) error {
|
||||
res, err := l.cache.Get(key)
|
||||
if err != nil && err != ErrNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
// item already deleted
|
||||
if err == ErrNotFound {
|
||||
// suppress not found errors
|
||||
return nil
|
||||
}
|
||||
|
||||
elem := res.(*list.Element)
|
||||
|
||||
return l.removeElem(elem)
|
||||
}
|
||||
|
||||
func (l *LRUNoTS) removeElem(e *list.Element) error {
|
||||
l.list.Remove(e)
|
||||
return l.cache.Delete(e.Value.(*kv).k)
|
||||
}
|
46
vendor/github.com/koding/cache/memory.go
generated
vendored
46
vendor/github.com/koding/cache/memory.go
generated
vendored
|
@ -1,46 +0,0 @@
|
|||
package cache
|
||||
|
||||
import "sync"
|
||||
|
||||
// Memory provides an inmemory caching mechanism
|
||||
type Memory struct {
|
||||
// Mutex is used for handling the concurrent
|
||||
// read/write requests for cache
|
||||
sync.Mutex
|
||||
|
||||
// cache holds the cache data
|
||||
cache Cache
|
||||
}
|
||||
|
||||
// NewMemory creates an inmemory cache system
|
||||
// which will always report cache hits accurately
|
||||
func NewMemory() Cache {
|
||||
return &Memory{
|
||||
cache: NewMemoryNoTS(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the value of a given key if it exists
|
||||
func (r *Memory) Get(key string) (interface{}, error) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
return r.cache.Get(key)
|
||||
}
|
||||
|
||||
// Set sets a value to the cache or overrides existing one with the given value
|
||||
func (r *Memory) Set(key string, value interface{}) error {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
return r.cache.Set(key, value)
|
||||
}
|
||||
|
||||
// Delete deletes the given key-value pair from cache; this function doesn't
|
||||
// return an error if the item is not in the cache
|
||||
func (r *Memory) Delete(key string) error {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
return r.cache.Delete(key)
|
||||
}
|
39
vendor/github.com/koding/cache/memory_nots.go
generated
vendored
39
vendor/github.com/koding/cache/memory_nots.go
generated
vendored
|
@ -1,39 +0,0 @@
|
|||
package cache
|
||||
|
||||
// MemoryNoTS provides a non-thread safe caching mechanism
|
||||
type MemoryNoTS struct {
|
||||
// items holds the cache data
|
||||
items map[string]interface{}
|
||||
}
|
||||
|
||||
// NewMemoryNoTS creates a MemoryNoTS struct
|
||||
func NewMemoryNoTS() *MemoryNoTS {
|
||||
return &MemoryNoTS{
|
||||
items: map[string]interface{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a value of a given key if it exists
|
||||
// and valid for the time being
|
||||
func (r *MemoryNoTS) Get(key string) (interface{}, error) {
|
||||
value, ok := r.items[key]
|
||||
if !ok {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Set will persist a value to the cache or
|
||||
// override existing one with the new one
|
||||
func (r *MemoryNoTS) Set(key string, value interface{}) error {
|
||||
r.items[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes a given key; it doesn't return an error if the item is not in the
|
||||
// system
|
||||
func (r *MemoryNoTS) Delete(key string) error {
|
||||
delete(r.items, key)
|
||||
return nil
|
||||
}
|
111
vendor/github.com/koding/cache/memory_ttl.go
generated
vendored
111
vendor/github.com/koding/cache/memory_ttl.go
generated
vendored
|
@ -1,111 +0,0 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var zeroTTL = time.Duration(0)
|
||||
|
||||
// MemoryTTL holds the required variables to compose an in memory cache system
|
||||
// which also provides expiring key mechanism
|
||||
type MemoryTTL struct {
|
||||
// Mutex is used for handling the concurrent
|
||||
// read/write requests for cache
|
||||
sync.Mutex
|
||||
|
||||
// cache holds the cache data
|
||||
cache *MemoryNoTS
|
||||
|
||||
// setAts holds the time that related item's set at
|
||||
setAts map[string]time.Time
|
||||
|
||||
// ttl is a duration for a cache key to expire
|
||||
ttl time.Duration
|
||||
|
||||
// gcInterval is a duration for garbage collection
|
||||
gcInterval time.Duration
|
||||
}
|
||||
|
||||
// NewMemoryWithTTL creates an inmemory cache system
|
||||
// which will always report cache hits accurately
|
||||
// and will never leak memory
|
||||
// ttl is used for expiration of a key from cache
|
||||
func NewMemoryWithTTL(ttl time.Duration) *MemoryTTL {
|
||||
return &MemoryTTL{
|
||||
cache: NewMemoryNoTS(),
|
||||
setAts: map[string]time.Time{},
|
||||
ttl: ttl,
|
||||
}
|
||||
}
|
||||
|
||||
// StartGC starts the garbage collection process in a go routine
|
||||
func (r *MemoryTTL) StartGC(gcInterval time.Duration) {
|
||||
r.gcInterval = gcInterval
|
||||
go func() {
|
||||
for _ = range time.Tick(gcInterval) {
|
||||
for key := range r.cache.items {
|
||||
if !r.isValid(key) {
|
||||
r.Delete(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Get returns a value of a given key if it exists
|
||||
// and valid for the time being
|
||||
func (r *MemoryTTL) Get(key string) (interface{}, error) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
if !r.isValid(key) {
|
||||
r.delete(key)
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
|
||||
value, err := r.cache.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Set will persist a value to the cache or
|
||||
// override existing one with the new one
|
||||
func (r *MemoryTTL) Set(key string, value interface{}) error {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
r.cache.Set(key, value)
|
||||
r.setAts[key] = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete deletes a given key if exists
|
||||
func (r *MemoryTTL) Delete(key string) error {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
r.delete(key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *MemoryTTL) delete(key string) {
|
||||
r.cache.Delete(key)
|
||||
delete(r.setAts, key)
|
||||
}
|
||||
|
||||
func (r *MemoryTTL) isValid(key string) bool {
|
||||
setAt, ok := r.setAts[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if r.ttl == zeroTTL {
|
||||
return true
|
||||
}
|
||||
|
||||
return setAt.Add(r.ttl).After(time.Now())
|
||||
}
|
10
vendor/github.com/square/go-jose/BUG-BOUNTY.md
generated
vendored
10
vendor/github.com/square/go-jose/BUG-BOUNTY.md
generated
vendored
|
@ -1,10 +0,0 @@
|
|||
Serious about security
|
||||
======================
|
||||
|
||||
Square recognizes the important contributions the security research community
|
||||
can make. We therefore encourage reporting security issues with the code
|
||||
contained in this repository.
|
||||
|
||||
If you believe you have discovered a security vulnerability, please follow the
|
||||
guidelines at <https://hackerone.com/square-open-source>.
|
||||
|
14
vendor/github.com/square/go-jose/CONTRIBUTING.md
generated
vendored
14
vendor/github.com/square/go-jose/CONTRIBUTING.md
generated
vendored
|
@ -1,14 +0,0 @@
|
|||
# Contributing
|
||||
|
||||
If you would like to contribute code to go-jose you can do so through GitHub by
|
||||
forking the repository and sending a pull request.
|
||||
|
||||
When submitting code, please make every effort to follow existing conventions
|
||||
and style in order to keep the code as readable as possible. Please also make
|
||||
sure all tests pass by running `go test`, and format your code with `go fmt`.
|
||||
We also recommend using `golint` and `errcheck`.
|
||||
|
||||
Before your code can be accepted into the project you must also sign the
|
||||
[Individual Contributor License Agreement][1].
|
||||
|
||||
[1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
|
202
vendor/github.com/square/go-jose/LICENSE
generated
vendored
202
vendor/github.com/square/go-jose/LICENSE
generated
vendored
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
185
vendor/github.com/square/go-jose/README.md
generated
vendored
185
vendor/github.com/square/go-jose/README.md
generated
vendored
|
@ -1,185 +0,0 @@
|
|||
# Go JOSE
|
||||
|
||||
[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/square/go-jose) [![license](http://img.shields.io/badge/license-apache_2.0-red.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) [![build](https://img.shields.io/travis/square/go-jose.svg?style=flat)](https://travis-ci.org/square/go-jose) [![coverage](https://img.shields.io/coveralls/square/go-jose.svg?style=flat)](https://coveralls.io/r/square/go-jose)
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. For the moment, it mainly focuses on encryption
|
||||
and signing based on the JSON Web Encryption and JSON Web Signature standards.
|
||||
|
||||
**Disclaimer**: This library contains encryption software that is subject to
|
||||
the U.S. Export Administration Regulations. You may not export, re-export,
|
||||
transfer or download this code or any part of it in violation of any United
|
||||
States law, directive or regulation. In particular this software may not be
|
||||
exported or re-exported in any form or on any media to Iran, North Sudan,
|
||||
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
|
||||
US maintained blocked list.
|
||||
|
||||
## Overview
|
||||
|
||||
The implementation follows the
|
||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516)
|
||||
standard (RFC 7516) and
|
||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515)
|
||||
standard (RFC 7515). Tables of supported algorithms are shown below.
|
||||
The library supports both the compact and full serialization formats, and has
|
||||
optional support for multiple recipients. It also comes with a small
|
||||
command-line utility (`jose-util`) for encrypting/decrypting JWE messages in a
|
||||
shell.
|
||||
|
||||
### Supported algorithms
|
||||
|
||||
See below for a table of supported algorithms. Algorithm identifiers match
|
||||
the names in the
|
||||
[JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
|
||||
standard where possible. The
|
||||
[Godoc reference](https://godoc.org/github.com/square/go-jose#pkg-constants)
|
||||
has a list of constants.
|
||||
|
||||
Key encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSA-PKCS#1v1.5 | RSA1_5
|
||||
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
|
||||
AES key wrap | A128KW, A192KW, A256KW
|
||||
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
|
||||
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
|
||||
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
|
||||
Direct encryption | dir<sup>1</sup>
|
||||
|
||||
<sup>1. Not supported in multi-recipient mode</sup>
|
||||
|
||||
Signing / MAC | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
|
||||
RSASSA-PSS | PS256, PS384, PS512
|
||||
HMAC | HS256, HS384, HS512
|
||||
ECDSA | ES256, ES384, ES512
|
||||
|
||||
Content encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
|
||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
||||
|
||||
Compression                 | Algorithm identifier(s)
|
||||
:------------------------- | -------------------------------
|
||||
DEFLATE (RFC 1951) | DEF
|
||||
|
||||
### Supported key types
|
||||
|
||||
See below for a table of supported key types. These are understood by the
|
||||
library, and can be passed to corresponding functions such as `NewEncrypter` or
|
||||
`NewSigner`.
|
||||
|
||||
Algorithm(s) | Corresponding types
|
||||
:------------------------- | -------------------------------
|
||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
||||
AES, HMAC | []byte
|
||||
|
||||
## Examples
|
||||
|
||||
Encryption/decryption example using RSA:
|
||||
|
||||
```Go
|
||||
// Generate a public/private key pair to use for this example. The library
|
||||
// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
|
||||
// that can be used to load keys from PEM/DER-encoded data.
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Instantiate an encrypter using RSA-OAEP with AES128-GCM. An error would
|
||||
// indicate that the selected algorithm(s) are not currently supported.
|
||||
publicKey := &privateKey.PublicKey
|
||||
encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, publicKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Encrypt a sample plaintext. Calling the encrypter returns an encrypted
|
||||
// JWE object, which can then be serialized for output afterwards. An error
|
||||
// would indicate a problem in an underlying cryptographic primitive.
|
||||
var plaintext = []byte("Lorem ipsum dolor sit amet")
|
||||
object, err := encrypter.Encrypt(plaintext)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Serialize the encrypted object using the full serialization format.
|
||||
// Alternatively you can also use the compact format here by calling
|
||||
// object.CompactSerialize() instead.
|
||||
serialized := object.FullSerialize()
|
||||
|
||||
// Parse the serialized, encrypted JWE object. An error would indicate that
|
||||
// the given input did not represent a valid message.
|
||||
object, err = ParseEncrypted(serialized)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Now we can decrypt and get back our original plaintext. An error here
|
||||
// would indicate that the message failed to decrypt, e.g. because the auth
|
||||
// tag was broken or the message was tampered with.
|
||||
decrypted, err := object.Decrypt(privateKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s", decrypted)
|
||||
// output: Lorem ipsum dolor sit amet
|
||||
```
|
||||
|
||||
Signing/verification example using RSA:
|
||||
|
||||
```Go
|
||||
// Generate a public/private key pair to use for this example. The library
|
||||
// also provides two utility functions (LoadPublicKey and LoadPrivateKey)
|
||||
// that can be used to load keys from PEM/DER-encoded data.
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Instantiate a signer using RSASSA-PSS (SHA512) with the given private key.
|
||||
signer, err := NewSigner(PS512, privateKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Sign a sample payload. Calling the signer returns a protected JWS object,
|
||||
// which can then be serialized for output afterwards. An error would
|
||||
// indicate a problem in an underlying cryptographic primitive.
|
||||
var payload = []byte("Lorem ipsum dolor sit amet")
|
||||
object, err := signer.Sign(payload)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Serialize the encrypted object using the full serialization format.
|
||||
// Alternatively you can also use the compact format here by calling
|
||||
// object.CompactSerialize() instead.
|
||||
serialized := object.FullSerialize()
|
||||
|
||||
// Parse the serialized, protected JWS object. An error would indicate that
|
||||
// the given input did not represent a valid message.
|
||||
object, err = ParseSigned(serialized)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Now we can verify the signature on the payload. An error here would
|
||||
// indicate that the message failed to verify, e.g. because the signature was
|
||||
// broken or the message was tampered with.
|
||||
output, err := object.Verify(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%s", output)
|
||||
// output: Lorem ipsum dolor sit amet
|
||||
```
|
||||
|
||||
More examples can be found in the [Godoc
|
||||
reference](https://godoc.org/github.com/square/go-jose) for this package. The
|
||||
`jose-util` subdirectory also contains a small command-line utility for
|
||||
encrypting/decrypting JWE messages which might be useful as an example.
|
498
vendor/github.com/square/go-jose/asymmetric.go
generated
vendored
498
vendor/github.com/square/go-jose/asymmetric.go
generated
vendored
|
@ -1,498 +0,0 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/square/go-jose/cipher"
|
||||
)
|
||||
|
||||
// A generic RSA-based encrypter/verifier
|
||||
type rsaEncrypterVerifier struct {
|
||||
publicKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic RSA-based decrypter/signer
|
||||
type rsaDecrypterSigner struct {
|
||||
privateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// A generic EC-based encrypter/verifier
|
||||
type ecEncrypterVerifier struct {
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
// A key generator for ECDH-ES
|
||||
type ecKeyGenerator struct {
|
||||
size int
|
||||
algID string
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic EC-based decrypter/signer
|
||||
type ecDecrypterSigner struct {
|
||||
privateKey *ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
// newRSARecipient creates recipientKeyInfo based on the given key.
|
||||
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &rsaEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newRSASigner creates a recipientSigInfo based on the given key.
|
||||
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case RS256, RS384, RS512, PS256, PS384, PS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: &JsonWebKey{
|
||||
Key: &privateKey.PublicKey,
|
||||
},
|
||||
signer: &rsaDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDHRecipient creates recipientKeyInfo based on the given key.
|
||||
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &ecEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDSASigner creates a recipientSigInfo based on the given key.
|
||||
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case ES256, ES384, ES512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: &JsonWebKey{
|
||||
Key: &privateKey.PublicKey,
|
||||
},
|
||||
signer: &ecDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
encryptedKey, err := ctx.encrypt(cek, alg)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: encryptedKey,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek)
|
||||
case RSA_OAEP:
|
||||
return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator)
|
||||
}
|
||||
|
||||
// Decrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
|
||||
// Note: The random reader on decrypt operations is only used for blinding,
|
||||
// so stubbing is meaningless (hence the direct use of rand.Reader).
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
defer func() {
|
||||
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
|
||||
// because of an index out of bounds error, which we want to ignore.
|
||||
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
|
||||
// only exists for preventing crashes with unpatched versions.
|
||||
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
|
||||
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
|
||||
_ = recover()
|
||||
}()
|
||||
|
||||
// Perform some input validation.
|
||||
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
|
||||
if keyBytes != len(jek) {
|
||||
// Input size is incorrect, the encrypted payload should always match
|
||||
// the size of the public modulus (e.g. using a 2048 bit key will
|
||||
// produce 256 bytes of output). Reject this since it's invalid input.
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
cek, _, err := generator.genKey()
|
||||
if err != nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
|
||||
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
|
||||
// the Million Message Attack on Cryptographic Message Syntax". We are
|
||||
// therefore deliberately ignoring errors here.
|
||||
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
|
||||
|
||||
return cek, nil
|
||||
case RSA_OAEP:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
var out []byte
|
||||
var err error
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed)
|
||||
case PS256, PS384, PS512:
|
||||
out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
|
||||
case PS256, PS384, PS512:
|
||||
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
|
||||
}
|
||||
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
generator := ecKeyGenerator{
|
||||
algID: string(alg),
|
||||
publicKey: ctx.publicKey,
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case ECDH_ES_A128KW:
|
||||
generator.size = 16
|
||||
case ECDH_ES_A192KW:
|
||||
generator.size = 24
|
||||
case ECDH_ES_A256KW:
|
||||
generator.size = 32
|
||||
}
|
||||
|
||||
kek, header, err := generator.genKey()
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(kek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get key size for EC key generator
|
||||
func (ctx ecKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Get a content encryption key for ECDH-ES
|
||||
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
|
||||
|
||||
headers := rawHeader{
|
||||
Epk: &JsonWebKey{
|
||||
Key: &priv.PublicKey,
|
||||
},
|
||||
}
|
||||
|
||||
return out, headers, nil
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
if headers.Epk == nil {
|
||||
return nil, errors.New("square/go-jose: missing epk header")
|
||||
}
|
||||
|
||||
publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey)
|
||||
if publicKey == nil || !ok {
|
||||
return nil, errors.New("square/go-jose: invalid epk header")
|
||||
}
|
||||
|
||||
apuData := headers.Apu.bytes()
|
||||
apvData := headers.Apv.bytes()
|
||||
|
||||
deriveKey := func(algID string, size int) []byte {
|
||||
return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size)
|
||||
}
|
||||
|
||||
var keySize int
|
||||
|
||||
switch KeyAlgorithm(headers.Alg) {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES uses direct key agreement; no key unwrapping is necessary.
|
||||
return deriveKey(string(headers.Enc), generator.keySize()), nil
|
||||
case ECDH_ES_A128KW:
|
||||
keySize = 16
|
||||
case ECDH_ES_A192KW:
|
||||
keySize = 24
|
||||
case ECDH_ES_A256KW:
|
||||
keySize = 32
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
key := deriveKey(headers.Alg, keySize)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var expectedBitSize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
expectedBitSize = 256
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
expectedBitSize = 384
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
expectedBitSize = 521
|
||||
hash = crypto.SHA512
|
||||
}
|
||||
|
||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
||||
if expectedBitSize != curveBits {
|
||||
return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes++
|
||||
}
|
||||
|
||||
// We serialize the outputs (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return Signature{
|
||||
signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var keySize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
keySize = 32
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
keySize = 48
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
keySize = 66
|
||||
hash = crypto.SHA512
|
||||
}
|
||||
|
||||
if len(signature) != 2*keySize {
|
||||
return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r := big.NewInt(0).SetBytes(signature[:keySize])
|
||||
s := big.NewInt(0).SetBytes(signature[keySize:])
|
||||
|
||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
||||
if !match {
|
||||
return errors.New("square/go-jose: ecdsa signature failed to verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
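For reference, the fixed-width r||s layout produced by signPayload above can be reproduced with the standard library alone. A minimal sketch, assuming P-256/ES256; the helper name is illustrative and is not part of go-jose:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// serializeES256 pads r and s to 32 bytes each (big-endian, zero-padded on
// the left) and concatenates them, mirroring the JWS ES256 signature layout.
func serializeES256(r, s *big.Int) []byte {
	out := make([]byte, 64)
	rBytes, sBytes := r.Bytes(), s.Bytes()
	copy(out[32-len(rBytes):32], rBytes)
	copy(out[64-len(sBytes):], sBytes)
	return out
}

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	digest := sha256.Sum256([]byte("payload"))

	r, s, _ := ecdsa.Sign(rand.Reader, key, digest[:])
	sig := serializeES256(r, s)

	// Split the fixed-width signature back into r and s and verify it.
	r2 := new(big.Int).SetBytes(sig[:32])
	s2 := new(big.Int).SetBytes(sig[32:])
	fmt.Println(ecdsa.Verify(&key.PublicKey, digest[:], r2, s2)) // true
}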
196
vendor/github.com/square/go-jose/cipher/cbc_hmac.go
generated
vendored
@ -1,196 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
nonceBytes = 16
|
||||
)
|
||||
|
||||
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
|
||||
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
|
||||
keySize := len(key) / 2
|
||||
integrityKey := key[:keySize]
|
||||
encryptionKey := key[keySize:]
|
||||
|
||||
blockCipher, err := newBlockCipher(encryptionKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var hash func() hash.Hash
|
||||
switch keySize {
|
||||
case 16:
|
||||
hash = sha256.New
|
||||
case 24:
|
||||
hash = sha512.New384
|
||||
case 32:
|
||||
hash = sha512.New
|
||||
}
|
||||
|
||||
return &cbcAEAD{
|
||||
hash: hash,
|
||||
blockCipher: blockCipher,
|
||||
authtagBytes: keySize,
|
||||
integrityKey: integrityKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// An AEAD based on CBC+HMAC
|
||||
type cbcAEAD struct {
|
||||
hash func() hash.Hash
|
||||
authtagBytes int
|
||||
integrityKey []byte
|
||||
blockCipher cipher.Block
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) NonceSize() int {
|
||||
return nonceBytes
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) Overhead() int {
|
||||
// Maximum overhead is block size (for padding) plus auth tag length, where
|
||||
// the length of the auth tag is equivalent to the key size.
|
||||
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
|
||||
}
|
||||
|
||||
// Seal encrypts and authenticates the plaintext.
|
||||
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
|
||||
// Output buffer -- must take care not to mangle plaintext input.
|
||||
ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)]
|
||||
copy(ciphertext, plaintext)
|
||||
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
|
||||
|
||||
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
|
||||
|
||||
cbc.CryptBlocks(ciphertext, ciphertext)
|
||||
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
|
||||
|
||||
ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag))
|
||||
copy(out, ciphertext)
|
||||
copy(out[len(ciphertext):], authtag)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Open decrypts and authenticates the ciphertext.
|
||||
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||
if len(ciphertext) < ctx.authtagBytes {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
|
||||
}
|
||||
|
||||
offset := len(ciphertext) - ctx.authtagBytes
|
||||
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
|
||||
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
|
||||
if match != 1 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
|
||||
}
|
||||
|
||||
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
|
||||
|
||||
// Make a copy of the ciphertext buffer; we don't want to modify it in place
|
||||
buffer := append([]byte{}, []byte(ciphertext[:offset])...)
|
||||
|
||||
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
|
||||
return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
|
||||
}
|
||||
|
||||
cbc.CryptBlocks(buffer, buffer)
|
||||
|
||||
// Remove padding
|
||||
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret, out := resize(dst, len(dst)+len(plaintext))
|
||||
copy(out, plaintext)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Compute an authentication tag
|
||||
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
|
||||
buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
|
||||
n := 0
|
||||
n += copy(buffer, aad)
|
||||
n += copy(buffer[n:], nonce)
|
||||
n += copy(buffer[n:], ciphertext)
|
||||
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
|
||||
|
||||
// According to documentation, Write() on hash.Hash never fails.
|
||||
hmac := hmac.New(ctx.hash, ctx.integrityKey)
|
||||
_, _ = hmac.Write(buffer)
|
||||
|
||||
return hmac.Sum(nil)[:ctx.authtagBytes]
|
||||
}
|
||||
|
||||
// resize ensures that the given slice has a capacity of at least n bytes.
|
||||
// If the capacity of the slice is less than n, a new slice is allocated
|
||||
// and the existing data will be copied.
|
||||
func resize(in []byte, n int) (head, tail []byte) {
|
||||
if cap(in) >= n {
|
||||
head = in[:n]
|
||||
} else {
|
||||
head = make([]byte, n)
|
||||
copy(head, in)
|
||||
}
|
||||
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
|
||||
|
||||
// Apply padding
|
||||
func padBuffer(buffer []byte, blockSize int) []byte {
|
||||
missing := blockSize - (len(buffer) % blockSize)
|
||||
ret, out := resize(buffer, len(buffer)+missing)
|
||||
padding := bytes.Repeat([]byte{byte(missing)}, missing)
|
||||
copy(out, padding)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Remove padding
|
||||
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
|
||||
if len(buffer)%blockSize != 0 {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
last := buffer[len(buffer)-1]
|
||||
count := int(last)
|
||||
|
||||
if count == 0 || count > blockSize || count > len(buffer) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
padding := bytes.Repeat([]byte{last}, count)
|
||||
if !bytes.HasSuffix(buffer, padding) {
|
||||
return nil, errors.New("square/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
return buffer[:len(buffer)-count], nil
|
||||
}
|
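A minimal sketch of how this AEAD is constructed and used, assuming the vendored import path above and an A128CBC-HS256-style 32-byte combined key (half HMAC key, half AES key):

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/square/go-jose/cipher"
)

func main() {
	// 32-byte combined key: NewCBCHMAC uses the first half for HMAC-SHA256
	// and the second half for AES-128-CBC.
	key := make([]byte, 32)
	nonce := make([]byte, 16) // NonceSize() is 16
	_, _ = rand.Read(key)
	_, _ = rand.Read(nonce)

	aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		panic(err)
	}

	aad := []byte("additional authenticated data")
	ciphertext := aead.Seal(nil, nonce, []byte("secret payload"), aad)

	plaintext, err := aead.Open(nil, nonce, ciphertext, aad)
	fmt.Println(string(plaintext), err) // "secret payload" <nil>
}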
75
vendor/github.com/square/go-jose/cipher/concat_kdf.go
generated
vendored
@ -1,75 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
type concatKDF struct {
|
||||
z, info []byte
|
||||
i uint32
|
||||
cache []byte
|
||||
hasher hash.Hash
|
||||
}
|
||||
|
||||
// NewConcatKDF builds a KDF reader based on the given inputs.
|
||||
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
|
||||
buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo))
|
||||
n := 0
|
||||
n += copy(buffer, algID)
|
||||
n += copy(buffer[n:], ptyUInfo)
|
||||
n += copy(buffer[n:], ptyVInfo)
|
||||
n += copy(buffer[n:], supPubInfo)
|
||||
copy(buffer[n:], supPrivInfo)
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
return &concatKDF{
|
||||
z: z,
|
||||
info: buffer,
|
||||
hasher: hasher,
|
||||
cache: []byte{},
|
||||
i: 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *concatKDF) Read(out []byte) (int, error) {
|
||||
copied := copy(out, ctx.cache)
|
||||
ctx.cache = ctx.cache[copied:]
|
||||
|
||||
for copied < len(out) {
|
||||
ctx.hasher.Reset()
|
||||
|
||||
// Write on a hash.Hash never fails
|
||||
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
|
||||
_, _ = ctx.hasher.Write(ctx.z)
|
||||
_, _ = ctx.hasher.Write(ctx.info)
|
||||
|
||||
hash := ctx.hasher.Sum(nil)
|
||||
chunkCopied := copy(out[copied:], hash)
|
||||
copied += chunkCopied
|
||||
ctx.cache = hash[chunkCopied:]
|
||||
|
||||
ctx.i++
|
||||
}
|
||||
|
||||
return copied, nil
|
||||
}
|
51
vendor/github.com/square/go-jose/cipher/ecdh_es.go
generated
vendored
@ -1,51 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
|
||||
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
|
||||
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
|
||||
algID := lengthPrefixed([]byte(alg))
|
||||
ptyUInfo := lengthPrefixed(apuData)
|
||||
ptyVInfo := lengthPrefixed(apvData)
|
||||
|
||||
// suppPubInfo is the encoded length of the output size in bits
|
||||
supPubInfo := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
|
||||
|
||||
z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
|
||||
reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
|
||||
|
||||
key := make([]byte, size)
|
||||
|
||||
// Read on the KDF will never fail
|
||||
_, _ = reader.Read(key)
|
||||
return key
|
||||
}
|
||||
|
||||
func lengthPrefixed(data []byte) []byte {
|
||||
out := make([]byte, len(data)+4)
|
||||
binary.BigEndian.PutUint32(out, uint32(len(data)))
|
||||
copy(out[4:], data)
|
||||
return out
|
||||
}
|
109
vendor/github.com/square/go-jose/cipher/key_wrap.go
generated
vendored
@ -1,109 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
|
||||
|
||||
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
||||
if len(cek)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := len(cek) / 8
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], cek[i*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer, defaultIV)
|
||||
|
||||
for t := 0; t < 6*n; t++ {
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Encrypt(buffer, buffer)
|
||||
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
}
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
out := make([]byte, (n+1)*8)
|
||||
copy(out, buffer[:8])
|
||||
for i := range r {
|
||||
copy(out[(i+1)*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
||||
if len(ciphertext)%8 != 0 {
|
||||
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := (len(ciphertext) / 8) - 1
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], ciphertext[(i+1)*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer[:8], ciphertext[:8])
|
||||
|
||||
for t := 6*n - 1; t >= 0; t-- {
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] = buffer[i] ^ tBytes[i]
|
||||
}
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Decrypt(buffer, buffer)
|
||||
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
|
||||
return nil, errors.New("square/go-jose: failed to unwrap key")
|
||||
}
|
||||
|
||||
out := make([]byte, n*8)
|
||||
for i := range r {
|
||||
copy(out[i*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
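A minimal sketch of wrapping and unwrapping a content encryption key with an A128KW-style key, assuming the vendored import path:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/square/go-jose/cipher"
)

func main() {
	kek := make([]byte, 16) // key-encryption key (A128KW)
	cek := make([]byte, 32) // content-encryption key; must be a multiple of 8 bytes
	_, _ = rand.Read(kek)
	_, _ = rand.Read(cek)

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, _ := josecipher.KeyWrap(block, cek) // output is len(cek)+8 bytes
	unwrapped, _ := josecipher.KeyUnwrap(block, wrapped)

	fmt.Println(bytes.Equal(cek, unwrapped)) // true
}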
317
vendor/github.com/square/go-jose/crypter.go
generated
vendored
@ -1,317 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Encrypter represents an encrypter which produces an encrypted JWE object.
|
||||
type Encrypter interface {
|
||||
Encrypt(plaintext []byte) (*JsonWebEncryption, error)
|
||||
EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
|
||||
SetCompression(alg CompressionAlgorithm)
|
||||
}
|
||||
|
||||
// MultiEncrypter represents an encrypter which supports multiple recipients.
|
||||
type MultiEncrypter interface {
|
||||
Encrypt(plaintext []byte) (*JsonWebEncryption, error)
|
||||
EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
|
||||
SetCompression(alg CompressionAlgorithm)
|
||||
AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error
|
||||
}
|
||||
|
||||
// A generic content cipher
|
||||
type contentCipher interface {
|
||||
keySize() int
|
||||
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
|
||||
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
|
||||
}
|
||||
|
||||
// A key generator (for generating/getting a CEK)
|
||||
type keyGenerator interface {
|
||||
keySize() int
|
||||
genKey() ([]byte, rawHeader, error)
|
||||
}
|
||||
|
||||
// A generic key encrypter
|
||||
type keyEncrypter interface {
|
||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
|
||||
}
|
||||
|
||||
// A generic key decrypter
|
||||
type keyDecrypter interface {
|
||||
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
|
||||
}
|
||||
|
||||
// A generic encrypter based on the given key encrypter and content cipher.
|
||||
type genericEncrypter struct {
|
||||
contentAlg ContentEncryption
|
||||
compressionAlg CompressionAlgorithm
|
||||
cipher contentCipher
|
||||
recipients []recipientKeyInfo
|
||||
keyGenerator keyGenerator
|
||||
}
|
||||
|
||||
type recipientKeyInfo struct {
|
||||
keyAlg KeyAlgorithm
|
||||
keyEncrypter keyEncrypter
|
||||
}
|
||||
|
||||
// SetCompression sets a compression algorithm to be applied before encryption.
|
||||
func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) {
|
||||
ctx.compressionAlg = compressionAlg
|
||||
}
|
||||
|
||||
// NewEncrypter creates an appropriate encrypter based on the key type
|
||||
func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) {
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
compressionAlg: NONE,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: getContentCipher(enc),
|
||||
}
|
||||
|
||||
if encrypter.cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case DIRECT:
|
||||
// Direct encryption mode must be treated differently
|
||||
if reflect.TypeOf(encryptionKey) != reflect.TypeOf([]byte{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = staticKeyGenerator{
|
||||
key: encryptionKey.([]byte),
|
||||
}
|
||||
recipient, _ := newSymmetricRecipient(alg, encryptionKey.([]byte))
|
||||
encrypter.recipients = []recipientKeyInfo{recipient}
|
||||
return encrypter, nil
|
||||
case ECDH_ES:
|
||||
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
|
||||
typeOf := reflect.TypeOf(encryptionKey)
|
||||
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = ecKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
algID: string(enc),
|
||||
publicKey: encryptionKey.(*ecdsa.PublicKey),
|
||||
}
|
||||
recipient, _ := newECDHRecipient(alg, encryptionKey.(*ecdsa.PublicKey))
|
||||
encrypter.recipients = []recipientKeyInfo{recipient}
|
||||
return encrypter, nil
|
||||
default:
|
||||
// Can just add a standard recipient
|
||||
encrypter.keyGenerator = randomKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
}
|
||||
err := encrypter.AddRecipient(alg, encryptionKey)
|
||||
return encrypter, err
|
||||
}
|
||||
}
|
||||
|
||||
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
|
||||
func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) {
|
||||
cipher := getContentCipher(enc)
|
||||
|
||||
if cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
compressionAlg: NONE,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: cipher,
|
||||
keyGenerator: randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
},
|
||||
}
|
||||
|
||||
return encrypter, nil
|
||||
}
|
||||
|
||||
func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) {
|
||||
var recipient recipientKeyInfo
|
||||
|
||||
switch alg {
|
||||
case DIRECT, ECDH_ES:
|
||||
return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg)
|
||||
}
|
||||
|
||||
switch encryptionKey := encryptionKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
recipient, err = newRSARecipient(alg, encryptionKey)
|
||||
case []byte:
|
||||
recipient, err = newSymmetricRecipient(alg, encryptionKey)
|
||||
case *ecdsa.PublicKey:
|
||||
recipient, err = newECDHRecipient(alg, encryptionKey)
|
||||
default:
|
||||
return ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
ctx.recipients = append(ctx.recipients, recipient)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// newDecrypter creates an appropriate decrypter based on the key type
|
||||
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
|
||||
switch decryptionKey := decryptionKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &rsaDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return &ecDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricKeyCipher{
|
||||
key: decryptionKey,
|
||||
}, nil
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) {
|
||||
return ctx.EncryptWithAuthData(plaintext, nil)
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) {
|
||||
obj := &JsonWebEncryption{}
|
||||
obj.aad = aad
|
||||
|
||||
obj.protected = &rawHeader{
|
||||
Enc: ctx.contentAlg,
|
||||
}
|
||||
obj.recipients = make([]recipientInfo, len(ctx.recipients))
|
||||
|
||||
if len(ctx.recipients) == 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
|
||||
}
|
||||
|
||||
cek, headers, err := ctx.keyGenerator.genKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.protected.merge(&headers)
|
||||
|
||||
for i, info := range ctx.recipients {
|
||||
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
recipient.header.Alg = string(info.keyAlg)
|
||||
obj.recipients[i] = recipient
|
||||
}
|
||||
|
||||
if len(ctx.recipients) == 1 {
|
||||
// Move per-recipient headers into main protected header if there's
|
||||
// only a single recipient.
|
||||
obj.protected.merge(obj.recipients[0].header)
|
||||
obj.recipients[0].header = nil
|
||||
}
|
||||
|
||||
if ctx.compressionAlg != NONE {
|
||||
plaintext, err = compress(ctx.compressionAlg, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.protected.Zip = ctx.compressionAlg
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.iv = parts.iv
|
||||
obj.ciphertext = parts.ciphertext
|
||||
obj.tag = parts.tag
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Decrypt and validate the object and return the plaintext.
|
||||
func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
|
||||
headers := obj.mergedHeaders(nil)
|
||||
|
||||
if len(headers.Crit) > 0 {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
decrypter, err := newDecrypter(decryptionKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipher := getContentCipher(headers.Enc)
|
||||
if cipher == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: obj.iv,
|
||||
ciphertext: obj.ciphertext,
|
||||
tag: obj.tag,
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
|
||||
var plaintext []byte
|
||||
for _, recipient := range obj.recipients {
|
||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
||||
|
||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
||||
if err == nil {
|
||||
// Found a valid CEK -- let's try to decrypt.
|
||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if plaintext == nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if obj.protected.Zip != "" {
|
||||
plaintext, err = decompress(obj.protected.Zip, plaintext)
|
||||
}
|
||||
|
||||
return plaintext, err
|
||||
}
|
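A minimal end-to-end sketch with a single RSA recipient. It assumes the package also defines an A128GCM ContentEncryption constant, which does not appear in this diff:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/square/go-jose"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// A128GCM is assumed to exist as a ContentEncryption constant.
	encrypter, err := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, &key.PublicKey)
	if err != nil {
		panic(err)
	}

	obj, _ := encrypter.Encrypt([]byte("hello"))
	msg, _ := obj.CompactSerialize()

	parsed, _ := jose.ParseEncrypted(msg)
	plaintext, _ := parsed.Decrypt(key)
	fmt.Println(string(plaintext)) // hello
}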
26
vendor/github.com/square/go-jose/doc.go
generated
vendored
@ -1,26 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. For the moment, it mainly focuses on
|
||||
encryption and signing based on the JSON Web Encryption and JSON Web Signature
|
||||
standards. The library supports both the compact and full serialization
|
||||
formats, and has optional support for multiple recipients.
|
||||
|
||||
*/
|
||||
package jose
|
192
vendor/github.com/square/go-jose/encoding.go
generated
vendored
@ -1,192 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"math/big"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var stripWhitespaceRegex = regexp.MustCompile("\\s")
|
||||
|
||||
// URL-safe base64 encoder that strips padding
|
||||
func base64URLEncode(data []byte) string {
|
||||
var result = base64.URLEncoding.EncodeToString(data)
|
||||
return strings.TrimRight(result, "=")
|
||||
}
|
||||
|
||||
// URL-safe base64 decoder that adds padding
|
||||
func base64URLDecode(data string) ([]byte, error) {
|
||||
var missing = (4 - len(data)%4) % 4
|
||||
data += strings.Repeat("=", missing)
|
||||
return base64.URLEncoding.DecodeString(data)
|
||||
}
|
||||
|
||||
// Helper function to serialize known-good objects.
|
||||
// Precondition: value is not a nil pointer.
|
||||
func mustSerializeJSON(value interface{}) []byte {
|
||||
out, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// We never want to serialize the top-level value "null," since it's not a
|
||||
// valid JOSE message. But if a caller passes in a nil pointer to this method,
|
||||
// json.Marshal will happily serialize it as the top-level value "null". If
|
||||
// that value is then embedded in another operation, for instance by being
|
||||
// base64-encoded and fed as input to a signing algorithm
|
||||
// (https://github.com/square/go-jose/issues/22), the result will be
|
||||
// incorrect. Because this method is intended for known-good objects, and a nil
|
||||
// pointer is not a known-good object, we are free to panic in this case.
|
||||
// Note: It's not possible to directly check whether the data pointed at by an
|
||||
// interface is a nil pointer, so we do this hacky workaround.
|
||||
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
|
||||
if string(out) == "null" {
|
||||
panic("Tried to serialize a nil pointer.")
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Strip all newlines and whitespace
|
||||
func stripWhitespace(data string) string {
|
||||
return stripWhitespaceRegex.ReplaceAllString(data, "")
|
||||
}
|
||||
|
||||
// Perform compression based on algorithm
|
||||
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return deflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Perform decompression based on algorithm
|
||||
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return inflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Compress with DEFLATE
|
||||
func deflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
|
||||
// Writing to byte buffer, err is always nil
|
||||
writer, _ := flate.NewWriter(output, 1)
|
||||
_, _ = io.Copy(writer, bytes.NewBuffer(input))
|
||||
|
||||
err := writer.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// Decompress with DEFLATE
|
||||
func inflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
reader := flate.NewReader(bytes.NewBuffer(input))
|
||||
|
||||
_, err := io.Copy(output, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = reader.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
|
||||
type byteBuffer struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func newBuffer(data []byte) *byteBuffer {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return &byteBuffer{
|
||||
data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
|
||||
if len(data) > length {
|
||||
panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
||||
}
|
||||
pad := make([]byte, length-len(data))
|
||||
return newBuffer(append(pad, data...))
|
||||
}
|
||||
|
||||
func newBufferFromInt(num uint64) *byteBuffer {
|
||||
data := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(data, num)
|
||||
return newBuffer(bytes.TrimLeft(data, "\x00"))
|
||||
}
|
||||
|
||||
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(b.base64())
|
||||
}
|
||||
|
||||
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
|
||||
var encoded string
|
||||
err := json.Unmarshal(data, &encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if encoded == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
decoded, err := base64URLDecode(encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*b = *newBuffer(decoded)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *byteBuffer) base64() string {
|
||||
return base64URLEncode(b.data)
|
||||
}
|
||||
|
||||
func (b *byteBuffer) bytes() []byte {
|
||||
// Handling nil here allows us to transparently handle nil slices when serializing.
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.data
|
||||
}
|
||||
|
||||
func (b byteBuffer) bigInt() *big.Int {
|
||||
return new(big.Int).SetBytes(b.data)
|
||||
}
|
||||
|
||||
func (b byteBuffer) toInt() int {
|
||||
return int(b.bigInt().Int64())
|
||||
}
|
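The two unexported base64 helpers above amount to unpadded URL-safe base64. A minimal stdlib-only sketch of the same round trip:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	data := []byte{0xfb, 0xff, 0x00}

	// Encode: URL-safe base64, then strip the '=' padding (what base64URLEncode does).
	encoded := strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=")

	// Newer standard libraries expose the same encoding directly.
	fmt.Println(encoded == base64.RawURLEncoding.EncodeToString(data)) // true

	// Decode: re-add the missing padding first (what base64URLDecode does).
	missing := (4 - len(encoded)%4) % 4
	decoded, err := base64.URLEncoding.DecodeString(encoded + strings.Repeat("=", missing))
	fmt.Printf("%x %v\n", decoded, err) // fbff00 <nil>
}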
263
vendor/github.com/square/go-jose/jwe.go
generated
vendored
@ -1,263 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// rawJsonWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
|
||||
type rawJsonWebEncryption struct {
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Unprotected *rawHeader `json:"unprotected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Recipients []rawRecipientInfo `json:"recipients,omitempty"`
|
||||
Aad *byteBuffer `json:"aad,omitempty"`
|
||||
EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
|
||||
Iv *byteBuffer `json:"iv,omitempty"`
|
||||
Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
|
||||
Tag *byteBuffer `json:"tag,omitempty"`
|
||||
}
|
||||
|
||||
// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
|
||||
type rawRecipientInfo struct {
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
EncryptedKey string `json:"encrypted_key,omitempty"`
|
||||
}
|
||||
|
||||
// JsonWebEncryption represents an encrypted JWE object after parsing.
|
||||
type JsonWebEncryption struct {
|
||||
Header JoseHeader
|
||||
protected, unprotected *rawHeader
|
||||
recipients []recipientInfo
|
||||
aad, iv, ciphertext, tag []byte
|
||||
original *rawJsonWebEncryption
|
||||
}
|
||||
|
||||
// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
|
||||
type recipientInfo struct {
|
||||
header *rawHeader
|
||||
encryptedKey []byte
|
||||
}
|
||||
|
||||
// GetAuthData retrieves the (optional) authenticated data attached to the object.
|
||||
func (obj JsonWebEncryption) GetAuthData() []byte {
|
||||
if obj.aad != nil {
|
||||
out := make([]byte, len(obj.aad))
|
||||
copy(out, obj.aad)
|
||||
return out
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the merged header values
|
||||
func (obj JsonWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(obj.protected)
|
||||
out.merge(obj.unprotected)
|
||||
|
||||
if recipient != nil {
|
||||
out.merge(recipient.header)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Compute the additional authenticated data for a JWE object.
|
||||
func (obj JsonWebEncryption) computeAuthData() []byte {
|
||||
var protected string
|
||||
|
||||
if obj.original != nil {
|
||||
protected = obj.original.Protected.base64()
|
||||
} else {
|
||||
protected = base64URLEncode(mustSerializeJSON((obj.protected)))
|
||||
}
|
||||
|
||||
output := []byte(protected)
|
||||
if obj.aad != nil {
|
||||
output = append(output, '.')
|
||||
output = append(output, []byte(base64URLEncode(obj.aad))...)
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// ParseEncrypted parses an encrypted message in compact or full serialization format.
|
||||
func ParseEncrypted(input string) (*JsonWebEncryption, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
return parseEncryptedFull(input)
|
||||
}
|
||||
|
||||
return parseEncryptedCompact(input)
|
||||
}
|
||||
|
||||
// parseEncryptedFull parses a message in full serialization format.
|
||||
func parseEncryptedFull(input string) (*JsonWebEncryption, error) {
|
||||
var parsed rawJsonWebEncryption
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWE object from the raw JSON.
|
||||
func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) {
|
||||
obj := &JsonWebEncryption{
|
||||
original: parsed,
|
||||
unprotected: parsed.Unprotected,
|
||||
}
|
||||
|
||||
obj.Header = obj.mergedHeaders(nil).sanitized()
|
||||
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
|
||||
}
|
||||
}
|
||||
|
||||
if len(parsed.Recipients) == 0 {
|
||||
obj.recipients = []recipientInfo{
|
||||
recipientInfo{
|
||||
header: parsed.Header,
|
||||
encryptedKey: parsed.EncryptedKey.bytes(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
|
||||
for r := range parsed.Recipients {
|
||||
encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.recipients[r].header = parsed.Recipients[r].Header
|
||||
obj.recipients[r].encryptedKey = encryptedKey
|
||||
}
|
||||
}
|
||||
|
||||
for _, recipient := range obj.recipients {
|
||||
headers := obj.mergedHeaders(&recipient)
|
||||
if headers.Alg == "" || headers.Enc == "" {
|
||||
return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
|
||||
}
|
||||
}
|
||||
|
||||
obj.iv = parsed.Iv.bytes()
|
||||
obj.ciphertext = parsed.Ciphertext.bytes()
|
||||
obj.tag = parsed.Tag.bytes()
|
||||
obj.aad = parsed.Aad.bytes()
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseEncryptedCompact parses a message in compact format.
|
||||
func parseEncryptedCompact(input string) (*JsonWebEncryption, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 5 {
|
||||
return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64URLDecode(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encryptedKey, err := base64URLDecode(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := base64URLDecode(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertext, err := base64URLDecode(parts[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tag, err := base64URLDecode(parts[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJsonWebEncryption{
|
||||
Protected: newBuffer(rawProtected),
|
||||
EncryptedKey: newBuffer(encryptedKey),
|
||||
Iv: newBuffer(iv),
|
||||
Ciphertext: newBuffer(ciphertext),
|
||||
Tag: newBuffer(tag),
|
||||
}
|
||||
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JsonWebEncryption) CompactSerialize() (string, error) {
|
||||
if len(obj.recipients) > 1 || obj.unprotected != nil || obj.recipients[0].header != nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(obj.protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s.%s.%s",
|
||||
base64URLEncode(serializedProtected),
|
||||
base64URLEncode(obj.recipients[0].encryptedKey),
|
||||
base64URLEncode(obj.iv),
|
||||
base64URLEncode(obj.ciphertext),
|
||||
base64URLEncode(obj.tag)), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JsonWebEncryption) FullSerialize() string {
|
||||
raw := rawJsonWebEncryption{
|
||||
Unprotected: obj.unprotected,
|
||||
Iv: newBuffer(obj.iv),
|
||||
Ciphertext: newBuffer(obj.ciphertext),
|
||||
EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
|
||||
Tag: newBuffer(obj.tag),
|
||||
Aad: newBuffer(obj.aad),
|
||||
Recipients: []rawRecipientInfo{},
|
||||
}
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
for _, recipient := range obj.recipients {
|
||||
info := rawRecipientInfo{
|
||||
Header: recipient.header,
|
||||
EncryptedKey: base64URLEncode(recipient.encryptedKey),
|
||||
}
|
||||
raw.Recipients = append(raw.Recipients, info)
|
||||
}
|
||||
} else {
|
||||
// Use flattened serialization
|
||||
raw.Header = obj.recipients[0].header
|
||||
raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
|
||||
}
|
||||
|
||||
raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
|
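A minimal sketch contrasting the two serializations handled above, again assuming an A128GCM ContentEncryption constant not shown in this diff:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"strings"

	jose "github.com/square/go-jose"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048)
	encrypter, _ := jose.NewEncrypter(jose.RSA_OAEP, jose.A128GCM, &key.PublicKey)
	obj, _ := encrypter.Encrypt([]byte("hi"))

	// Compact form: five dot-separated base64url parts (parseEncryptedCompact).
	compact, _ := obj.CompactSerialize()
	fmt.Println(len(strings.Split(compact, "."))) // 5

	// Full form: a JSON object, routed to parseEncryptedFull by its leading '{'.
	full := obj.FullSerialize()
	fmt.Println(strings.HasPrefix(full, "{")) // true

	// Both forms parse back into an equivalent object.
	if _, err := jose.ParseEncrypted(full); err != nil {
		panic(err)
	}
}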
275
vendor/github.com/square/go-jose/jwk.go
generated
vendored
@ -1,275 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// rawJsonWebKey represents a public or private key in JWK format, used for parsing/serializing.
|
||||
type rawJsonWebKey struct {
|
||||
Kty string `json:"kty,omitempty"`
|
||||
Kid string `json:"kid,omitempty"`
|
||||
Crv string `json:"crv,omitempty"`
|
||||
Alg string `json:"alg,omitempty"`
|
||||
X *byteBuffer `json:"x,omitempty"`
|
||||
Y *byteBuffer `json:"y,omitempty"`
|
||||
N *byteBuffer `json:"n,omitempty"`
|
||||
E *byteBuffer `json:"e,omitempty"`
|
||||
// -- Following fields are only used for private keys --
|
||||
// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
|
||||
// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
|
||||
// we have a private key whereas D == nil means we have only a public key.
|
||||
D *byteBuffer `json:"d,omitempty"`
|
||||
P *byteBuffer `json:"p,omitempty"`
|
||||
Q *byteBuffer `json:"q,omitempty"`
|
||||
Dp *byteBuffer `json:"dp,omitempty"`
|
||||
Dq *byteBuffer `json:"dq,omitempty"`
|
||||
Qi *byteBuffer `json:"qi,omitempty"`
|
||||
}
|
||||
|
||||
// JsonWebKey represents a public or private key in JWK format.
|
||||
type JsonWebKey struct {
|
||||
Key interface{}
|
||||
KeyID string
|
||||
Algorithm string
|
||||
}
|
||||
|
||||
// MarshalJSON serializes the given key to its JSON representation.
|
||||
func (k JsonWebKey) MarshalJSON() ([]byte, error) {
|
||||
var raw *rawJsonWebKey
|
||||
var err error
|
||||
|
||||
switch key := k.Key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
raw, err = fromEcPublicKey(key)
|
||||
case *rsa.PublicKey:
|
||||
raw = fromRsaPublicKey(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
raw, err = fromEcPrivateKey(key)
|
||||
case *rsa.PrivateKey:
|
||||
raw, err = fromRsaPrivateKey(key)
|
||||
default:
|
||||
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw.Kid = k.KeyID
|
||||
raw.Alg = k.Algorithm
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
|
||||
|
||||
// UnmarshalJSON reads a key from its JSON representation.
|
||||
func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) {
|
||||
var raw rawJsonWebKey
|
||||
err = json.Unmarshal(data, &raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var key interface{}
|
||||
switch raw.Kty {
|
||||
case "EC":
|
||||
if raw.D != nil {
|
||||
key, err = raw.ecPrivateKey()
|
||||
} else {
|
||||
key, err = raw.ecPublicKey()
|
||||
}
|
||||
case "RSA":
|
||||
if raw.D != nil {
|
||||
key, err = raw.rsaPrivateKey()
|
||||
} else {
|
||||
key, err = raw.rsaPublicKey()
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
*k = JsonWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg}
|
    }
    return
}

func (key rawJsonWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
    if key.N == nil || key.E == nil {
        return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
    }

    return &rsa.PublicKey{
        N: key.N.bigInt(),
        E: key.E.toInt(),
    }, nil
}

func fromRsaPublicKey(pub *rsa.PublicKey) *rawJsonWebKey {
    return &rawJsonWebKey{
        Kty: "RSA",
        N:   newBuffer(pub.N.Bytes()),
        E:   newBufferFromInt(uint64(pub.E)),
    }
}

func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
    var curve elliptic.Curve
    switch key.Crv {
    case "P-256":
        curve = elliptic.P256()
    case "P-384":
        curve = elliptic.P384()
    case "P-521":
        curve = elliptic.P521()
    default:
        return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
    }

    if key.X == nil || key.Y == nil {
        return nil, fmt.Errorf("square/go-jose: invalid EC key, missing x/y values")
    }

    return &ecdsa.PublicKey{
        Curve: curve,
        X:     key.X.bigInt(),
        Y:     key.Y.bigInt(),
    }, nil
}

func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJsonWebKey, error) {
    if pub == nil || pub.X == nil || pub.Y == nil {
        return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
    }

    name, err := curveName(pub.Curve)
    if err != nil {
        return nil, err
    }

    size := curveSize(pub.Curve)

    xBytes := pub.X.Bytes()
    yBytes := pub.Y.Bytes()

    if len(xBytes) > size || len(yBytes) > size {
        return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
    }

    key := &rawJsonWebKey{
        Kty: "EC",
        Crv: name,
        X:   newFixedSizeBuffer(xBytes, size),
        Y:   newFixedSizeBuffer(yBytes, size),
    }

    return key, nil
}

func (key rawJsonWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
    if key.N == nil || key.E == nil || key.D == nil || key.P == nil || key.Q == nil {
        return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing values")
    }

    rv := &rsa.PrivateKey{
        PublicKey: rsa.PublicKey{
            N: key.N.bigInt(),
            E: key.E.toInt(),
        },
        D: key.D.bigInt(),
        Primes: []*big.Int{
            key.P.bigInt(),
            key.Q.bigInt(),
        },
    }

    if key.Dp != nil {
        rv.Precomputed.Dp = key.Dp.bigInt()
    }
    if key.Dq != nil {
        rv.Precomputed.Dq = key.Dq.bigInt()
    }
    if key.Qi != nil {
        rv.Precomputed.Qinv = key.Qi.bigInt()
    }

    err := rv.Validate()
    return rv, err
}

func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJsonWebKey, error) {
    if len(rsa.Primes) != 2 {
        return nil, ErrUnsupportedKeyType
    }

    raw := fromRsaPublicKey(&rsa.PublicKey)

    raw.D = newBuffer(rsa.D.Bytes())
    raw.P = newBuffer(rsa.Primes[0].Bytes())
    raw.Q = newBuffer(rsa.Primes[1].Bytes())

    return raw, nil
}

func (key rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
    var curve elliptic.Curve
    switch key.Crv {
    case "P-256":
        curve = elliptic.P256()
    case "P-384":
        curve = elliptic.P384()
    case "P-521":
        curve = elliptic.P521()
    default:
        return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
    }

    if key.X == nil || key.Y == nil || key.D == nil {
        return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
    }

    return &ecdsa.PrivateKey{
        PublicKey: ecdsa.PublicKey{
            Curve: curve,
            X:     key.X.bigInt(),
            Y:     key.Y.bigInt(),
        },
        D: key.D.bigInt(),
    }, nil
}

func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJsonWebKey, error) {
    raw, err := fromEcPublicKey(&ec.PublicKey)
    if err != nil {
        return nil, err
    }

    if ec.D == nil {
        return nil, fmt.Errorf("square/go-jose: invalid EC private key")
    }

    raw.D = newBuffer(ec.D.Bytes())

    return raw, nil
}
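The deleted jwk.go helpers above map EC keys onto the JWK wire fields: x and y are left-padded to the curve byte size and base64url-encoded without padding. As a point of reference only, a stdlib-only sketch of that mapping; the helper name and the map shape are illustrative and not part of the removed package.

package main

import (
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    "encoding/base64"
    "fmt"
)

// jwkFromECPublicKey left-pads X and Y to the curve byte size and emits
// unpadded base64url values, the same layout fromEcPublicKey produced.
func jwkFromECPublicKey(pub *ecdsa.PublicKey) map[string]string {
    size := (pub.Curve.Params().BitSize + 7) / 8
    pad := func(b []byte) []byte {
        out := make([]byte, size)
        copy(out[size-len(b):], b)
        return out
    }
    enc := base64.RawURLEncoding.EncodeToString
    return map[string]string{
        "kty": "EC",
        "crv": pub.Curve.Params().Name, // "P-256", "P-384" or "P-521"
        "x":   enc(pad(pub.X.Bytes())),
        "y":   enc(pad(pub.Y.Bytes())),
    }
}

func main() {
    priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    fmt.Println(jwkFromECPublicKey(&priv.PublicKey))
}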
237 vendor/github.com/square/go-jose/jws.go (generated, vendored)
@@ -1,237 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// rawJsonWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
|
||||
type rawJsonWebSignature struct {
|
||||
Payload *byteBuffer `json:"payload,omitempty"`
|
||||
Signatures []rawSignatureInfo `json:"signatures,omitempty"`
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
|
||||
type rawSignatureInfo struct {
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// JsonWebSignature represents a signed JWS object after parsing.
|
||||
type JsonWebSignature struct {
|
||||
payload []byte
|
||||
Signatures []Signature
|
||||
}
|
||||
|
||||
// Signature represents a single signature over the JWS payload and protected header.
|
||||
type Signature struct {
|
||||
Header JoseHeader
|
||||
protected *rawHeader
|
||||
header *rawHeader
|
||||
signature []byte
|
||||
original *rawSignatureInfo
|
||||
}
|
||||
|
||||
// ParseSigned parses an encrypted message in compact or full serialization format.
|
||||
func ParseSigned(input string) (*JsonWebSignature, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
return parseSignedFull(input)
|
||||
}
|
||||
|
||||
return parseSignedCompact(input)
|
||||
}
|
||||
|
||||
// Get a header value
|
||||
func (sig Signature) mergedHeaders() rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(sig.protected)
|
||||
out.merge(sig.header)
|
||||
return out
|
||||
}
|
||||
|
||||
// Compute data to be signed
|
||||
func (obj JsonWebSignature) computeAuthData(signature *Signature) []byte {
|
||||
var serializedProtected string
|
||||
|
||||
if signature.original != nil && signature.original.Protected != nil {
|
||||
serializedProtected = signature.original.Protected.base64()
|
||||
} else if signature.protected != nil {
|
||||
serializedProtected = base64URLEncode(mustSerializeJSON(signature.protected))
|
||||
} else {
|
||||
serializedProtected = ""
|
||||
}
|
||||
|
||||
return []byte(fmt.Sprintf("%s.%s",
|
||||
serializedProtected,
|
||||
base64URLEncode(obj.payload)))
|
||||
}
|
||||
|
||||
// parseSignedFull parses a message in full format.
|
||||
func parseSignedFull(input string) (*JsonWebSignature, error) {
|
||||
var parsed rawJsonWebSignature
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWS object from the raw JSON.
|
||||
func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
|
||||
if parsed.Payload == nil {
|
||||
return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
|
||||
}
|
||||
|
||||
obj := &JsonWebSignature{
|
||||
payload: parsed.Payload.bytes(),
|
||||
Signatures: make([]Signature, len(parsed.Signatures)),
|
||||
}
|
||||
|
||||
if len(parsed.Signatures) == 0 {
|
||||
// No signatures array, must be flattened serialization
|
||||
signature := Signature{}
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
signature.protected = &rawHeader{}
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
signature.header = parsed.Header
|
||||
signature.signature = parsed.Signature.bytes()
|
||||
// Make a fake "original" rawSignatureInfo to store the unprocessed
|
||||
// Protected header. This is necessary because the Protected header can
|
||||
// contain arbitrary fields not registered as part of the spec. See
|
||||
// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
|
||||
// If we unmarshal Protected into a rawHeader with its explicit list of fields,
|
||||
// we cannot marshal losslessly. So we have to keep around the original bytes.
|
||||
// This is used in computeAuthData, which will first attempt to use
|
||||
// the original bytes of a protected header, and fall back on marshaling the
|
||||
// header struct only if those bytes are not available.
|
||||
signature.original = &rawSignatureInfo{
|
||||
Protected: parsed.Protected,
|
||||
Header: parsed.Header,
|
||||
Signature: parsed.Signature,
|
||||
}
|
||||
|
||||
signature.Header = signature.mergedHeaders().sanitized()
|
||||
obj.Signatures = append(obj.Signatures, signature)
|
||||
}
|
||||
|
||||
for i, sig := range parsed.Signatures {
|
||||
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
|
||||
obj.Signatures[i].protected = &rawHeader{}
|
||||
err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
obj.Signatures[i].signature = sig.Signature.bytes()
|
||||
|
||||
// Copy value of sig
|
||||
original := sig
|
||||
|
||||
obj.Signatures[i].header = sig.Header
|
||||
obj.Signatures[i].original = &original
|
||||
obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized()
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseSignedCompact parses a message in compact format.
|
||||
func parseSignedCompact(input string) (*JsonWebSignature, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64URLDecode(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
payload, err := base64URLDecode(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signature, err := base64URLDecode(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJsonWebSignature{
|
||||
Payload: newBuffer(payload),
|
||||
Protected: newBuffer(rawProtected),
|
||||
Signature: newBuffer(signature),
|
||||
}
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JsonWebSignature) CompactSerialize() (string, error) {
|
||||
if len(obj.Signatures) > 1 || obj.Signatures[0].header != nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s",
|
||||
base64URLEncode(serializedProtected),
|
||||
base64URLEncode(obj.payload),
|
||||
base64URLEncode(obj.Signatures[0].signature)), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JsonWebSignature) FullSerialize() string {
|
||||
raw := rawJsonWebSignature{
|
||||
Payload: newBuffer(obj.payload),
|
||||
}
|
||||
|
||||
if len(obj.Signatures) == 1 {
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
raw.Protected = newBuffer(serializedProtected)
|
||||
raw.Header = obj.Signatures[0].header
|
||||
raw.Signature = newBuffer(obj.Signatures[0].signature)
|
||||
} else {
|
||||
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
|
||||
for i, signature := range obj.Signatures {
|
||||
serializedProtected := mustSerializeJSON(signature.protected)
|
||||
|
||||
raw.Signatures[i] = rawSignatureInfo{
|
||||
Protected: newBuffer(serializedProtected),
|
||||
Header: signature.header,
|
||||
Signature: newBuffer(signature.signature),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
|
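jws.go's parseSignedCompact above treats a compact JWS as exactly three base64url sections separated by dots: protected header, payload, signature. A minimal stdlib-only sketch of that split, for reference; the function name and the example token are illustrative, not part of this commit.

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

// splitCompactJWS mirrors parseSignedCompact: reject anything that is not
// exactly "protected.payload.signature" and base64url-decode each part.
func splitCompactJWS(token string) (header, payload, sig []byte, err error) {
    parts := strings.Split(token, ".")
    if len(parts) != 3 {
        return nil, nil, nil, fmt.Errorf("compact JWS must have three parts")
    }
    dec := base64.RawURLEncoding.DecodeString
    if header, err = dec(parts[0]); err != nil {
        return
    }
    if payload, err = dec(parts[1]); err != nil {
        return
    }
    sig, err = dec(parts[2])
    return
}

func main() {
    h, p, s, err := splitCompactJWS("eyJhbGciOiJIUzI1NiJ9.cGF5bG9hZA.c2ln")
    fmt.Println(string(h), string(p), len(s), err)
}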
214 vendor/github.com/square/go-jose/shared.go (generated, vendored)
@@ -1,214 +0,0 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package jose

import (
    "crypto/elliptic"
    "errors"
    "fmt"
)

// KeyAlgorithm represents a key management algorithm.
type KeyAlgorithm string

// SignatureAlgorithm represents a signature (or MAC) algorithm.
type SignatureAlgorithm string

// ContentEncryption represents a content encryption algorithm.
type ContentEncryption string

// CompressionAlgorithm represents an algorithm used for plaintext compression.
type CompressionAlgorithm string

var (
    // ErrCryptoFailure represents an error in a cryptographic primitive. This
    // occurs when, for example, a message had an invalid authentication tag or
    // could not be decrypted.
    ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")

    // ErrUnsupportedAlgorithm indicates that a selected algorithm is not
    // supported. This occurs when trying to instantiate an encrypter for an
    // algorithm that is not yet implemented.
    ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")

    // ErrUnsupportedKeyType indicates that the given key type/format is not
    // supported. This occurs when trying to instantiate an encrypter and passing
    // it a key of an unrecognized type or with unsupported parameters, such as
    // an RSA private key with more than two primes.
    ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")

    // ErrNotSupported indicates that serialization of the object is not
    // supported. This occurs when trying to compact-serialize an object which
    // can't be represented in compact form.
    ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
)

// Key management algorithms
const (
    RSA1_5             = KeyAlgorithm("RSA1_5")             // RSA-PKCS1v1.5
    RSA_OAEP           = KeyAlgorithm("RSA-OAEP")           // RSA-OAEP-SHA1
    RSA_OAEP_256       = KeyAlgorithm("RSA-OAEP-256")       // RSA-OAEP-SHA256
    A128KW             = KeyAlgorithm("A128KW")             // AES key wrap (128)
    A192KW             = KeyAlgorithm("A192KW")             // AES key wrap (192)
    A256KW             = KeyAlgorithm("A256KW")             // AES key wrap (256)
    DIRECT             = KeyAlgorithm("dir")                // Direct encryption
    ECDH_ES            = KeyAlgorithm("ECDH-ES")            // ECDH-ES
    ECDH_ES_A128KW     = KeyAlgorithm("ECDH-ES+A128KW")     // ECDH-ES + AES key wrap (128)
    ECDH_ES_A192KW     = KeyAlgorithm("ECDH-ES+A192KW")     // ECDH-ES + AES key wrap (192)
    ECDH_ES_A256KW     = KeyAlgorithm("ECDH-ES+A256KW")     // ECDH-ES + AES key wrap (256)
    A128GCMKW          = KeyAlgorithm("A128GCMKW")          // AES-GCM key wrap (128)
    A192GCMKW          = KeyAlgorithm("A192GCMKW")          // AES-GCM key wrap (192)
    A256GCMKW          = KeyAlgorithm("A256GCMKW")          // AES-GCM key wrap (256)
    PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
    PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
    PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
)

// Signature algorithms
const (
    HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
    HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
    HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
    RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
    RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
    RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
    ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
    ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
    ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
    PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
    PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
    PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
)

// Content encryption algorithms
const (
    A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
    A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
    A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
    A128GCM       = ContentEncryption("A128GCM")       // AES-GCM (128)
    A192GCM       = ContentEncryption("A192GCM")       // AES-GCM (192)
    A256GCM       = ContentEncryption("A256GCM")       // AES-GCM (256)
)

// Compression algorithms
const (
    NONE    = CompressionAlgorithm("")    // No compression
    DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
)

// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
type rawHeader struct {
    Alg  string               `json:"alg,omitempty"`
    Enc  ContentEncryption    `json:"enc,omitempty"`
    Zip  CompressionAlgorithm `json:"zip,omitempty"`
    Crit []string             `json:"crit,omitempty"`
    Apu  *byteBuffer          `json:"apu,omitempty"`
    Apv  *byteBuffer          `json:"apv,omitempty"`
    Epk  *JsonWebKey          `json:"epk,omitempty"`
    Iv   *byteBuffer          `json:"iv,omitempty"`
    Tag  *byteBuffer          `json:"tag,omitempty"`
    Jwk  *JsonWebKey          `json:"jwk,omitempty"`
    Kid  string               `json:"kid,omitempty"`
}

// JoseHeader represents the read-only JOSE header for JWE/JWS objects.
type JoseHeader struct {
    KeyID      string
    JsonWebKey *JsonWebKey
    Algorithm  string
}

// sanitized produces a cleaned-up header object from the raw JSON.
func (parsed rawHeader) sanitized() JoseHeader {
    return JoseHeader{
        KeyID:      parsed.Kid,
        JsonWebKey: parsed.Jwk,
        Algorithm:  parsed.Alg,
    }
}

// Merge headers from src into dst, giving precedence to headers already set in dst.
func (dst *rawHeader) merge(src *rawHeader) {
    if src == nil {
        return
    }

    if dst.Alg == "" {
        dst.Alg = src.Alg
    }
    if dst.Enc == "" {
        dst.Enc = src.Enc
    }
    if dst.Zip == "" {
        dst.Zip = src.Zip
    }
    if dst.Crit == nil {
        dst.Crit = src.Crit
    }
    if dst.Apu == nil {
        dst.Apu = src.Apu
    }
    if dst.Apv == nil {
        dst.Apv = src.Apv
    }
    if dst.Epk == nil {
        dst.Epk = src.Epk
    }
    if dst.Iv == nil {
        dst.Iv = src.Iv
    }
    if dst.Tag == nil {
        dst.Tag = src.Tag
    }
    if dst.Kid == "" {
        dst.Kid = src.Kid
    }
    if dst.Jwk == nil {
        dst.Jwk = src.Jwk
    }
}

// Get JOSE name of curve
func curveName(crv elliptic.Curve) (string, error) {
    switch crv {
    case elliptic.P256():
        return "P-256", nil
    case elliptic.P384():
        return "P-384", nil
    case elliptic.P521():
        return "P-521", nil
    default:
        return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
    }
}

// Get size of curve in bytes
func curveSize(crv elliptic.Curve) int {
    bits := crv.Params().BitSize

    div := bits / 8
    mod := bits % 8

    if mod == 0 {
        return div
    }

    return div + 1
}
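curveSize in the deleted shared.go rounds the curve bit size up to whole bytes with the div/mod form above. The same computation as a one-liner, for reference; P-521 is the case where the rounding actually matters.

package main

import (
    "crypto/elliptic"
    "fmt"
)

// curveSize is ceil(BitSize / 8), equivalent to the div/mod form above.
func curveSize(crv elliptic.Curve) int {
    return (crv.Params().BitSize + 7) / 8
}

func main() {
    // P-521 has a 521-bit order, so coordinates need 66 bytes, not 65.
    fmt.Println(curveSize(elliptic.P256()), curveSize(elliptic.P384()), curveSize(elliptic.P521())) // 32 48 66
}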
182 vendor/github.com/square/go-jose/signing.go (generated, vendored)
@@ -1,182 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Signer represents a signer which takes a payload and produces a signed JWS object.
|
||||
type Signer interface {
|
||||
Sign(payload []byte) (*JsonWebSignature, error)
|
||||
}
|
||||
|
||||
// MultiSigner represents a signer which supports multiple recipients.
|
||||
type MultiSigner interface {
|
||||
Sign(payload []byte) (*JsonWebSignature, error)
|
||||
AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error
|
||||
}
|
||||
|
||||
type payloadSigner interface {
|
||||
signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
|
||||
}
|
||||
|
||||
type payloadVerifier interface {
|
||||
verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
||||
}
|
||||
|
||||
type genericSigner struct {
|
||||
recipients []recipientSigInfo
|
||||
}
|
||||
|
||||
type recipientSigInfo struct {
|
||||
sigAlg SignatureAlgorithm
|
||||
publicKey *JsonWebKey
|
||||
signer payloadSigner
|
||||
}
|
||||
|
||||
// NewSigner creates an appropriate signer based on the key type
|
||||
func NewSigner(alg SignatureAlgorithm, signingKey interface{}) (Signer, error) {
|
||||
// NewMultiSigner never fails (currently)
|
||||
signer := NewMultiSigner()
|
||||
|
||||
err := signer.AddRecipient(alg, signingKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
// NewMultiSigner creates a signer for multiple recipients
|
||||
func NewMultiSigner() MultiSigner {
|
||||
return &genericSigner{
|
||||
recipients: []recipientSigInfo{},
|
||||
}
|
||||
}
|
||||
|
||||
// newVerifier creates a verifier based on the key type
|
||||
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
|
||||
switch verificationKey := verificationKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return &rsaEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case *ecdsa.PublicKey:
|
||||
return &ecEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricMac{
|
||||
key: verificationKey,
|
||||
}, nil
|
||||
case *JsonWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
default:
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) AddRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
|
||||
recipient, err := makeRecipient(alg, signingKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx.recipients = append(ctx.recipients, recipient)
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
|
||||
switch signingKey := signingKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return newRSASigner(alg, signingKey)
|
||||
case *ecdsa.PrivateKey:
|
||||
return newECDSASigner(alg, signingKey)
|
||||
case []byte:
|
||||
return newSymmetricSigner(alg, signingKey)
|
||||
case *JsonWebKey:
|
||||
recipient, err := makeRecipient(alg, signingKey.Key)
|
||||
if err != nil {
|
||||
return recipientSigInfo{}, err
|
||||
}
|
||||
recipient.publicKey.KeyID = signingKey.KeyID
|
||||
return recipient, nil
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) Sign(payload []byte) (*JsonWebSignature, error) {
|
||||
obj := &JsonWebSignature{}
|
||||
obj.payload = payload
|
||||
obj.Signatures = make([]Signature, len(ctx.recipients))
|
||||
|
||||
for i, recipient := range ctx.recipients {
|
||||
protected := &rawHeader{
|
||||
Alg: string(recipient.sigAlg),
|
||||
}
|
||||
|
||||
if recipient.publicKey != nil {
|
||||
protected.Jwk = recipient.publicKey
|
||||
protected.Kid = recipient.publicKey.KeyID
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(protected)
|
||||
|
||||
input := []byte(fmt.Sprintf("%s.%s",
|
||||
base64URLEncode(serializedProtected),
|
||||
base64URLEncode(payload)))
|
||||
|
||||
signatureInfo, err := recipient.signer.signPayload(input, recipient.sigAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signatureInfo.protected = protected
|
||||
obj.Signatures[i] = signatureInfo
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Verify validates the signature on the object and returns the payload.
|
||||
func (obj JsonWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
|
||||
verifier, err := newVerifier(verificationKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, signature := range obj.Signatures {
|
||||
headers := signature.mergedHeaders()
|
||||
if len(headers.Crit) > 0 {
|
||||
// Unsupported crit header
|
||||
continue
|
||||
}
|
||||
|
||||
input := obj.computeAuthData(&signature)
|
||||
alg := SignatureAlgorithm(headers.Alg)
|
||||
err := verifier.verifyPayload(input, signature.signature, alg)
|
||||
if err == nil {
|
||||
return obj.payload, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
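The deleted Sign method above builds its signing input as base64url(protected) + "." + base64url(payload) before handing it to the per-recipient signer. A stdlib-only HS256 sketch of that flow, for reference only; the header literal and key are illustrative, not part of the removed package.

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

// signHS256 produces a compact HS256 JWS over the same signing input shape
// that genericSigner.Sign constructs.
func signHS256(payload, key []byte) string {
    enc := base64.RawURLEncoding.EncodeToString
    input := enc([]byte(`{"alg":"HS256"}`)) + "." + enc(payload)
    mac := hmac.New(sha256.New, key)
    mac.Write([]byte(input))
    return input + "." + enc(mac.Sum(nil))
}

func main() {
    fmt.Println(signHS256([]byte("payload"), []byte("secret")))
}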
348 vendor/github.com/square/go-jose/symmetric.go (generated, vendored)
@@ -1,348 +0,0 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"github.com/square/go-jose/cipher"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Random reader (stubbed out in tests)
|
||||
var randReader = rand.Reader
|
||||
|
||||
// Dummy key cipher for shared symmetric key mode
|
||||
type symmetricKeyCipher struct {
|
||||
key []byte // Pre-shared content-encryption key
|
||||
}
|
||||
|
||||
// Signer/verifier for MAC modes
|
||||
type symmetricMac struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
// Input/output from an AEAD operation
|
||||
type aeadParts struct {
|
||||
iv, ciphertext, tag []byte
|
||||
}
|
||||
|
||||
// A content cipher based on an AEAD construction
|
||||
type aeadContentCipher struct {
|
||||
keyBytes int
|
||||
authtagBytes int
|
||||
getAead func(key []byte) (cipher.AEAD, error)
|
||||
}
|
||||
|
||||
// Random key generator
|
||||
type randomKeyGenerator struct {
|
||||
size int
|
||||
}
|
||||
|
||||
// Static key generator
|
||||
type staticKeyGenerator struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
// Create a new content cipher based on AES-GCM
|
||||
func newAESGCM(keySize int) contentCipher {
|
||||
return &aeadContentCipher{
|
||||
keyBytes: keySize,
|
||||
authtagBytes: 16,
|
||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
||||
aes, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cipher.NewGCM(aes)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new content cipher based on AES-CBC+HMAC
|
||||
func newAESCBC(keySize int) contentCipher {
|
||||
return &aeadContentCipher{
|
||||
keyBytes: keySize * 2,
|
||||
authtagBytes: 16,
|
||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
||||
return josecipher.NewCBCHMAC(key, aes.NewCipher)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get an AEAD cipher object for the given content encryption algorithm
|
||||
func getContentCipher(alg ContentEncryption) contentCipher {
|
||||
switch alg {
|
||||
case A128GCM:
|
||||
return newAESGCM(16)
|
||||
case A192GCM:
|
||||
return newAESGCM(24)
|
||||
case A256GCM:
|
||||
return newAESGCM(32)
|
||||
case A128CBC_HS256:
|
||||
return newAESCBC(16)
|
||||
case A192CBC_HS384:
|
||||
return newAESCBC(24)
|
||||
case A256CBC_HS512:
|
||||
return newAESCBC(32)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// newSymmetricRecipient creates a JWE encrypter based on AES-GCM key wrap.
|
||||
func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
|
||||
switch keyAlg {
|
||||
case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &symmetricKeyCipher{
|
||||
key: key,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newSymmetricSigner creates a recipientSigInfo based on the given key.
|
||||
func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case HS256, HS384, HS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
signer: &symmetricMac{
|
||||
key: key,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Generate a random key for the given content cipher
|
||||
func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
key := make([]byte, ctx.size)
|
||||
_, err := io.ReadFull(randReader, key)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
return key, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for random generator
|
||||
func (ctx randomKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Generate a static key (for direct mode)
|
||||
func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
cek := make([]byte, len(ctx.key))
|
||||
copy(cek, ctx.key)
|
||||
return cek, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for static generator
|
||||
func (ctx staticKeyGenerator) keySize() int {
|
||||
return len(ctx.key)
|
||||
}
|
||||
|
||||
// Get key size for this cipher
|
||||
func (ctx aeadContentCipher) keySize() int {
|
||||
return ctx.keyBytes
|
||||
}
|
||||
|
||||
// Encrypt some data
|
||||
func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
|
||||
// Get a new AEAD instance
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize a new nonce
|
||||
iv := make([]byte, aead.NonceSize())
|
||||
_, err = io.ReadFull(randReader, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
|
||||
offset := len(ciphertextAndTag) - ctx.authtagBytes
|
||||
|
||||
return &aeadParts{
|
||||
iv: iv,
|
||||
ciphertext: ciphertextAndTag[:offset],
|
||||
tag: ciphertextAndTag[offset:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Decrypt some data
|
||||
func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
|
||||
}
|
||||
|
||||
// Encrypt the content encryption key.
|
||||
func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case DIRECT:
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
||||
aead := newAESGCM(len(ctx.key))
|
||||
|
||||
parts, err := aead.encrypt(ctx.key, []byte{}, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
header: &rawHeader{
|
||||
Iv: newBuffer(parts.iv),
|
||||
Tag: newBuffer(parts.tag),
|
||||
},
|
||||
encryptedKey: parts.ciphertext,
|
||||
}, nil
|
||||
case A128KW, A192KW, A256KW:
|
||||
block, err := aes.NewCipher(ctx.key)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Decrypt the content encryption key.
|
||||
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
switch KeyAlgorithm(headers.Alg) {
|
||||
case DIRECT:
|
||||
cek := make([]byte, len(ctx.key))
|
||||
copy(cek, ctx.key)
|
||||
return cek, nil
|
||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
||||
aead := newAESGCM(len(ctx.key))
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: headers.Iv.bytes(),
|
||||
ciphertext: recipient.encryptedKey,
|
||||
tag: headers.Tag.bytes(),
|
||||
}
|
||||
|
||||
cek, err := aead.decrypt(ctx.key, []byte{}, parts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cek, nil
|
||||
case A128KW, A192KW, A256KW:
|
||||
block, err := aes.NewCipher(ctx.key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cek, nil
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
mac, err := ctx.hmac(payload, alg)
|
||||
if err != nil {
|
||||
return Signature{}, errors.New("square/go-jose: failed to compute hmac")
|
||||
}
|
||||
|
||||
return Signature{
|
||||
signature: mac,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
|
||||
expected, err := ctx.hmac(payload, alg)
|
||||
if err != nil {
|
||||
return errors.New("square/go-jose: failed to compute hmac")
|
||||
}
|
||||
|
||||
if len(mac) != len(expected) {
|
||||
return errors.New("square/go-jose: invalid hmac")
|
||||
}
|
||||
|
||||
match := subtle.ConstantTimeCompare(mac, expected)
|
||||
if match != 1 {
|
||||
return errors.New("square/go-jose: invalid hmac")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the HMAC based on the given alg value
|
||||
func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
|
||||
var hash func() hash.Hash
|
||||
|
||||
switch alg {
|
||||
case HS256:
|
||||
hash = sha256.New
|
||||
case HS384:
|
||||
hash = sha512.New384
|
||||
case HS512:
|
||||
hash = sha512.New
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hmac := hmac.New(hash, ctx.key)
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hmac.Write(payload)
|
||||
return hmac.Sum(nil), nil
|
||||
}
|
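symmetricMac.verifyPayload above recomputes the MAC and compares it in constant time via subtle.ConstantTimeCompare. An equivalent stdlib sketch, for reference; hmac.Equal wraps the same constant-time check.

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "errors"
    "fmt"
)

// verifyHMAC recomputes the MAC over payload and compares in constant time.
func verifyHMAC(payload, mac, key []byte) error {
    h := hmac.New(sha256.New, key)
    h.Write(payload)
    if !hmac.Equal(mac, h.Sum(nil)) {
        return errors.New("invalid hmac")
    }
    return nil
}

func main() {
    h := hmac.New(sha256.New, []byte("secret"))
    h.Write([]byte("payload"))
    fmt.Println(verifyHMAC([]byte("payload"), h.Sum(nil), []byte("secret"))) // <nil>
}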
74 vendor/github.com/square/go-jose/utils.go (generated, vendored)
@@ -1,74 +0,0 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package jose

import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

// LoadPublicKey loads a public key from PEM/DER-encoded data.
func LoadPublicKey(data []byte) (interface{}, error) {
    input := data

    block, _ := pem.Decode(data)
    if block != nil {
        input = block.Bytes
    }

    // Try to load SubjectPublicKeyInfo
    pub, err0 := x509.ParsePKIXPublicKey(input)
    if err0 == nil {
        return pub, nil
    }

    cert, err1 := x509.ParseCertificate(input)
    if err1 == nil {
        return cert.PublicKey, nil
    }

    return nil, fmt.Errorf("square/go-jose: parse error, got '%s' and '%s'", err0, err1)
}

// LoadPrivateKey loads a private key from PEM/DER-encoded data.
func LoadPrivateKey(data []byte) (interface{}, error) {
    input := data

    block, _ := pem.Decode(data)
    if block != nil {
        input = block.Bytes
    }

    var priv interface{}
    priv, err0 := x509.ParsePKCS1PrivateKey(input)
    if err0 == nil {
        return priv, nil
    }

    priv, err1 := x509.ParsePKCS8PrivateKey(input)
    if err1 == nil {
        return priv, nil
    }

    priv, err2 := x509.ParseECPrivateKey(input)
    if err2 == nil {
        return priv, nil
    }

    return nil, fmt.Errorf("square/go-jose: parse error, got '%s', '%s' and '%s'", err0, err1, err2)
}
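LoadPrivateKey above strips PEM armor first and then tries the PKCS#1, PKCS#8 and EC parsers in turn. A small stdlib round-trip showing that strip-then-parse order, for reference only; the key size and block type are illustrative.

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "encoding/pem"
    "fmt"
)

func main() {
    // Round-trip: PKCS#1 DER -> PEM armor -> strip armor -> parse again,
    // the same order LoadPrivateKey uses before falling through parsers.
    key, _ := rsa.GenerateKey(rand.Reader, 2048)
    der := x509.MarshalPKCS1PrivateKey(key)
    armored := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: der})

    block, _ := pem.Decode(armored)
    parsed, err := x509.ParsePKCS1PrivateKey(block.Bytes)
    fmt.Println(parsed.E == key.E, err) // true <nil>
}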
27 vendor/golang.org/x/text/LICENSE (generated, vendored)
@@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 vendor/golang.org/x/text/PATENTS (generated, vendored)
@@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
351 vendor/golang.org/x/text/internal/gen/code.go (generated, vendored)
@@ -1,351 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file contains utilities for generating code.
|
||||
|
||||
// TODO: other write methods like:
|
||||
// - slices, maps, types, etc.
|
||||
|
||||
// CodeWriter is a utility for writing structured code. It computes the content
|
||||
// hash and size of written content. It ensures there are newlines between
|
||||
// written code blocks.
|
||||
type CodeWriter struct {
|
||||
buf bytes.Buffer
|
||||
Size int
|
||||
Hash hash.Hash32 // content hash
|
||||
gob *gob.Encoder
|
||||
// For comments we skip the usual one-line separator if they are followed by
|
||||
// a code block.
|
||||
skipSep bool
|
||||
}
|
||||
|
||||
func (w *CodeWriter) Write(p []byte) (n int, err error) {
|
||||
return w.buf.Write(p)
|
||||
}
|
||||
|
||||
// NewCodeWriter returns a new CodeWriter.
|
||||
func NewCodeWriter() *CodeWriter {
|
||||
h := fnv.New32()
|
||||
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
|
||||
}
|
||||
|
||||
// WriteGoFile appends the buffer with the total size of all created structures
|
||||
// and writes it as a Go file to the the given file with the given package name.
|
||||
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo appends the buffer with the total size of all created structures and
|
||||
// writes it as a Go file to the the given writer with the given package name.
|
||||
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
|
||||
sz := w.Size
|
||||
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
|
||||
defer w.buf.Reset()
|
||||
return WriteGo(out, pkg, w.buf.Bytes())
|
||||
}
|
||||
|
||||
func (w *CodeWriter) printf(f string, x ...interface{}) {
|
||||
fmt.Fprintf(w, f, x...)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) insertSep() {
|
||||
if w.skipSep {
|
||||
w.skipSep = false
|
||||
return
|
||||
}
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n")
|
||||
}
|
||||
|
||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
|
||||
// Initial empty lines are gobbled. The indentation for the first line is
|
||||
// stripped from consecutive lines.
|
||||
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
|
||||
s := fmt.Sprintf(comment, args...)
|
||||
s = strings.Trim(s, "\n")
|
||||
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n// ")
|
||||
w.skipSep = true
|
||||
|
||||
// strip first indent level.
|
||||
sep := "\n"
|
||||
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
|
||||
sep += s[:1]
|
||||
}
|
||||
|
||||
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
|
||||
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSizeInfo(size int) {
|
||||
w.printf("// Size: %d bytes\n", size)
|
||||
}
|
||||
|
||||
// WriteConst writes a constant of the given name and value.
|
||||
func (w *CodeWriter) WriteConst(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("const %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
w.printf("\n")
|
||||
default:
|
||||
w.printf("const %s = %#v\n", name, x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVar writes a variable of the given name and value.
|
||||
func (w *CodeWriter) WriteVar(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
oldSize := w.Size
|
||||
sz := int(v.Type().Size())
|
||||
w.Size += sz
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
case reflect.Struct:
|
||||
w.gob.Encode(x)
|
||||
fallthrough
|
||||
case reflect.Slice, reflect.Array:
|
||||
w.printf("var %s = ", name)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
default:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.gob.Encode(x)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
}
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeValue(v reflect.Value) {
|
||||
x := v.Interface()
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
w.WriteString(v.String())
|
||||
case reflect.Array:
|
||||
// Don't double count: callers of WriteArray count on the size being
|
||||
// added, so we need to discount it here.
|
||||
w.Size -= int(v.Type().Size())
|
||||
w.writeSlice(x, true)
|
||||
case reflect.Slice:
|
||||
w.writeSlice(x, false)
|
||||
case reflect.Struct:
|
||||
w.printf("%s{\n", typeName(v.Interface()))
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
w.printf("%s: ", t.Field(i).Name)
|
||||
w.writeValue(v.Field(i))
|
||||
w.printf(",\n")
|
||||
}
|
||||
w.printf("}")
|
||||
default:
|
||||
w.printf("%#v", x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes a string literal.
|
||||
func (w *CodeWriter) WriteString(s string) {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
io.WriteString(w.Hash, s) // content hash
|
||||
w.Size += len(s)
|
||||
|
||||
const maxInline = 40
|
||||
if len(s) <= maxInline {
|
||||
w.printf("%q", s)
|
||||
return
|
||||
}
|
||||
|
||||
// We will render the string as a multi-line string.
|
||||
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
|
||||
|
||||
// When starting on its own line, go fmt indents line 2+ an extra level.
|
||||
n, max := maxWidth, maxWidth-4
|
||||
|
||||
// As per https://golang.org/issue/18078, the compiler has trouble
|
||||
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
|
||||
// for large N. We insert redundant, explicit parentheses to work around
|
||||
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
|
||||
// ... + s127) + etc + (etc + ... + sN).
|
||||
explicitParens, extraComment := len(s) > 128*1024, ""
|
||||
if explicitParens {
|
||||
w.printf(`(`)
|
||||
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
|
||||
}
|
||||
|
||||
// Print "" +\n, if a string does not start on its own line.
|
||||
b := w.buf.Bytes()
|
||||
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
|
||||
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
|
||||
n, max = maxWidth, maxWidth
|
||||
}
|
||||
|
||||
w.printf(`"`)
|
||||
|
||||
for sz, p, nLines := 0, 0, 0; p < len(s); {
|
||||
var r rune
|
||||
r, sz = utf8.DecodeRuneInString(s[p:])
|
||||
out := s[p : p+sz]
|
||||
chars := 1
|
||||
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
|
||||
switch sz {
|
||||
case 1:
|
||||
out = fmt.Sprintf("\\x%02x", s[p])
|
||||
case 2, 3:
|
||||
out = fmt.Sprintf("\\u%04x", r)
|
||||
case 4:
|
||||
out = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
chars = len(out)
|
||||
}
|
||||
if n -= chars; n < 0 {
|
||||
nLines++
|
||||
if explicitParens && nLines&63 == 63 {
|
||||
w.printf("\") + (\"")
|
||||
}
|
||||
w.printf("\" +\n\"")
|
||||
n = max - len(out)
|
||||
}
|
||||
w.printf("%s", out)
|
||||
p += sz
|
||||
}
|
||||
w.printf(`"`)
|
||||
if explicitParens {
|
||||
w.printf(`)`)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSlice writes a slice value.
|
||||
func (w *CodeWriter) WriteSlice(x interface{}) {
|
||||
w.writeSlice(x, false)
|
||||
}
|
||||
|
||||
// WriteArray writes an array value.
|
||||
func (w *CodeWriter) WriteArray(x interface{}) {
|
||||
w.writeSlice(x, true)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
w.gob.Encode(v.Len())
|
||||
w.Size += v.Len() * int(v.Type().Elem().Size())
|
||||
name := typeName(x)
|
||||
if isArray {
|
||||
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
|
||||
}
|
||||
if isArray {
|
||||
w.printf("%s{\n", name)
|
||||
} else {
|
||||
w.printf("%s{ // %d elements\n", name, v.Len())
|
||||
}
|
||||
|
||||
switch kind := v.Type().Elem().Kind(); kind {
|
||||
case reflect.String:
|
||||
for _, s := range x.([]string) {
|
||||
w.WriteString(s)
|
||||
w.printf(",\n")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// nLine and nBlock are the number of elements per line and block.
|
||||
nLine, nBlock, format := 8, 64, "%d,"
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
format = "%#02x,"
|
||||
case reflect.Uint16:
|
||||
format = "%#04x,"
|
||||
case reflect.Uint32:
|
||||
nLine, nBlock, format = 4, 32, "%#08x,"
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
nLine, nBlock, format = 4, 32, "%#016x,"
|
||||
case reflect.Int8:
|
||||
nLine = 16
|
||||
}
|
||||
n := nLine
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i%nBlock == 0 && v.Len() > nBlock {
|
||||
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
|
||||
}
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.Encode(x)
|
||||
w.printf(format, x)
|
||||
if n--; n == 0 {
|
||||
n = nLine
|
||||
w.printf("\n")
|
||||
}
|
||||
}
|
||||
w.printf("\n")
|
||||
case reflect.Struct:
|
||||
zero := reflect.Zero(v.Type().Elem()).Interface()
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.EncodeValue(v)
|
||||
if !reflect.DeepEqual(zero, x) {
|
||||
line := fmt.Sprintf("%#v,\n", x)
|
||||
line = line[strings.IndexByte(line, '{'):]
|
||||
w.printf("%d: ", i)
|
||||
w.printf(line)
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
|
||||
}
|
||||
default:
|
||||
panic("gen: slice elem type not supported")
|
||||
}
|
||||
w.printf("}")
|
||||
}
|
||||
|
||||
// WriteType writes a definition of the type of the given value and returns the
|
||||
// type name.
|
||||
func (w *CodeWriter) WriteType(x interface{}) string {
|
||||
t := reflect.TypeOf(x)
|
||||
w.printf("type %s struct {\n", t.Name())
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
|
||||
}
|
||||
w.printf("}\n")
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
// typeName returns the name of the go type of x.
|
||||
func typeName(x interface{}) string {
|
||||
t := reflect.ValueOf(x).Type()
|
||||
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
|
||||
}
|
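CodeWriter above keeps a running content hash and byte count alongside everything it writes. The same tee-into-a-hash pattern with the stdlib, for reference; the output destination and format string are illustrative.

package main

import (
    "fmt"
    "hash/fnv"
    "io"
    "os"
)

func main() {
    // Tee generated output into an FNV-32 hash so a checksum of the emitted
    // content is available when the file is finalized, as CodeWriter does.
    h := fnv.New32()
    w := io.MultiWriter(os.Stdout, h)
    fmt.Fprintf(w, "const answer = %d\n", 42)
    fmt.Printf("checksum: %X\n", h.Sum32())
}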
281 vendor/golang.org/x/text/internal/gen/gen.go (generated, vendored)
@@ -1,281 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gen contains common code for the various code generation tools in the
|
||||
// text repository. Its usage ensures consistency between tools.
|
||||
//
|
||||
// This package defines command line flags that are common to most generation
|
||||
// tools. The flags allow for specifying specific Unicode and CLDR versions
|
||||
// in the public Unicode data repository (http://www.unicode.org/Public).
|
||||
//
|
||||
// A local Unicode data mirror can be set through the flag -local or the
|
||||
// environment variable UNICODE_DIR. The former takes precedence. The local
|
||||
// directory should follow the same structure as the public repository.
|
||||
//
|
||||
// IANA data can also optionally be mirrored by putting it in the iana directory
|
||||
// rooted at the top of the local mirror. Beware, though, that IANA data is not
|
||||
// versioned. So it is up to the developer to use the right version.
|
||||
package gen // import "golang.org/x/text/internal/gen"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
var (
|
||||
url = flag.String("url",
|
||||
"http://www.unicode.org/Public",
|
||||
"URL of Unicode database directory")
|
||||
iana = flag.String("iana",
|
||||
"http://www.iana.org",
|
||||
"URL of the IANA repository")
|
||||
unicodeVersion = flag.String("unicode",
|
||||
getEnv("UNICODE_VERSION", unicode.Version),
|
||||
"unicode version to use")
|
||||
cldrVersion = flag.String("cldr",
|
||||
getEnv("CLDR_VERSION", cldr.Version),
|
||||
"cldr version to use")
|
||||
)
|
||||
|
||||
func getEnv(name, def string) string {
|
||||
if v := os.Getenv(name); v != "" {
|
||||
return v
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// Init performs common initialization for a gen command. It parses the flags
|
||||
// and sets up the standard logging parameters.
|
||||
func Init() {
|
||||
log.SetPrefix("")
|
||||
log.SetFlags(log.Lshortfile)
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
|
||||
|
||||
package %s
|
||||
|
||||
`
|
||||
|
||||
// UnicodeVersion reports the requested Unicode version.
|
||||
func UnicodeVersion() string {
|
||||
return *unicodeVersion
|
||||
}
|
||||
|
||||
// UnicodeVersion reports the requested CLDR version.
|
||||
func CLDRVersion() string {
|
||||
return *cldrVersion
|
||||
}
|
||||
|
||||
// IsLocal reports whether data files are available locally.
|
||||
func IsLocal() bool {
|
||||
dir, err := localReadmeFile()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err = os.Stat(dir); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// OpenUCDFile opens the requested UCD file. The file is specified relative to
|
||||
// the public Unicode root directory. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenUCDFile(file string) io.ReadCloser {
|
||||
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
|
||||
}
|
||||
|
||||
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
|
||||
// are any errors.
|
||||
func OpenCLDRCoreZip() io.ReadCloser {
|
||||
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
|
||||
}
|
||||
|
||||
// OpenUnicodeFile opens the requested file of the requested category from the
|
||||
// root of the Unicode data archive. The file is specified relative to the
|
||||
// public Unicode root directory. If version is "", it will use the default
|
||||
// Unicode version. It will call log.Fatal if there are any errors.
|
||||
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
|
||||
if version == "" {
|
||||
version = UnicodeVersion()
|
||||
}
|
||||
return openUnicode(path.Join(category, version, file))
|
||||
}
|
||||
|
||||
// OpenIANAFile opens the requested IANA file. The file is specified relative
|
||||
// to the IANA root, which is typically either http://www.iana.org or the
|
||||
// iana directory in the local mirror. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenIANAFile(path string) io.ReadCloser {
|
||||
return Open(*iana, "iana", path)
|
||||
}
|
||||
|
||||

var (
	dirMutex sync.Mutex
	localDir string
)

const permissions = 0755

func localReadmeFile() (string, error) {
	p, err := build.Import("golang.org/x/text", "", build.FindOnly)
	if err != nil {
		return "", fmt.Errorf("Could not locate package: %v", err)
	}
	return filepath.Join(p.Dir, "DATA", "README"), nil
}

func getLocalDir() string {
	dirMutex.Lock()
	defer dirMutex.Unlock()

	readme, err := localReadmeFile()
	if err != nil {
		log.Fatal(err)
	}
	dir := filepath.Dir(readme)
	if _, err := os.Stat(readme); err != nil {
		if err := os.MkdirAll(dir, permissions); err != nil {
			log.Fatalf("Could not create directory: %v", err)
		}
		ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
	}
	return dir
}

const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.

This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.

Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other files in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`

// Open opens subdir/path if a local directory is specified and the file exists,
// where subdir is a directory relative to the local root, or fetches it from
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
func Open(urlRoot, subdir, path string) io.ReadCloser {
	file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
	return open(file, urlRoot, path)
}

func openUnicode(path string) io.ReadCloser {
	file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
	return open(file, *url, path)
}

// TODO: automatically periodically update non-versioned files.

func open(file, urlRoot, path string) io.ReadCloser {
	if f, err := os.Open(file); err == nil {
		return f
	}
	r := get(urlRoot, path)
	defer r.Close()
	b, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatalf("Could not download file: %v", err)
	}
	os.MkdirAll(filepath.Dir(file), permissions)
	if err := ioutil.WriteFile(file, b, permissions); err != nil {
		log.Fatalf("Could not create file: %v", err)
	}
	return ioutil.NopCloser(bytes.NewReader(b))
}

func get(root, path string) io.ReadCloser {
	url := root + "/" + path
	fmt.Printf("Fetching %s...", url)
	defer fmt.Println(" done.")
	resp, err := http.Get(url)
	if err != nil {
		log.Fatalf("HTTP GET: %v", err)
	}
	if resp.StatusCode != 200 {
		log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
	}
	return resp.Body
}

// TODO: use Write*Version in all applicable packages.

// WriteUnicodeVersion writes a constant for the Unicode version from which the
// tables are generated.
func WriteUnicodeVersion(w io.Writer) {
	fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
	fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
}

// WriteCLDRVersion writes a constant for the CLDR version from which the
// tables are generated.
func WriteCLDRVersion(w io.Writer) {
	fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
	fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
}

// WriteGoFile prepends a standard file comment and package statement to the
// given bytes, applies gofmt, and writes them to a file with the given name.
// It will call log.Fatal if there are any errors.
func WriteGoFile(filename, pkg string, b []byte) {
	w, err := os.Create(filename)
	if err != nil {
		log.Fatalf("Could not create file %s: %v", filename, err)
	}
	defer w.Close()
	if _, err = WriteGo(w, pkg, b); err != nil {
		log.Fatalf("Error writing file %s: %v", filename, err)
	}
}

// WriteGo prepends a standard file comment and package statement to the given
// bytes, applies gofmt, and writes them to w.
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
	src := []byte(fmt.Sprintf(header, pkg))
	src = append(src, b...)
	formatted, err := format.Source(src)
	if err != nil {
		// Print the generated code even in case of an error so that the
		// returned error can be meaningfully interpreted.
		n, _ = w.Write(src)
		return n, err
	}
	return w.Write(formatted)
}
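
// Illustrative sketch, not part of the original file: because WriteGo falls
// back to writing the unformatted source when gofmt fails, a caller can dump
// the buffer to diagnose the syntax problem in its generated code. The
// package name "example" and the src variable are placeholders.
//
//	var buf bytes.Buffer
//	if _, err := gen.WriteGo(&buf, "example", src); err != nil {
//		// buf holds the unformatted source at this point.
//		log.Fatalf("gofmt failed: %v\n%s", err, buf.Bytes())
//	}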

// Repackage rewrites a Go file from belonging to package main to belonging to
// the given package.
func Repackage(inFile, outFile, pkg string) {
	src, err := ioutil.ReadFile(inFile)
	if err != nil {
		log.Fatalf("reading %s: %v", inFile, err)
	}
	const toDelete = "package main\n\n"
	i := bytes.Index(src, []byte(toDelete))
	if i < 0 {
		log.Fatalf("Could not find %q in %s.", toDelete, inFile)
	}
	w := &bytes.Buffer{}
	w.Write(src[i+len(toDelete):])
	WriteGoFile(outFile, pkg, w.Bytes())
}
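
// Illustrative sketch, not part of the original file: Repackage is typically
// invoked from a go:generate step to turn a standalone maketables program's
// output into a regular package file. The file names and package name below
// are placeholders, not calls made elsewhere in this diff.
//
//	gen.Repackage("gen_trieval.go", "trieval.go", "bidi")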